Compare commits
23 Commits
| SHA1 |
|---|
| 67ad26c35a |
| 30d448e73c |
| d4064e3df4 |
| ec354f7a1a |
| 240e782606 |
| fcb0293c0d |
| 682c4598ee |
| a7d105bd69 |
| b9eef45305 |
| c8f20a66a8 |
| 1f6a384c9a |
| c9fc033cf5 |
| 32c964d310 |
| d60040b222 |
| 3ce1b4159b |
| 7516ac4ce7 |
| 2a73d8c4a3 |
| a318dff8b0 |
| 4a159d5bf5 |
| 734b040a48 |
| 10be026ace |
| 848a620568 |
| e18e288fda |
.github/workflows/pr-path-guard.yml (vendored, new file, +28)
@@ -0,0 +1,28 @@
name: translator-path-guard

on:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened

jobs:
  ensure-no-translator-changes:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Detect internal/translator changes
        id: changed-files
        uses: tj-actions/changed-files@v45
        with:
          files: |
            internal/translator/**
      - name: Fail when restricted paths change
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          echo "Changes under internal/translator are not allowed in pull requests."
          echo "You need to create an issue for our maintenance team to make the necessary changes."
          exit 1
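The guard above boils down to a prefix check over the changed paths. A minimal standalone sketch of that check, assuming the list of changed files is already available (for example from `git diff --name-only`); the function name touchesRestrictedPath and the sample path are illustrative, not part of the workflow or repository.

package main

import (
    "fmt"
    "strings"
)

// touchesRestrictedPath reports whether any changed file falls under the
// guarded prefix, mirroring the workflow's files filter "internal/translator/**".
func touchesRestrictedPath(changed []string) bool {
    for _, p := range changed {
        if strings.HasPrefix(p, "internal/translator/") {
            return true
        }
    }
    return false
}

func main() {
    // Hypothetical changed-file list for illustration only.
    changed := []string{"README.md", "internal/translator/example.go"}
    if touchesRestrictedPath(changed) {
        fmt.Println("Changes under internal/translator are not allowed in pull requests.")
    }
}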
README.md (+10)
@@ -8,6 +8,16 @@ It now also supports OpenAI Codex (GPT models) and Claude Code via OAuth.

So you can use local or multi-account CLI access with OpenAI (including Responses)/Gemini/Claude-compatible clients and SDKs.

## Sponsor

[](https://z.ai/subscribe?ic=8JVLJQFSKB)

This project is sponsored by Z.ai, supporting us with their GLM CODING PLAN.

GLM CODING PLAN is a subscription service designed for AI coding, starting at just $3/month. It provides access to their flagship GLM-4.6 model across 10+ popular AI coding tools (Claude Code, Cline, Roo Code, etc.), offering developers top-tier, fast, and stable coding experiences.

Get 10% OFF GLM CODING PLAN: https://z.ai/subscribe?ic=8JVLJQFSKB

## Overview

- OpenAI/Gemini/Claude compatible API endpoints for CLI models
README_CN.md (+10)
@@ -8,7 +8,15 @@

You can use local or multi-account CLI access through any OpenAI (including Responses)/Gemini/Claude-compatible client or SDK.

Domestic providers are now also supported: [Qwen Code](https://github.com/QwenLM/qwen-code) and [iFlow](https://iflow.cn/).
## Sponsor

[](https://www.bigmodel.cn/claude-code?ic=RRVJPB5SII)

This project is sponsored by Zhipu AI (Z智谱), which supports it through the GLM CODING PLAN.

GLM CODING PLAN is a subscription plan built for AI coding, starting at just 20 RMB per month. It provides access to Zhipu's flagship GLM-4.6 model in a dozen or so mainstream AI coding tools such as Claude Code, Cline, and Roo Code, giving developers a top-tier coding experience.

Zhipu AI offers a special discount for this software: purchasing through the following link gets 10% off: https://www.bigmodel.cn/claude-code?ic=RRVJPB5SII

## Features
@@ -12,6 +12,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
    "sort"
    "strconv"
    "strings"
    "sync"
@@ -229,8 +230,32 @@ func (h *Handler) managementCallbackURL(path string) (string, error) {
    return fmt.Sprintf("http://127.0.0.1:%d%s", h.cfg.Port, path), nil
}

// List auth files
func (h *Handler) ListAuthFiles(c *gin.Context) {
    if h == nil {
        c.JSON(500, gin.H{"error": "handler not initialized"})
        return
    }
    if h.authManager == nil {
        h.listAuthFilesFromDisk(c)
        return
    }
    auths := h.authManager.List()
    files := make([]gin.H, 0, len(auths))
    for _, auth := range auths {
        if entry := h.buildAuthFileEntry(auth); entry != nil {
            files = append(files, entry)
        }
    }
    sort.Slice(files, func(i, j int) bool {
        nameI, _ := files[i]["name"].(string)
        nameJ, _ := files[j]["name"].(string)
        return strings.ToLower(nameI) < strings.ToLower(nameJ)
    })
    c.JSON(200, gin.H{"files": files})
}

// List auth files from disk when the auth manager is unavailable.
func (h *Handler) listAuthFilesFromDisk(c *gin.Context) {
    entries, err := os.ReadDir(h.cfg.AuthDir)
    if err != nil {
        c.JSON(500, gin.H{"error": fmt.Sprintf("failed to read auth dir: %v", err)})

@@ -263,6 +288,103 @@ func (h *Handler) ListAuthFiles(c *gin.Context) {
    c.JSON(200, gin.H{"files": files})
}

func (h *Handler) buildAuthFileEntry(auth *coreauth.Auth) gin.H {
    if auth == nil {
        return nil
    }
    runtimeOnly := isRuntimeOnlyAuth(auth)
    path := strings.TrimSpace(authAttribute(auth, "path"))
    if path == "" && !runtimeOnly {
        return nil
    }
    name := strings.TrimSpace(auth.FileName)
    if name == "" {
        name = auth.ID
    }
    entry := gin.H{
        "id":             auth.ID,
        "name":           name,
        "type":           strings.TrimSpace(auth.Provider),
        "provider":       strings.TrimSpace(auth.Provider),
        "label":          auth.Label,
        "status":         auth.Status,
        "status_message": auth.StatusMessage,
        "disabled":       auth.Disabled,
        "unavailable":    auth.Unavailable,
        "runtime_only":   runtimeOnly,
        "source":         "memory",
        "size":           int64(0),
    }
    if email := authEmail(auth); email != "" {
        entry["email"] = email
    }
    if accountType, account := auth.AccountInfo(); accountType != "" || account != "" {
        if accountType != "" {
            entry["account_type"] = accountType
        }
        if account != "" {
            entry["account"] = account
        }
    }
    if !auth.CreatedAt.IsZero() {
        entry["created_at"] = auth.CreatedAt
    }
    if !auth.UpdatedAt.IsZero() {
        entry["modtime"] = auth.UpdatedAt
        entry["updated_at"] = auth.UpdatedAt
    }
    if !auth.LastRefreshedAt.IsZero() {
        entry["last_refresh"] = auth.LastRefreshedAt
    }
    if path != "" {
        entry["path"] = path
        entry["source"] = "file"
        if info, err := os.Stat(path); err == nil {
            entry["size"] = info.Size()
            entry["modtime"] = info.ModTime()
        } else if os.IsNotExist(err) {
            entry["source"] = "memory"
        } else {
            log.WithError(err).Warnf("failed to stat auth file %s", path)
        }
    }
    return entry
}

func authEmail(auth *coreauth.Auth) string {
    if auth == nil {
        return ""
    }
    if auth.Metadata != nil {
        if v, ok := auth.Metadata["email"].(string); ok {
            return strings.TrimSpace(v)
        }
    }
    if auth.Attributes != nil {
        if v := strings.TrimSpace(auth.Attributes["email"]); v != "" {
            return v
        }
        if v := strings.TrimSpace(auth.Attributes["account_email"]); v != "" {
            return v
        }
    }
    return ""
}

func authAttribute(auth *coreauth.Auth, key string) string {
    if auth == nil || len(auth.Attributes) == 0 {
        return ""
    }
    return auth.Attributes[key]
}

func isRuntimeOnlyAuth(auth *coreauth.Auth) bool {
    if auth == nil || len(auth.Attributes) == 0 {
        return false
    }
    return strings.EqualFold(strings.TrimSpace(auth.Attributes["runtime_only"]), "true")
}

// Download single auth file by name
func (h *Handler) DownloadAuthFile(c *gin.Context) {
    name := c.Query("name")
@@ -116,36 +116,6 @@ func GeminiModels() []*ModelInfo {
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
            Thinking:                   &ThinkingSupport{Min: 512, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
        },
        {
            ID:                         "gemini-2.5-flash-image-preview",
            Object:                     "model",
            Created:                    time.Now().Unix(),
            OwnedBy:                    "google",
            Type:                       "gemini",
            Name:                       "models/gemini-2.5-flash-image-preview",
            Version:                    "2.5",
            DisplayName:                "Gemini 2.5 Flash Image Preview",
            Description:                "State-of-the-art image generation and editing model.",
            InputTokenLimit:            1048576,
            OutputTokenLimit:           8192,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
            // image models don't support thinkingConfig; leave Thinking nil
        },
        {
            ID:                         "gemini-2.5-flash-image",
            Object:                     "model",
            Created:                    time.Now().Unix(),
            OwnedBy:                    "google",
            Type:                       "gemini",
            Name:                       "models/gemini-2.5-flash-image",
            Version:                    "2.5",
            DisplayName:                "Gemini 2.5 Flash Image",
            Description:                "State-of-the-art image generation and editing model.",
            InputTokenLimit:            1048576,
            OutputTokenLimit:           8192,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
            // image models don't support thinkingConfig; leave Thinking nil
        },
    }
}
@@ -153,57 +123,111 @@ func GeminiModels() []*ModelInfo {
func GetGeminiModels() []*ModelInfo { return GeminiModels() }

// GetGeminiCLIModels returns the standard Gemini model definitions
func GetGeminiCLIModels() []*ModelInfo { return GeminiModels() }
func GetGeminiCLIModels() []*ModelInfo {
    base := GeminiModels()
    return append(base,
        []*ModelInfo{
            {
                ID:                         "gemini-3-pro-preview-11-2025",
                Object:                     "model",
                Created:                    time.Now().Unix(),
                OwnedBy:                    "google",
                Type:                       "gemini",
                Name:                       "models/gemini-3-pro-preview-11-2025",
                Version:                    "3",
                DisplayName:                "Gemini 3 Pro Preview 11-2025",
                Description:                "Latest preview of Gemini Pro",
                InputTokenLimit:            1048576,
                OutputTokenLimit:           65536,
                SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
                Thinking:                   &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
            },
        }...,
    )
}

// GetAIStudioModels returns the Gemini model definitions for AI Studio integrations
func GetAIStudioModels() []*ModelInfo {
    base := GeminiModels()

    return append(base,
        &ModelInfo{
            ID:                         "gemini-pro-latest",
            Object:                     "model",
            Created:                    time.Now().Unix(),
            OwnedBy:                    "google",
            Type:                       "gemini",
            Name:                       "models/gemini-pro-latest",
            Version:                    "2.5",
            DisplayName:                "Gemini Pro Latest",
            Description:                "Latest release of Gemini Pro",
            InputTokenLimit:            1048576,
            OutputTokenLimit:           65536,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
            Thinking:                   &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
        },
        &ModelInfo{
            ID:                         "gemini-flash-latest",
            Object:                     "model",
            Created:                    time.Now().Unix(),
            OwnedBy:                    "google",
            Type:                       "gemini",
            Name:                       "models/gemini-flash-latest",
            Version:                    "2.5",
            DisplayName:                "Gemini Flash Latest",
            Description:                "Latest release of Gemini Flash",
            InputTokenLimit:            1048576,
            OutputTokenLimit:           65536,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
            Thinking:                   &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
        },
        &ModelInfo{
            ID:                         "gemini-flash-lite-latest",
            Object:                     "model",
            Created:                    time.Now().Unix(),
            OwnedBy:                    "google",
            Type:                       "gemini",
            Name:                       "models/gemini-flash-lite-latest",
            Version:                    "2.5",
            DisplayName:                "Gemini Flash-Lite Latest",
            Description:                "Latest release of Gemini Flash-Lite",
            InputTokenLimit:            1048576,
            OutputTokenLimit:           65536,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
            Thinking:                   &ThinkingSupport{Min: 512, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
        },
        []*ModelInfo{
            {
                ID:                         "gemini-pro-latest",
                Object:                     "model",
                Created:                    time.Now().Unix(),
                OwnedBy:                    "google",
                Type:                       "gemini",
                Name:                       "models/gemini-pro-latest",
                Version:                    "2.5",
                DisplayName:                "Gemini Pro Latest",
                Description:                "Latest release of Gemini Pro",
                InputTokenLimit:            1048576,
                OutputTokenLimit:           65536,
                SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
                Thinking:                   &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
            },
            {
                ID:                         "gemini-flash-latest",
                Object:                     "model",
                Created:                    time.Now().Unix(),
                OwnedBy:                    "google",
                Type:                       "gemini",
                Name:                       "models/gemini-flash-latest",
                Version:                    "2.5",
                DisplayName:                "Gemini Flash Latest",
                Description:                "Latest release of Gemini Flash",
                InputTokenLimit:            1048576,
                OutputTokenLimit:           65536,
                SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
                Thinking:                   &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
            },
            {
                ID:                         "gemini-flash-lite-latest",
                Object:                     "model",
                Created:                    time.Now().Unix(),
                OwnedBy:                    "google",
                Type:                       "gemini",
                Name:                       "models/gemini-flash-lite-latest",
                Version:                    "2.5",
                DisplayName:                "Gemini Flash-Lite Latest",
                Description:                "Latest release of Gemini Flash-Lite",
                InputTokenLimit:            1048576,
                OutputTokenLimit:           65536,
                SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
                Thinking:                   &ThinkingSupport{Min: 512, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
            },
            {
                ID:                         "gemini-2.5-flash-image-preview",
                Object:                     "model",
                Created:                    time.Now().Unix(),
                OwnedBy:                    "google",
                Type:                       "gemini",
                Name:                       "models/gemini-2.5-flash-image-preview",
                Version:                    "2.5",
                DisplayName:                "Gemini 2.5 Flash Image Preview",
                Description:                "State-of-the-art image generation and editing model.",
                InputTokenLimit:            1048576,
                OutputTokenLimit:           8192,
                SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
                // image models don't support thinkingConfig; leave Thinking nil
            },
            {
                ID:                         "gemini-2.5-flash-image",
                Object:                     "model",
                Created:                    time.Now().Unix(),
                OwnedBy:                    "google",
                Type:                       "gemini",
                Name:                       "models/gemini-2.5-flash-image",
                Version:                    "2.5",
                DisplayName:                "Gemini 2.5 Flash Image",
                Description:                "State-of-the-art image generation and editing model.",
                InputTokenLimit:            1048576,
                OutputTokenLimit:           8192,
                SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
                // image models don't support thinkingConfig; leave Thinking nil
            },
        }...,
    )
}
@@ -328,17 +352,43 @@ func GetOpenAIModels() []*ModelInfo {
            SupportedParameters: []string{"tools"},
        },
        {
            ID:                  "codex-mini-latest",
            ID:                  "gpt-5-codex-mini",
            Object:              "model",
            Created:             time.Now().Unix(),
            OwnedBy:             "openai",
            Type:                "openai",
            Version:             "1.0",
            DisplayName:         "Codex Mini",
            Description:         "Lightweight code generation model",
            ContextLength:       4096,
            MaxCompletionTokens: 2048,
            SupportedParameters: []string{"temperature", "max_tokens", "stream", "stop"},
            Version:             "gpt-5-2025-11-07",
            DisplayName:         "GPT 5 Codex Mini",
            Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
            ContextLength:       400000,
            MaxCompletionTokens: 128000,
            SupportedParameters: []string{"tools"},
        },
        {
            ID:                  "gpt-5-codex-mini-medium",
            Object:              "model",
            Created:             time.Now().Unix(),
            OwnedBy:             "openai",
            Type:                "openai",
            Version:             "gpt-5-2025-11-07",
            DisplayName:         "GPT 5 Codex Mini Medium",
            Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
            ContextLength:       400000,
            MaxCompletionTokens: 128000,
            SupportedParameters: []string{"tools"},
        },
        {
            ID:                  "gpt-5-codex-mini-high",
            Object:              "model",
            Created:             time.Now().Unix(),
            OwnedBy:             "openai",
            Type:                "openai",
            Version:             "gpt-5-2025-11-07",
            DisplayName:         "GPT 5 Codex Mini High",
            Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
            ContextLength:       400000,
            MaxCompletionTokens: 128000,
            SupportedParameters: []string{"tools"},
        },
    }
}
@@ -414,6 +464,7 @@ func GetIFlowModels() []*ModelInfo {
        {ID: "qwen3-235b-a22b-thinking-2507", DisplayName: "Qwen3-235B-A22B-Thinking", Description: "Qwen3 235B A22B Thinking (2507)"},
        {ID: "qwen3-235b-a22b-instruct", DisplayName: "Qwen3-235B-A22B-Instruct", Description: "Qwen3 235B A22B Instruct"},
        {ID: "qwen3-235b", DisplayName: "Qwen3-235B-A22B", Description: "Qwen3 235B A22B"},
        {ID: "minimax-m2", DisplayName: "MiniMax-M2", Description: "MiniMax M2"},
    }
    models := make([]*ModelInfo, 0, len(entries))
    for _, entry := range entries {
@@ -75,6 +75,16 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
        case "gpt-5-codex-high":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
        }
    } else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
        body, _ = sjson.SetBytes(body, "model", "gpt-5-codex-mini")
        switch req.Model {
        case "gpt-5-codex-mini-medium":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
        case "gpt-5-codex-mini-high":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
        default:
            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
        }
    }

    body, _ = sjson.SetBytes(body, "stream", true)
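The aliased model IDs are collapsed onto the upstream model name, with the suffix encoded as a reasoning effort. A small standalone sketch of that body rewrite, assuming only github.com/tidwall/sjson and a hand-written Responses-style payload; the executor plumbing around it is omitted.

package main

import (
    "fmt"

    "github.com/tidwall/sjson"
)

func main() {
    // Hand-written request body for illustration; the real executor receives this from the translator layer.
    body := []byte(`{"model":"gpt-5-codex-mini-high","stream":false}`)

    // Collapse the alias onto the upstream model name and encode the "-high" suffix as reasoning effort.
    body, _ = sjson.SetBytes(body, "model", "gpt-5-codex-mini")
    body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
    body, _ = sjson.SetBytes(body, "stream", true)

    fmt.Println(string(body))
    // prints: {"model":"gpt-5-codex-mini","stream":true,"reasoning":{"effort":"high"}}
}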
@@ -188,6 +198,14 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
        case "gpt-5-codex-high":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
        }
    } else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
        body, _ = sjson.SetBytes(body, "model", "gpt-5-codex-mini")
        switch req.Model {
        case "gpt-5-codex-mini-medium":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
        case "gpt-5-codex-mini-high":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
        }
    }

    body, _ = sjson.DeleteBytes(body, "previous_response_id")
@@ -312,6 +330,17 @@ func (e *CodexExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth
        default:
            body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
        }
    } else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
        modelForCounting = "gpt-5"
        body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
        switch req.Model {
        case "gpt-5-codex-mini-medium":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
        case "gpt-5-codex-mini-high":
            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
        default:
            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
        }
    }

    body, _ = sjson.DeleteBytes(body, "previous_response_id")
@@ -508,6 +537,11 @@ func (e *CodexExecutor) cacheHelper(ctx context.Context, from sdktranslator.Form
                codexCacheMap[key] = cache
            }
        }
    } else if from == "openai-response" {
        promptCacheKey := gjson.GetBytes(req.Payload, "prompt_cache_key")
        if promptCacheKey.Exists() {
            cache.ID = promptCacheKey.String()
        }
    }

    rawJSON, _ = sjson.SetBytes(rawJSON, "prompt_cache_key", cache.ID)
@@ -634,11 +634,19 @@ func geminiCLIClientMetadata() string {
func cliPreviewFallbackOrder(model string) []string {
    switch model {
    case "gemini-2.5-pro":
        return []string{"gemini-2.5-pro-preview-05-06", "gemini-2.5-pro-preview-06-05"}
        return []string{
            // "gemini-2.5-pro-preview-05-06",
            "gemini-2.5-pro-preview-06-05",
        }
    case "gemini-2.5-flash":
        return []string{"gemini-2.5-flash-preview-04-17", "gemini-2.5-flash-preview-05-20"}
        return []string{
            // "gemini-2.5-flash-preview-04-17",
            // "gemini-2.5-flash-preview-05-20",
        }
    case "gemini-2.5-flash-lite":
        return []string{"gemini-2.5-flash-lite-preview-06-17"}
        return []string{
            // "gemini-2.5-flash-lite-preview-06-17",
        }
    default:
        return nil
    }
@@ -99,7 +99,7 @@ func ConvertClaudeRequestToCLI(modelName string, inputRawJSON []byte, _ bool) []
            if len(toolCallIDs) > 1 {
                funcName = strings.Join(toolCallIDs[0:len(toolCallIDs)-1], "-")
            }
            responseData := contentResult.Get("content").String()
            responseData := contentResult.Get("content").Raw
            functionResponse := client.FunctionResponse{Name: funcName, Response: map[string]interface{}{"result": responseData}}
            clientContent.Parts = append(clientContent.Parts, client.Part{FunctionResponse: &functionResponse})
        }
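The switch from .String() to .Raw matters when the tool result is a plain JSON string: .String() yields the decoded text, while .Raw keeps the JSON literal with its quotes and escapes (for objects and arrays gjson returns the raw JSON from both). A standalone comparison, assuming only github.com/tidwall/gjson and a made-up message payload:

package main

import (
    "fmt"

    "github.com/tidwall/gjson"
)

func main() {
    msg := `{"content":"{\"ok\":true}","blocks":[{"type":"text","text":"hi"}]}`

    c := gjson.Get(msg, "content")
    fmt.Println(c.String()) // {"ok":true}      (decoded string value)
    fmt.Println(c.Raw)      // "{\"ok\":true}"  (original JSON literal, quotes kept)

    b := gjson.Get(msg, "blocks")
    fmt.Println(b.String() == b.Raw) // true: for arrays and objects String() returns Raw
}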
@@ -127,6 +127,7 @@ func ConvertClaudeRequestToCLI(modelName string, inputRawJSON []byte, _ bool) []
            inputSchema := inputSchemaResult.Raw
            tool, _ := sjson.Delete(toolResult.Raw, "input_schema")
            tool, _ = sjson.SetRaw(tool, "parametersJsonSchema", inputSchema)
            tool, _ = sjson.Delete(tool, "strict")
            var toolDeclaration any
            if err := json.Unmarshal([]byte(tool), &toolDeclaration); err == nil {
                tools[0].FunctionDeclarations = append(tools[0].FunctionDeclarations, toolDeclaration)
@@ -65,17 +65,23 @@ func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bo
    if tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() {
        var setBudget bool
        var normalized int
        if v := tc.Get("thinking_budget"); v.Exists() {

        if v := tc.Get("thinkingBudget"); v.Exists() {
            normalized = util.NormalizeThinkingBudget(modelName, int(v.Int()))
            out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", normalized)
            setBudget = true
        } else if v := tc.Get("thinking_budget"); v.Exists() {
            normalized = util.NormalizeThinkingBudget(modelName, int(v.Int()))
            out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", normalized)
            setBudget = true
        }
        if v := tc.Get("include_thoughts"); v.Exists() {

        if v := tc.Get("includeThoughts"); v.Exists() {
            out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", v.Bool())
        } else if setBudget {
            if normalized != 0 {
                out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true)
            }
        } else if v := tc.Get("include_thoughts"); v.Exists() {
            out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", v.Bool())
        } else if setBudget && normalized != 0 {
            out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true)
        }
        }
    }
}
@@ -149,11 +155,7 @@ func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bo
            toolCallID := m.Get("tool_call_id").String()
            if toolCallID != "" {
                c := m.Get("content")
                if c.Type == gjson.String {
                    toolResponses[toolCallID] = c.String()
                } else if c.IsObject() && c.Get("type").String() == "text" {
                    toolResponses[toolCallID] = c.Get("text").String()
                }
                toolResponses[toolCallID] = c.Raw
            }
        }
    }
@@ -254,7 +256,7 @@ func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bo
                if resp == "" {
                    resp = "{}"
                }
                toolNode, _ = sjson.SetRawBytes(toolNode, "parts."+itoa(pp)+".functionResponse.response", []byte(`{"result":`+quoteIfNeeded(resp)+`}`))
                toolNode, _ = sjson.SetBytes(toolNode, "parts."+itoa(pp)+".functionResponse.response.result", []byte(resp))
                pp++
            }
        }
@@ -92,7 +92,7 @@ func ConvertClaudeRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
            if len(toolCallIDs) > 1 {
                funcName = strings.Join(toolCallIDs[0:len(toolCallIDs)-1], "-")
            }
            responseData := contentResult.Get("content").String()
            responseData := contentResult.Get("content").Raw
            functionResponse := client.FunctionResponse{Name: funcName, Response: map[string]interface{}{"result": responseData}}
            clientContent.Parts = append(clientContent.Parts, client.Part{FunctionResponse: &functionResponse})
        }
@@ -120,6 +120,7 @@ func ConvertClaudeRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
            inputSchema := inputSchemaResult.Raw
            tool, _ := sjson.Delete(toolResult.Raw, "input_schema")
            tool, _ = sjson.SetRaw(tool, "parametersJsonSchema", inputSchema)
            tool, _ = sjson.Delete(tool, "strict")
            var toolDeclaration any
            if err := json.Unmarshal([]byte(tool), &toolDeclaration); err == nil {
                tools[0].FunctionDeclarations = append(tools[0].FunctionDeclarations, toolDeclaration)
@@ -65,18 +65,23 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
    if tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() {
        var setBudget bool
        var normalized int
        if v := tc.Get("thinking_budget"); v.Exists() {
            // Normalize budget to model range

        if v := tc.Get("thinkingBudget"); v.Exists() {
            normalized = util.NormalizeThinkingBudget(modelName, int(v.Int()))
            out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.thinkingBudget", normalized)
            setBudget = true
        } else if v := tc.Get("thinking_budget"); v.Exists() {
            normalized = util.NormalizeThinkingBudget(modelName, int(v.Int()))
            out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.thinkingBudget", normalized)
            setBudget = true
        }
        if v := tc.Get("include_thoughts"); v.Exists() {

        if v := tc.Get("includeThoughts"); v.Exists() {
            out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.include_thoughts", v.Bool())
        } else if setBudget {
            if normalized != 0 {
                out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.include_thoughts", true)
            }
        } else if v := tc.Get("include_thoughts"); v.Exists() {
            out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.include_thoughts", v.Bool())
        } else if setBudget && normalized != 0 {
            out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.include_thoughts", true)
        }
        }
    }
}
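A compact sketch of the thinking_config mapping above, assuming only gjson/sjson; the camelCase key is preferred over snake_case, and budget normalization via util.NormalizeThinkingBudget is omitted here, so the number passes through unchanged.

package main

import (
    "fmt"

    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)

func main() {
    // OpenAI-style request carrying a camelCase thinking config in extra_body.
    rawJSON := []byte(`{"extra_body":{"google":{"thinking_config":{"thinkingBudget":2048}}}}`)
    out := []byte(`{}`)

    tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config")
    // Accept the camelCase key first, then fall back to snake_case, as the translator does.
    budget := tc.Get("thinkingBudget")
    if !budget.Exists() {
        budget = tc.Get("thinking_budget")
    }
    if budget.Exists() {
        out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.thinkingBudget", budget.Int())
        if budget.Int() != 0 {
            // A non-zero budget implies thoughts should be surfaced unless the caller says otherwise.
            out, _ = sjson.SetBytes(out, "generationConfig.thinkingConfig.include_thoughts", true)
        }
    }
    fmt.Println(string(out))
    // {"generationConfig":{"thinkingConfig":{"thinkingBudget":2048,"include_thoughts":true}}}
}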
@@ -150,14 +155,11 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
            toolCallID := m.Get("tool_call_id").String()
            if toolCallID != "" {
                c := m.Get("content")
                if c.Type == gjson.String {
                    toolResponses[toolCallID] = c.String()
                } else if c.IsObject() && c.Get("type").String() == "text" {
                    toolResponses[toolCallID] = c.Get("text").String()
                }
                toolResponses[toolCallID] = c.Raw
            }
        }
    }

    for i := 0; i < len(arr); i++ {
        m := arr[i]
@@ -280,7 +282,7 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
                if resp == "" {
                    resp = "{}"
                }
                toolNode, _ = sjson.SetRawBytes(toolNode, "parts."+itoa(pp)+".functionResponse.response", []byte(`{"result":`+quoteIfNeeded(resp)+`}`))
                toolNode, _ = sjson.SetBytes(toolNode, "parts."+itoa(pp)+".functionResponse.response.result", []byte(resp))
                pp++
            }
        }
@@ -143,17 +143,11 @@ func ConvertOpenAIResponsesRequestToGemini(modelName string, inputRawJSON []byte
    }

    functionResponse, _ = sjson.Set(functionResponse, "functionResponse.name", functionName)
    // Also set response.name to align with docs/convert-2.md
    functionResponse, _ = sjson.Set(functionResponse, "functionResponse.response.name", functionName)

    // Parse output JSON string and set as response content
    if output != "" {
        outputResult := gjson.Parse(output)
        if outputResult.IsObject() {
            functionResponse, _ = sjson.SetRaw(functionResponse, "functionResponse.response.content", outputResult.String())
        } else {
            functionResponse, _ = sjson.Set(functionResponse, "functionResponse.response.content", output)
        }
        functionResponse, _ = sjson.Set(functionResponse, "functionResponse.response.result", outputResult.Raw)
    }

    functionContent, _ = sjson.SetRaw(functionContent, "parts.-1", functionResponse)
@@ -2,6 +2,7 @@ package responses
import (
    "bytes"

    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)
@@ -147,6 +148,11 @@ func ConvertOpenAIResponsesRequestToOpenAIChatCompletions(modelName string, inpu
            return true
        })
    } else if input.Type == gjson.String {
        msg := "{}"
        msg, _ = sjson.Set(msg, "role", "user")
        msg, _ = sjson.Set(msg, "content", input.String())
        out, _ = sjson.SetRaw(out, "messages.-1", msg)
    }

    // Convert tools from responses format to chat completions format