Compare commits
17 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f58d0faf8c | ||
|
|
df3b00621a | ||
|
|
72cb2689e8 | ||
|
|
ade279d1f2 | ||
|
|
9c5ac2927a | ||
|
|
7980f055fa | ||
|
|
eb2549a782 | ||
|
|
c419264a70 | ||
|
|
6b23e2da74 | ||
|
|
5ab0854b5b | ||
|
|
15981aa412 | ||
|
|
ac4f52c532 | ||
|
|
84fa497169 | ||
|
|
b641d90287 | ||
|
|
32d01a6a7c | ||
|
|
9ef76dcc61 | ||
|
|
4576f9915b |
34
.env.example
Normal file
34
.env.example
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# Example environment configuration for CLIProxyAPI.
|
||||||
|
# Copy this file to `.env` and uncomment the variables you need.
|
||||||
|
#
|
||||||
|
# NOTE: Environment variables are only required when using remote storage options.
|
||||||
|
# For local file-based storage (default), no environment variables need to be set.
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Management Web UI
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# MANAGEMENT_PASSWORD=change-me-to-a-strong-password
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Postgres Token Store (optional)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# PGSTORE_DSN=postgresql://user:pass@localhost:5432/cliproxy
|
||||||
|
# PGSTORE_SCHEMA=public
|
||||||
|
# PGSTORE_LOCAL_PATH=/var/lib/cliproxy
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Git-Backed Config Store (optional)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# GITSTORE_GIT_URL=https://github.com/your-org/cli-proxy-config.git
|
||||||
|
# GITSTORE_GIT_USERNAME=git-user
|
||||||
|
# GITSTORE_GIT_TOKEN=ghp_your_personal_access_token
|
||||||
|
# GITSTORE_LOCAL_PATH=/data/cliproxy/gitstore
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Object Store Token Store (optional)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# OBJECTSTORE_ENDPOINT=https://s3.your-cloud.example.com
|
||||||
|
# OBJECTSTORE_BUCKET=cli-proxy-config
|
||||||
|
# OBJECTSTORE_ACCESS_KEY=your_access_key
|
||||||
|
# OBJECTSTORE_SECRET_KEY=your_secret_key
|
||||||
|
# OBJECTSTORE_LOCAL_PATH=/data/cliproxy/objectstore
|
||||||
34
.gitignore
vendored
34
.gitignore
vendored
@@ -1,20 +1,32 @@
|
|||||||
|
# Binaries
|
||||||
|
cli-proxy-api
|
||||||
|
*.exe
|
||||||
|
|
||||||
|
# Configuration
|
||||||
config.yaml
|
config.yaml
|
||||||
|
.env
|
||||||
|
|
||||||
|
# Generated content
|
||||||
bin/*
|
bin/*
|
||||||
docs/*
|
|
||||||
logs/*
|
logs/*
|
||||||
conv/*
|
conv/*
|
||||||
|
temp/*
|
||||||
|
pgstore/*
|
||||||
|
gitstore/*
|
||||||
|
objectstore/*
|
||||||
|
static/*
|
||||||
|
|
||||||
|
# Authentication data
|
||||||
auths/*
|
auths/*
|
||||||
!auths/.gitkeep
|
!auths/.gitkeep
|
||||||
.vscode/*
|
|
||||||
.claude/*
|
# Documentation
|
||||||
.serena/*
|
docs/*
|
||||||
AGENTS.md
|
AGENTS.md
|
||||||
CLAUDE.md
|
CLAUDE.md
|
||||||
GEMINI.md
|
GEMINI.md
|
||||||
*.exe
|
|
||||||
temp/*
|
# Tooling metadata
|
||||||
cli-proxy-api
|
.vscode/*
|
||||||
static/*
|
.claude/*
|
||||||
.env
|
.serena/*
|
||||||
pgstore/*
|
|
||||||
gitstore/*
|
|
||||||
|
|||||||
21
README.md
21
README.md
@@ -456,6 +456,27 @@ You can also persist configuration and authentication data in PostgreSQL when ru
|
|||||||
3. **Bootstrapping:** If no configuration row exists, `config.example.yaml` seeds the database using the fixed identifier `config`.
|
3. **Bootstrapping:** If no configuration row exists, `config.example.yaml` seeds the database using the fixed identifier `config`.
|
||||||
4. **Token Sync:** Changes flow both ways—file updates are written to PostgreSQL and database records are mirrored back to disk so watchers and management APIs continue to operate.
|
4. **Token Sync:** Changes flow both ways—file updates are written to PostgreSQL and database records are mirrored back to disk so watchers and management APIs continue to operate.
|
||||||
|
|
||||||
|
### Object Storage-backed Configuration and Token Store
|
||||||
|
|
||||||
|
An S3-compatible object storage service can host configuration and authentication records.
|
||||||
|
|
||||||
|
**Environment Variables**
|
||||||
|
|
||||||
|
| Variable | Required | Default | Description |
|
||||||
|
|--------------------------|----------|--------------------------------|--------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `OBJECTSTORE_ENDPOINT` | Yes | | Object storage endpoint. Include `http://` or `https://` to force the protocol (omitted scheme → HTTPS). |
|
||||||
|
| `OBJECTSTORE_BUCKET` | Yes | | Bucket that stores `config/config.yaml` and `auths/*.json`. |
|
||||||
|
| `OBJECTSTORE_ACCESS_KEY` | Yes | | Access key ID for the object storage account. |
|
||||||
|
| `OBJECTSTORE_SECRET_KEY` | Yes | | Secret key for the object storage account. |
|
||||||
|
| `OBJECTSTORE_LOCAL_PATH` | No | Current working directory | Root directory for the local mirror; the server writes to `<value>/objectstore`. If unset, defaults to current CWD. |
|
||||||
|
|
||||||
|
**How it Works**
|
||||||
|
|
||||||
|
1. **Startup:** The endpoint is parsed (respecting any scheme prefix), a MinIO-compatible client is created in path-style mode, and the bucket is created when missing.
|
||||||
|
2. **Local Mirror:** A writable cache at `<OBJECTSTORE_LOCAL_PATH or CWD>/objectstore` mirrors `config/config.yaml` and `auths/`.
|
||||||
|
3. **Bootstrapping:** When `config/config.yaml` is absent in the bucket, the server copies `config.example.yaml`, uploads it, and uses it as the initial configuration.
|
||||||
|
4. **Sync:** Changes to configuration or auth files are uploaded to the bucket, and remote updates are mirrored back to disk, keeping watchers and management APIs in sync.
|
||||||
|
|
||||||
### OpenAI Compatibility Providers
|
### OpenAI Compatibility Providers
|
||||||
|
|
||||||
Configure upstream OpenAI-compatible providers (e.g., OpenRouter) via `openai-compatibility`.
|
Configure upstream OpenAI-compatible providers (e.g., OpenRouter) via `openai-compatibility`.
|
||||||
|
|||||||
21
README_CN.md
21
README_CN.md
@@ -469,6 +469,27 @@ openai-compatibility:
|
|||||||
3. **引导:** 若数据库中无配置记录,会使用 `config.example.yaml` 初始化,并以固定标识 `config` 写入。
|
3. **引导:** 若数据库中无配置记录,会使用 `config.example.yaml` 初始化,并以固定标识 `config` 写入。
|
||||||
4. **令牌同步:** 配置与令牌的更改会写入 PostgreSQL,同时数据库中的内容也会反向同步至本地镜像,便于文件监听与管理接口继续工作。
|
4. **令牌同步:** 配置与令牌的更改会写入 PostgreSQL,同时数据库中的内容也会反向同步至本地镜像,便于文件监听与管理接口继续工作。
|
||||||
|
|
||||||
|
### 对象存储驱动的配置与令牌存储
|
||||||
|
|
||||||
|
可以选择使用 S3 兼容的对象存储来托管配置与鉴权数据。
|
||||||
|
|
||||||
|
**环境变量**
|
||||||
|
|
||||||
|
| 变量 | 是否必填 | 默认值 | 说明 |
|
||||||
|
|--------------------------|----------|--------------------|--------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `OBJECTSTORE_ENDPOINT` | 是 | | 对象存储访问端点。可带 `http://` 或 `https://` 前缀指定协议(省略则默认 HTTPS)。 |
|
||||||
|
| `OBJECTSTORE_BUCKET` | 是 | | 用于存放 `config/config.yaml` 与 `auths/*.json` 的 Bucket 名称。 |
|
||||||
|
| `OBJECTSTORE_ACCESS_KEY` | 是 | | 对象存储账号的访问密钥 ID。 |
|
||||||
|
| `OBJECTSTORE_SECRET_KEY` | 是 | | 对象存储账号的访问密钥 Secret。 |
|
||||||
|
| `OBJECTSTORE_LOCAL_PATH` | 否 | 当前工作目录 (CWD) | 本地镜像根目录;服务会写入到 `<值>/objectstore`。 |
|
||||||
|
|
||||||
|
**工作流程**
|
||||||
|
|
||||||
|
1. **启动阶段:** 解析端点地址(识别协议前缀),创建 MinIO 兼容客户端并使用 Path-Style 模式,如 Bucket 不存在会自动创建。
|
||||||
|
2. **本地镜像:** 在 `<OBJECTSTORE_LOCAL_PATH 或当前工作目录>/objectstore` 维护可写缓存,同步 `config/config.yaml` 与 `auths/`。
|
||||||
|
3. **初始化:** 若 Bucket 中缺少配置文件,将以 `config.example.yaml` 为模板生成 `config/config.yaml` 并上传。
|
||||||
|
4. **双向同步:** 本地变更会上传到对象存储,同时远端对象也会拉回到本地,保证文件监听、管理 API 与 CLI 命令行为一致。
|
||||||
|
|
||||||
### OpenAI 兼容上游提供商
|
### OpenAI 兼容上游提供商
|
||||||
|
|
||||||
通过 `openai-compatibility` 配置上游 OpenAI 兼容提供商(例如 OpenRouter)。
|
通过 `openai-compatibility` 配置上游 OpenAI 兼容提供商(例如 OpenRouter)。
|
||||||
|
|||||||
@@ -9,11 +9,13 @@ import (
|
|||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/joho/godotenv"
|
||||||
configaccess "github.com/router-for-me/CLIProxyAPI/v6/internal/access/config_access"
|
configaccess "github.com/router-for-me/CLIProxyAPI/v6/internal/access/config_access"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||||
@@ -102,18 +104,25 @@ func main() {
|
|||||||
var cfg *config.Config
|
var cfg *config.Config
|
||||||
var isCloudDeploy bool
|
var isCloudDeploy bool
|
||||||
var (
|
var (
|
||||||
usePostgresStore bool
|
usePostgresStore bool
|
||||||
pgStoreDSN string
|
pgStoreDSN string
|
||||||
pgStoreSchema string
|
pgStoreSchema string
|
||||||
pgStoreLocalPath string
|
pgStoreLocalPath string
|
||||||
pgStoreInst *store.PostgresStore
|
pgStoreInst *store.PostgresStore
|
||||||
gitStoreLocalPath string
|
useGitStore bool
|
||||||
useGitStore bool
|
gitStoreRemoteURL string
|
||||||
gitStoreRemoteURL string
|
gitStoreUser string
|
||||||
gitStoreUser string
|
gitStorePassword string
|
||||||
gitStorePassword string
|
gitStoreLocalPath string
|
||||||
gitStoreInst *store.GitTokenStore
|
gitStoreInst *store.GitTokenStore
|
||||||
gitStoreRoot string
|
gitStoreRoot string
|
||||||
|
useObjectStore bool
|
||||||
|
objectStoreEndpoint string
|
||||||
|
objectStoreAccess string
|
||||||
|
objectStoreSecret string
|
||||||
|
objectStoreBucket string
|
||||||
|
objectStoreLocalPath string
|
||||||
|
objectStoreInst *store.ObjectTokenStore
|
||||||
)
|
)
|
||||||
|
|
||||||
wd, err := os.Getwd()
|
wd, err := os.Getwd()
|
||||||
@@ -121,6 +130,13 @@ func main() {
|
|||||||
log.Fatalf("failed to get working directory: %v", err)
|
log.Fatalf("failed to get working directory: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Load environment variables from .env if present.
|
||||||
|
if errLoad := godotenv.Load(filepath.Join(wd, ".env")); errLoad != nil {
|
||||||
|
if !errors.Is(errLoad, os.ErrNotExist) {
|
||||||
|
log.WithError(errLoad).Warn("failed to load .env file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
lookupEnv := func(keys ...string) (string, bool) {
|
lookupEnv := func(keys ...string) (string, bool) {
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
if value, ok := os.LookupEnv(key); ok {
|
if value, ok := os.LookupEnv(key); ok {
|
||||||
@@ -157,6 +173,22 @@ func main() {
|
|||||||
if value, ok := lookupEnv("GITSTORE_LOCAL_PATH", "gitstore_local_path"); ok {
|
if value, ok := lookupEnv("GITSTORE_LOCAL_PATH", "gitstore_local_path"); ok {
|
||||||
gitStoreLocalPath = value
|
gitStoreLocalPath = value
|
||||||
}
|
}
|
||||||
|
if value, ok := lookupEnv("OBJECTSTORE_ENDPOINT", "objectstore_endpoint"); ok {
|
||||||
|
useObjectStore = true
|
||||||
|
objectStoreEndpoint = value
|
||||||
|
}
|
||||||
|
if value, ok := lookupEnv("OBJECTSTORE_ACCESS_KEY", "objectstore_access_key"); ok {
|
||||||
|
objectStoreAccess = value
|
||||||
|
}
|
||||||
|
if value, ok := lookupEnv("OBJECTSTORE_SECRET_KEY", "objectstore_secret_key"); ok {
|
||||||
|
objectStoreSecret = value
|
||||||
|
}
|
||||||
|
if value, ok := lookupEnv("OBJECTSTORE_BUCKET", "objectstore_bucket"); ok {
|
||||||
|
objectStoreBucket = value
|
||||||
|
}
|
||||||
|
if value, ok := lookupEnv("OBJECTSTORE_LOCAL_PATH", "objectstore_local_path"); ok {
|
||||||
|
objectStoreLocalPath = value
|
||||||
|
}
|
||||||
|
|
||||||
// Check for cloud deploy mode only on first execution
|
// Check for cloud deploy mode only on first execution
|
||||||
// Read env var name in uppercase: DEPLOY
|
// Read env var name in uppercase: DEPLOY
|
||||||
@@ -196,6 +228,65 @@ func main() {
|
|||||||
cfg.AuthDir = pgStoreInst.AuthDir()
|
cfg.AuthDir = pgStoreInst.AuthDir()
|
||||||
log.Infof("postgres-backed token store enabled, workspace path: %s", pgStoreInst.WorkDir())
|
log.Infof("postgres-backed token store enabled, workspace path: %s", pgStoreInst.WorkDir())
|
||||||
}
|
}
|
||||||
|
} else if useObjectStore {
|
||||||
|
objectStoreRoot := objectStoreLocalPath
|
||||||
|
if objectStoreRoot == "" {
|
||||||
|
objectStoreRoot = wd
|
||||||
|
}
|
||||||
|
objectStoreRoot = filepath.Join(objectStoreRoot, "objectstore")
|
||||||
|
resolvedEndpoint := strings.TrimSpace(objectStoreEndpoint)
|
||||||
|
useSSL := true
|
||||||
|
if strings.Contains(resolvedEndpoint, "://") {
|
||||||
|
parsed, errParse := url.Parse(resolvedEndpoint)
|
||||||
|
if errParse != nil {
|
||||||
|
log.Fatalf("failed to parse object store endpoint %q: %v", objectStoreEndpoint, errParse)
|
||||||
|
}
|
||||||
|
switch strings.ToLower(parsed.Scheme) {
|
||||||
|
case "http":
|
||||||
|
useSSL = false
|
||||||
|
case "https":
|
||||||
|
useSSL = true
|
||||||
|
default:
|
||||||
|
log.Fatalf("unsupported object store scheme %q (only http and https are allowed)", parsed.Scheme)
|
||||||
|
}
|
||||||
|
if parsed.Host == "" {
|
||||||
|
log.Fatalf("object store endpoint %q is missing host information", objectStoreEndpoint)
|
||||||
|
}
|
||||||
|
resolvedEndpoint = parsed.Host
|
||||||
|
if parsed.Path != "" && parsed.Path != "/" {
|
||||||
|
resolvedEndpoint = strings.TrimSuffix(parsed.Host+parsed.Path, "/")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
resolvedEndpoint = strings.TrimRight(resolvedEndpoint, "/")
|
||||||
|
objCfg := store.ObjectStoreConfig{
|
||||||
|
Endpoint: resolvedEndpoint,
|
||||||
|
Bucket: objectStoreBucket,
|
||||||
|
AccessKey: objectStoreAccess,
|
||||||
|
SecretKey: objectStoreSecret,
|
||||||
|
LocalRoot: objectStoreRoot,
|
||||||
|
UseSSL: useSSL,
|
||||||
|
PathStyle: true,
|
||||||
|
}
|
||||||
|
objectStoreInst, err = store.NewObjectTokenStore(objCfg)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to initialize object token store: %v", err)
|
||||||
|
}
|
||||||
|
examplePath := filepath.Join(wd, "config.example.yaml")
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
if errBootstrap := objectStoreInst.Bootstrap(ctx, examplePath); errBootstrap != nil {
|
||||||
|
cancel()
|
||||||
|
log.Fatalf("failed to bootstrap object-backed config: %v", errBootstrap)
|
||||||
|
}
|
||||||
|
cancel()
|
||||||
|
configFilePath = objectStoreInst.ConfigPath()
|
||||||
|
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||||
|
if err == nil {
|
||||||
|
if cfg == nil {
|
||||||
|
cfg = &config.Config{}
|
||||||
|
}
|
||||||
|
cfg.AuthDir = objectStoreInst.AuthDir()
|
||||||
|
log.Infof("object-backed token store enabled, bucket: %s", objectStoreBucket)
|
||||||
|
}
|
||||||
} else if useGitStore {
|
} else if useGitStore {
|
||||||
if gitStoreLocalPath == "" {
|
if gitStoreLocalPath == "" {
|
||||||
gitStoreLocalPath = wd
|
gitStoreLocalPath = wd
|
||||||
@@ -294,6 +385,8 @@ func main() {
|
|||||||
// Register the shared token store once so all components use the same persistence backend.
|
// Register the shared token store once so all components use the same persistence backend.
|
||||||
if usePostgresStore {
|
if usePostgresStore {
|
||||||
sdkAuth.RegisterTokenStore(pgStoreInst)
|
sdkAuth.RegisterTokenStore(pgStoreInst)
|
||||||
|
} else if useObjectStore {
|
||||||
|
sdkAuth.RegisterTokenStore(objectStoreInst)
|
||||||
} else if useGitStore {
|
} else if useGitStore {
|
||||||
sdkAuth.RegisterTokenStore(gitStoreInst)
|
sdkAuth.RegisterTokenStore(gitStoreInst)
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -10,6 +10,8 @@ services:
|
|||||||
COMMIT: ${COMMIT:-none}
|
COMMIT: ${COMMIT:-none}
|
||||||
BUILD_DATE: ${BUILD_DATE:-unknown}
|
BUILD_DATE: ${BUILD_DATE:-unknown}
|
||||||
container_name: cli-proxy-api
|
container_name: cli-proxy-api
|
||||||
|
# env_file:
|
||||||
|
# - .env
|
||||||
environment:
|
environment:
|
||||||
DEPLOY: ${DEPLOY:-}
|
DEPLOY: ${DEPLOY:-}
|
||||||
ports:
|
ports:
|
||||||
|
|||||||
9
go.mod
9
go.mod
@@ -7,8 +7,10 @@ require (
|
|||||||
github.com/gin-gonic/gin v1.10.1
|
github.com/gin-gonic/gin v1.10.1
|
||||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145
|
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
|
github.com/joho/godotenv v1.5.1
|
||||||
github.com/jackc/pgx/v5 v5.7.6
|
github.com/jackc/pgx/v5 v5.7.6
|
||||||
github.com/klauspost/compress v1.17.3
|
github.com/klauspost/compress v1.17.4
|
||||||
|
github.com/minio/minio-go/v7 v7.0.66
|
||||||
github.com/sirupsen/logrus v1.9.3
|
github.com/sirupsen/logrus v1.9.3
|
||||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||||
github.com/tidwall/gjson v1.18.0
|
github.com/tidwall/gjson v1.18.0
|
||||||
@@ -30,6 +32,7 @@ require (
|
|||||||
github.com/cloudwego/base64x v0.1.4 // indirect
|
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||||
github.com/cloudwego/iasm v0.2.0 // indirect
|
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||||
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/emirpasic/gods v1.18.1 // indirect
|
github.com/emirpasic/gods v1.18.1 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||||
@@ -48,10 +51,13 @@ require (
|
|||||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||||
github.com/leodido/go-urn v1.4.0 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
|
github.com/minio/md5-simd v1.1.2 // indirect
|
||||||
|
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||||
github.com/pjbgf/sha1cd v0.5.0 // indirect
|
github.com/pjbgf/sha1cd v0.5.0 // indirect
|
||||||
|
github.com/rs/xid v1.5.0 // indirect
|
||||||
github.com/sergi/go-diff v1.4.0 // indirect
|
github.com/sergi/go-diff v1.4.0 // indirect
|
||||||
github.com/tidwall/match v1.1.1 // indirect
|
github.com/tidwall/match v1.1.1 // indirect
|
||||||
github.com/tidwall/pretty v1.2.0 // indirect
|
github.com/tidwall/pretty v1.2.0 // indirect
|
||||||
@@ -62,4 +68,5 @@ require (
|
|||||||
golang.org/x/sys v0.37.0 // indirect
|
golang.org/x/sys v0.37.0 // indirect
|
||||||
golang.org/x/text v0.30.0 // indirect
|
golang.org/x/text v0.30.0 // indirect
|
||||||
google.golang.org/protobuf v1.34.1 // indirect
|
google.golang.org/protobuf v1.34.1 // indirect
|
||||||
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
17
go.sum
17
go.sum
@@ -23,6 +23,8 @@ github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGL
|
|||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
|
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
|
||||||
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
|
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
|
||||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||||
@@ -70,12 +72,17 @@ github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
|
|||||||
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||||
|
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||||
|
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
|
github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
|
||||||
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
|
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
|
||||||
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
|
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
|
||||||
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||||
|
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||||
|
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||||
@@ -90,6 +97,12 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
|||||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
|
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||||
|
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||||
|
github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw=
|
||||||
|
github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs=
|
||||||
|
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||||
|
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
@@ -103,6 +116,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
|||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||||
|
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||||
|
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||||
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
|
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
|
||||||
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||||
@@ -163,6 +178,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
|||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||||
|
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
|||||||
@@ -1150,126 +1150,50 @@ func (h *Handler) RequestIFlowToken(c *gin.Context) {
|
|||||||
c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "failed to start callback server"})
|
c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "failed to start callback server"})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer stopCallbackForwarder(iflowauth.CallbackPort)
|
|
||||||
fmt.Println("Waiting for authentication...")
|
|
||||||
|
|
||||||
waitFile := filepath.Join(h.cfg.AuthDir, fmt.Sprintf(".oauth-iflow-%s.oauth", state))
|
|
||||||
deadline := time.Now().Add(5 * time.Minute)
|
|
||||||
var resultMap map[string]string
|
|
||||||
for {
|
|
||||||
if time.Now().After(deadline) {
|
|
||||||
oauthStatus[state] = "Authentication failed"
|
|
||||||
fmt.Println("Authentication failed: timeout waiting for callback")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if data, errR := os.ReadFile(waitFile); errR == nil {
|
|
||||||
_ = os.Remove(waitFile)
|
|
||||||
_ = json.Unmarshal(data, &resultMap)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
time.Sleep(500 * time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
if errStr := strings.TrimSpace(resultMap["error"]); errStr != "" {
|
|
||||||
oauthStatus[state] = "Authentication failed"
|
|
||||||
fmt.Printf("Authentication failed: %s\n", errStr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if resultState := strings.TrimSpace(resultMap["state"]); resultState != state {
|
|
||||||
oauthStatus[state] = "Authentication failed"
|
|
||||||
fmt.Println("Authentication failed: state mismatch")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
code := strings.TrimSpace(resultMap["code"])
|
|
||||||
if code == "" {
|
|
||||||
oauthStatus[state] = "Authentication failed"
|
|
||||||
fmt.Println("Authentication failed: code missing")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
tokenData, errExchange := authSvc.ExchangeCodeForTokens(ctx, code, redirectURI)
|
|
||||||
if errExchange != nil {
|
|
||||||
oauthStatus[state] = "Authentication failed"
|
|
||||||
fmt.Printf("Authentication failed: %v\n", errExchange)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
tokenStorage := authSvc.CreateTokenStorage(tokenData)
|
|
||||||
identifier := strings.TrimSpace(tokenStorage.Email)
|
|
||||||
if identifier == "" {
|
|
||||||
identifier = fmt.Sprintf("iflow-%d", time.Now().UnixMilli())
|
|
||||||
tokenStorage.Email = identifier
|
|
||||||
}
|
|
||||||
record := &coreauth.Auth{
|
|
||||||
ID: fmt.Sprintf("iflow-%s.json", identifier),
|
|
||||||
Provider: "iflow",
|
|
||||||
FileName: fmt.Sprintf("iflow-%s.json", identifier),
|
|
||||||
Storage: tokenStorage,
|
|
||||||
Metadata: map[string]any{"email": identifier, "api_key": tokenStorage.APIKey},
|
|
||||||
Attributes: map[string]string{"api_key": tokenStorage.APIKey},
|
|
||||||
}
|
|
||||||
|
|
||||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
|
||||||
if errSave != nil {
|
|
||||||
oauthStatus[state] = "Failed to save authentication tokens"
|
|
||||||
log.Fatalf("Failed to save authentication tokens: %v", errSave)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Authentication successful! Token saved to %s\n", savedPath)
|
|
||||||
if tokenStorage.APIKey != "" {
|
|
||||||
fmt.Println("API key obtained and saved")
|
|
||||||
}
|
|
||||||
fmt.Println("You can now use iFlow services through this CLI")
|
|
||||||
delete(oauthStatus, state)
|
|
||||||
}()
|
|
||||||
|
|
||||||
oauthStatus[state] = ""
|
|
||||||
c.JSON(http.StatusOK, gin.H{"status": "ok", "url": authURL, "state": state})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
oauthServer := iflowauth.NewOAuthServer(iflowauth.CallbackPort)
|
|
||||||
if err := oauthServer.Start(); err != nil {
|
|
||||||
oauthStatus[state] = "Failed to start authentication server"
|
|
||||||
log.Errorf("Failed to start iFlow OAuth server: %v", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "failed to start local oauth server"})
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
|
if isWebUI {
|
||||||
|
defer stopCallbackForwarder(iflowauth.CallbackPort)
|
||||||
|
}
|
||||||
fmt.Println("Waiting for authentication...")
|
fmt.Println("Waiting for authentication...")
|
||||||
defer func() {
|
|
||||||
stopCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
waitFile := filepath.Join(h.cfg.AuthDir, fmt.Sprintf(".oauth-iflow-%s.oauth", state))
|
||||||
defer cancel()
|
deadline := time.Now().Add(5 * time.Minute)
|
||||||
if err := oauthServer.Stop(stopCtx); err != nil {
|
var resultMap map[string]string
|
||||||
log.Warnf("Failed to stop iFlow OAuth server: %v", err)
|
for {
|
||||||
|
if time.Now().After(deadline) {
|
||||||
|
oauthStatus[state] = "Authentication failed"
|
||||||
|
fmt.Println("Authentication failed: timeout waiting for callback")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}()
|
if data, errR := os.ReadFile(waitFile); errR == nil {
|
||||||
|
_ = os.Remove(waitFile)
|
||||||
result, err := oauthServer.WaitForCallback(5 * time.Minute)
|
_ = json.Unmarshal(data, &resultMap)
|
||||||
if err != nil {
|
break
|
||||||
oauthStatus[state] = "Authentication failed"
|
}
|
||||||
fmt.Printf("Authentication failed: %v\n", err)
|
time.Sleep(500 * time.Millisecond)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Error != "" {
|
if errStr := strings.TrimSpace(resultMap["error"]); errStr != "" {
|
||||||
oauthStatus[state] = "Authentication failed"
|
oauthStatus[state] = "Authentication failed"
|
||||||
fmt.Printf("Authentication failed: %s\n", result.Error)
|
fmt.Printf("Authentication failed: %s\n", errStr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if resultState := strings.TrimSpace(resultMap["state"]); resultState != state {
|
||||||
if result.State != state {
|
|
||||||
oauthStatus[state] = "Authentication failed"
|
oauthStatus[state] = "Authentication failed"
|
||||||
fmt.Println("Authentication failed: state mismatch")
|
fmt.Println("Authentication failed: state mismatch")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tokenData, errExchange := authSvc.ExchangeCodeForTokens(ctx, result.Code, redirectURI)
|
code := strings.TrimSpace(resultMap["code"])
|
||||||
|
if code == "" {
|
||||||
|
oauthStatus[state] = "Authentication failed"
|
||||||
|
fmt.Println("Authentication failed: code missing")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenData, errExchange := authSvc.ExchangeCodeForTokens(ctx, code, redirectURI)
|
||||||
if errExchange != nil {
|
if errExchange != nil {
|
||||||
oauthStatus[state] = "Authentication failed"
|
oauthStatus[state] = "Authentication failed"
|
||||||
fmt.Printf("Authentication failed: %v\n", errExchange)
|
fmt.Printf("Authentication failed: %v\n", errExchange)
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -37,6 +38,7 @@ type Handler struct {
|
|||||||
localPassword string
|
localPassword string
|
||||||
allowRemoteOverride bool
|
allowRemoteOverride bool
|
||||||
envSecret string
|
envSecret string
|
||||||
|
logDir string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHandler creates a new management handler instance.
|
// NewHandler creates a new management handler instance.
|
||||||
@@ -68,6 +70,19 @@ func (h *Handler) SetUsageStatistics(stats *usage.RequestStatistics) { h.usageSt
|
|||||||
// SetLocalPassword configures the runtime-local password accepted for localhost requests.
|
// SetLocalPassword configures the runtime-local password accepted for localhost requests.
|
||||||
func (h *Handler) SetLocalPassword(password string) { h.localPassword = password }
|
func (h *Handler) SetLocalPassword(password string) { h.localPassword = password }
|
||||||
|
|
||||||
|
// SetLogDirectory updates the directory where main.log should be looked up.
|
||||||
|
func (h *Handler) SetLogDirectory(dir string) {
|
||||||
|
if dir == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !filepath.IsAbs(dir) {
|
||||||
|
if abs, err := filepath.Abs(dir); err == nil {
|
||||||
|
dir = abs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.logDir = dir
|
||||||
|
}
|
||||||
|
|
||||||
// Middleware enforces access control for management endpoints.
|
// Middleware enforces access control for management endpoints.
|
||||||
// All requests (local and remote) require a valid management key.
|
// All requests (local and remote) require a valid management key.
|
||||||
// Additionally, remote access requires allow-remote-management=true.
|
// Additionally, remote access requires allow-remote-management=true.
|
||||||
|
|||||||
344
internal/api/handlers/management/logs.go
Normal file
344
internal/api/handlers/management/logs.go
Normal file
@@ -0,0 +1,344 @@
|
|||||||
|
package management
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultLogFileName = "main.log"
|
||||||
|
logScannerInitialBuffer = 64 * 1024
|
||||||
|
logScannerMaxBuffer = 8 * 1024 * 1024
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetLogs returns log lines with optional incremental loading.
|
||||||
|
func (h *Handler) GetLogs(c *gin.Context) {
|
||||||
|
if h == nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if h.cfg == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !h.cfg.LoggingToFile {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "logging to file disabled"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
logDir := h.logDirectory()
|
||||||
|
if strings.TrimSpace(logDir) == "" {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := h.collectLogFiles(logDir)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
cutoff := parseCutoff(c.Query("after"))
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"lines": []string{},
|
||||||
|
"line-count": 0,
|
||||||
|
"latest-timestamp": cutoff,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to list log files: %v", err)})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cutoff := parseCutoff(c.Query("after"))
|
||||||
|
acc := newLogAccumulator(cutoff)
|
||||||
|
for i := range files {
|
||||||
|
if errProcess := acc.consumeFile(files[i]); errProcess != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to read log file %s: %v", files[i], errProcess)})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
lines, total, latest := acc.result()
|
||||||
|
if latest == 0 || latest < cutoff {
|
||||||
|
latest = cutoff
|
||||||
|
}
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"lines": lines,
|
||||||
|
"line-count": total,
|
||||||
|
"latest-timestamp": latest,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteLogs removes all rotated log files and truncates the active log.
|
||||||
|
func (h *Handler) DeleteLogs(c *gin.Context) {
|
||||||
|
if h == nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if h.cfg == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !h.cfg.LoggingToFile {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "logging to file disabled"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
dir := h.logDirectory()
|
||||||
|
if strings.TrimSpace(dir) == "" {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := os.ReadDir(dir)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
c.JSON(http.StatusNotFound, gin.H{"error": "log directory not found"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to list log directory: %v", err)})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
removed := 0
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := entry.Name()
|
||||||
|
fullPath := filepath.Join(dir, name)
|
||||||
|
if name == defaultLogFileName {
|
||||||
|
if errTrunc := os.Truncate(fullPath, 0); errTrunc != nil && !os.IsNotExist(errTrunc) {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to truncate log file: %v", errTrunc)})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if isRotatedLogFile(name) {
|
||||||
|
if errRemove := os.Remove(fullPath); errRemove != nil && !os.IsNotExist(errRemove) {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to remove %s: %v", name, errRemove)})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
removed++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"success": true,
|
||||||
|
"message": "Logs cleared successfully",
|
||||||
|
"removed": removed,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) logDirectory() string {
|
||||||
|
if h == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if h.logDir != "" {
|
||||||
|
return h.logDir
|
||||||
|
}
|
||||||
|
if h.configFilePath != "" {
|
||||||
|
dir := filepath.Dir(h.configFilePath)
|
||||||
|
if dir != "" && dir != "." {
|
||||||
|
return filepath.Join(dir, "logs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "logs"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) collectLogFiles(dir string) ([]string, error) {
|
||||||
|
entries, err := os.ReadDir(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
type candidate struct {
|
||||||
|
path string
|
||||||
|
order int64
|
||||||
|
}
|
||||||
|
cands := make([]candidate, 0, len(entries))
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := entry.Name()
|
||||||
|
if name == defaultLogFileName {
|
||||||
|
cands = append(cands, candidate{path: filepath.Join(dir, name), order: 0})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if order, ok := rotationOrder(name); ok {
|
||||||
|
cands = append(cands, candidate{path: filepath.Join(dir, name), order: order})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(cands) == 0 {
|
||||||
|
return []string{}, nil
|
||||||
|
}
|
||||||
|
sort.Slice(cands, func(i, j int) bool { return cands[i].order < cands[j].order })
|
||||||
|
paths := make([]string, 0, len(cands))
|
||||||
|
for i := len(cands) - 1; i >= 0; i-- {
|
||||||
|
paths = append(paths, cands[i].path)
|
||||||
|
}
|
||||||
|
return paths, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type logAccumulator struct {
|
||||||
|
cutoff int64
|
||||||
|
lines []string
|
||||||
|
total int
|
||||||
|
latest int64
|
||||||
|
include bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLogAccumulator(cutoff int64) *logAccumulator {
|
||||||
|
return &logAccumulator{
|
||||||
|
cutoff: cutoff,
|
||||||
|
lines: make([]string, 0, 256),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (acc *logAccumulator) consumeFile(path string) error {
|
||||||
|
file, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
buf := make([]byte, 0, logScannerInitialBuffer)
|
||||||
|
scanner.Buffer(buf, logScannerMaxBuffer)
|
||||||
|
for scanner.Scan() {
|
||||||
|
acc.addLine(scanner.Text())
|
||||||
|
}
|
||||||
|
if errScan := scanner.Err(); errScan != nil {
|
||||||
|
return errScan
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (acc *logAccumulator) addLine(raw string) {
|
||||||
|
line := strings.TrimRight(raw, "\r")
|
||||||
|
acc.total++
|
||||||
|
ts := parseTimestamp(line)
|
||||||
|
if ts > acc.latest {
|
||||||
|
acc.latest = ts
|
||||||
|
}
|
||||||
|
if ts > 0 {
|
||||||
|
acc.include = acc.cutoff == 0 || ts > acc.cutoff
|
||||||
|
if acc.cutoff == 0 || acc.include {
|
||||||
|
acc.lines = append(acc.lines, line)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if acc.cutoff == 0 || acc.include {
|
||||||
|
acc.lines = append(acc.lines, line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (acc *logAccumulator) result() ([]string, int, int64) {
|
||||||
|
if acc.lines == nil {
|
||||||
|
acc.lines = []string{}
|
||||||
|
}
|
||||||
|
return acc.lines, acc.total, acc.latest
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseCutoff(raw string) int64 {
|
||||||
|
value := strings.TrimSpace(raw)
|
||||||
|
if value == "" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
ts, err := strconv.ParseInt(value, 10, 64)
|
||||||
|
if err != nil || ts <= 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTimestamp(line string) int64 {
|
||||||
|
if strings.HasPrefix(line, "[") {
|
||||||
|
line = line[1:]
|
||||||
|
}
|
||||||
|
if len(line) < 19 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
candidate := line[:19]
|
||||||
|
t, err := time.ParseInLocation("2006-01-02 15:04:05", candidate, time.Local)
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return t.Unix()
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRotatedLogFile(name string) bool {
|
||||||
|
if _, ok := rotationOrder(name); ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func rotationOrder(name string) (int64, bool) {
|
||||||
|
if order, ok := numericRotationOrder(name); ok {
|
||||||
|
return order, true
|
||||||
|
}
|
||||||
|
if order, ok := timestampRotationOrder(name); ok {
|
||||||
|
return order, true
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func numericRotationOrder(name string) (int64, bool) {
|
||||||
|
if !strings.HasPrefix(name, defaultLogFileName+".") {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
suffix := strings.TrimPrefix(name, defaultLogFileName+".")
|
||||||
|
if suffix == "" {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
n, err := strconv.Atoi(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return int64(n), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func timestampRotationOrder(name string) (int64, bool) {
|
||||||
|
ext := filepath.Ext(defaultLogFileName)
|
||||||
|
base := strings.TrimSuffix(defaultLogFileName, ext)
|
||||||
|
if base == "" {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
prefix := base + "-"
|
||||||
|
if !strings.HasPrefix(name, prefix) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
clean := strings.TrimPrefix(name, prefix)
|
||||||
|
if strings.HasSuffix(clean, ".gz") {
|
||||||
|
clean = strings.TrimSuffix(clean, ".gz")
|
||||||
|
}
|
||||||
|
if ext != "" {
|
||||||
|
if !strings.HasSuffix(clean, ext) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
clean = strings.TrimSuffix(clean, ext)
|
||||||
|
}
|
||||||
|
if clean == "" {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
if idx := strings.IndexByte(clean, '.'); idx != -1 {
|
||||||
|
clean = clean[:idx]
|
||||||
|
}
|
||||||
|
parsed, err := time.ParseInLocation("2006-01-02T15-04-05", clean, time.Local)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return math.MaxInt64 - parsed.Unix(), true
|
||||||
|
}
|
||||||
@@ -19,7 +19,12 @@ import (
|
|||||||
func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {
|
func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {
|
||||||
return func(c *gin.Context) {
|
return func(c *gin.Context) {
|
||||||
path := c.Request.URL.Path
|
path := c.Request.URL.Path
|
||||||
if strings.HasPrefix(path, "/v0/management") || path == "/keep-alive" {
|
shouldLog := false
|
||||||
|
if strings.HasPrefix(path, "/v1") {
|
||||||
|
shouldLog = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if !shouldLog {
|
||||||
c.Next()
|
c.Next()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -32,6 +32,7 @@ import (
|
|||||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/openai"
|
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/openai"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
"gopkg.in/yaml.v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
const oauthCallbackSuccessHTML = `<html><head><meta charset="utf-8"><title>Authentication successful</title><script>setTimeout(function(){window.close();},5000);</script></head><body><h1>Authentication successful!</h1><p>You can close this window.</p><p>This window will close automatically in 5 seconds.</p></body></html>`
|
const oauthCallbackSuccessHTML = `<html><head><meta charset="utf-8"><title>Authentication successful</title><script>setTimeout(function(){window.close();},5000);</script></head><body><h1>Authentication successful!</h1><p>You can close this window.</p><p>This window will close automatically in 5 seconds.</p></body></html>`
|
||||||
@@ -116,6 +117,10 @@ type Server struct {
|
|||||||
// cfg holds the current server configuration.
|
// cfg holds the current server configuration.
|
||||||
cfg *config.Config
|
cfg *config.Config
|
||||||
|
|
||||||
|
// oldConfigYaml stores a YAML snapshot of the previous configuration for change detection.
|
||||||
|
// This prevents issues when the config object is modified in place by Management API.
|
||||||
|
oldConfigYaml []byte
|
||||||
|
|
||||||
// accessManager handles request authentication providers.
|
// accessManager handles request authentication providers.
|
||||||
accessManager *sdkaccess.Manager
|
accessManager *sdkaccess.Manager
|
||||||
|
|
||||||
@@ -220,12 +225,15 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
|||||||
currentPath: wd,
|
currentPath: wd,
|
||||||
envManagementSecret: envManagementSecret,
|
envManagementSecret: envManagementSecret,
|
||||||
}
|
}
|
||||||
|
// Save initial YAML snapshot
|
||||||
|
s.oldConfigYaml, _ = yaml.Marshal(cfg)
|
||||||
s.applyAccessConfig(nil, cfg)
|
s.applyAccessConfig(nil, cfg)
|
||||||
// Initialize management handler
|
// Initialize management handler
|
||||||
s.mgmt = managementHandlers.NewHandler(cfg, configFilePath, authManager)
|
s.mgmt = managementHandlers.NewHandler(cfg, configFilePath, authManager)
|
||||||
if optionState.localPassword != "" {
|
if optionState.localPassword != "" {
|
||||||
s.mgmt.SetLocalPassword(optionState.localPassword)
|
s.mgmt.SetLocalPassword(optionState.localPassword)
|
||||||
}
|
}
|
||||||
|
s.mgmt.SetLogDirectory(filepath.Join(s.currentPath, "logs"))
|
||||||
s.localPassword = optionState.localPassword
|
s.localPassword = optionState.localPassword
|
||||||
|
|
||||||
// Setup routes
|
// Setup routes
|
||||||
@@ -404,6 +412,8 @@ func (s *Server) registerManagementRoutes() {
|
|||||||
mgmt.PATCH("/generative-language-api-key", s.mgmt.PatchGlKeys)
|
mgmt.PATCH("/generative-language-api-key", s.mgmt.PatchGlKeys)
|
||||||
mgmt.DELETE("/generative-language-api-key", s.mgmt.DeleteGlKeys)
|
mgmt.DELETE("/generative-language-api-key", s.mgmt.DeleteGlKeys)
|
||||||
|
|
||||||
|
mgmt.GET("/logs", s.mgmt.GetLogs)
|
||||||
|
mgmt.DELETE("/logs", s.mgmt.DeleteLogs)
|
||||||
mgmt.GET("/request-log", s.mgmt.GetRequestLog)
|
mgmt.GET("/request-log", s.mgmt.GetRequestLog)
|
||||||
mgmt.PUT("/request-log", s.mgmt.PutRequestLog)
|
mgmt.PUT("/request-log", s.mgmt.PutRequestLog)
|
||||||
mgmt.PATCH("/request-log", s.mgmt.PutRequestLog)
|
mgmt.PATCH("/request-log", s.mgmt.PutRequestLog)
|
||||||
@@ -654,7 +664,11 @@ func (s *Server) applyAccessConfig(oldCfg, newCfg *config.Config) {
|
|||||||
// - clients: The new slice of AI service clients
|
// - clients: The new slice of AI service clients
|
||||||
// - cfg: The new application configuration
|
// - cfg: The new application configuration
|
||||||
func (s *Server) UpdateClients(cfg *config.Config) {
|
func (s *Server) UpdateClients(cfg *config.Config) {
|
||||||
oldCfg := s.cfg
|
// Reconstruct old config from YAML snapshot to avoid reference sharing issues
|
||||||
|
var oldCfg *config.Config
|
||||||
|
if len(s.oldConfigYaml) > 0 {
|
||||||
|
_ = yaml.Unmarshal(s.oldConfigYaml, &oldCfg)
|
||||||
|
}
|
||||||
|
|
||||||
// Update request logger enabled state if it has changed
|
// Update request logger enabled state if it has changed
|
||||||
previousRequestLog := false
|
previousRequestLog := false
|
||||||
@@ -735,6 +749,8 @@ func (s *Server) UpdateClients(cfg *config.Config) {
|
|||||||
|
|
||||||
s.applyAccessConfig(oldCfg, cfg)
|
s.applyAccessConfig(oldCfg, cfg)
|
||||||
s.cfg = cfg
|
s.cfg = cfg
|
||||||
|
// Save YAML snapshot for next comparison
|
||||||
|
s.oldConfigYaml, _ = yaml.Marshal(cfg)
|
||||||
s.handlers.UpdateClients(&cfg.SDKConfig)
|
s.handlers.UpdateClients(&cfg.SDKConfig)
|
||||||
|
|
||||||
if !cfg.RemoteManagement.DisableControlPanel {
|
if !cfg.RemoteManagement.DisableControlPanel {
|
||||||
|
|||||||
@@ -8,6 +8,15 @@ import "time"
|
|||||||
// GetClaudeModels returns the standard Claude model definitions
|
// GetClaudeModels returns the standard Claude model definitions
|
||||||
func GetClaudeModels() []*ModelInfo {
|
func GetClaudeModels() []*ModelInfo {
|
||||||
return []*ModelInfo{
|
return []*ModelInfo{
|
||||||
|
|
||||||
|
{
|
||||||
|
ID: "claude-haiku-4-5-20251001",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1759276800, // 2025-10-01
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4.5 Haiku",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
ID: "claude-sonnet-4-5-20250929",
|
ID: "claude-sonnet-4-5-20250929",
|
||||||
Object: "model",
|
Object: "model",
|
||||||
|
|||||||
@@ -143,6 +143,31 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
|||||||
go func() {
|
go func() {
|
||||||
defer close(out)
|
defer close(out)
|
||||||
defer func() { _ = resp.Body.Close() }()
|
defer func() { _ = resp.Body.Close() }()
|
||||||
|
|
||||||
|
// If from == to (Claude → Claude), directly forward the SSE stream without translation
|
||||||
|
if from == to {
|
||||||
|
scanner := bufio.NewScanner(resp.Body)
|
||||||
|
buf := make([]byte, 20_971_520)
|
||||||
|
scanner.Buffer(buf, 20_971_520)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Bytes()
|
||||||
|
appendAPIResponseChunk(ctx, e.cfg, line)
|
||||||
|
if detail, ok := parseClaudeStreamUsage(line); ok {
|
||||||
|
reporter.publish(ctx, detail)
|
||||||
|
}
|
||||||
|
// Forward the line as-is to preserve SSE format
|
||||||
|
cloned := make([]byte, len(line)+1)
|
||||||
|
copy(cloned, line)
|
||||||
|
cloned[len(line)] = '\n'
|
||||||
|
out <- cliproxyexecutor.StreamChunk{Payload: cloned}
|
||||||
|
}
|
||||||
|
if err = scanner.Err(); err != nil {
|
||||||
|
out <- cliproxyexecutor.StreamChunk{Err: err}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// For other formats, use translation
|
||||||
scanner := bufio.NewScanner(resp.Body)
|
scanner := bufio.NewScanner(resp.Body)
|
||||||
buf := make([]byte, 20_971_520)
|
buf := make([]byte, 20_971_520)
|
||||||
scanner.Buffer(buf, 20_971_520)
|
scanner.Buffer(buf, 20_971_520)
|
||||||
|
|||||||
@@ -60,7 +60,11 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
|
|||||||
|
|
||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini-cli")
|
to := sdktranslator.FromString("gemini-cli")
|
||||||
|
budgetOverride, includeOverride, hasOverride := util.GeminiThinkingFromMetadata(req.Metadata)
|
||||||
basePayload := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
basePayload := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||||
|
if hasOverride {
|
||||||
|
basePayload = util.ApplyGeminiCLIThinkingConfig(basePayload, budgetOverride, includeOverride)
|
||||||
|
}
|
||||||
basePayload = fixGeminiCLIImageAspectRatio(req.Model, basePayload)
|
basePayload = fixGeminiCLIImageAspectRatio(req.Model, basePayload)
|
||||||
|
|
||||||
action := "generateContent"
|
action := "generateContent"
|
||||||
@@ -149,7 +153,11 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
|
|||||||
|
|
||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini-cli")
|
to := sdktranslator.FromString("gemini-cli")
|
||||||
|
budgetOverride, includeOverride, hasOverride := util.GeminiThinkingFromMetadata(req.Metadata)
|
||||||
basePayload := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
basePayload := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
||||||
|
if hasOverride {
|
||||||
|
basePayload = util.ApplyGeminiCLIThinkingConfig(basePayload, budgetOverride, includeOverride)
|
||||||
|
}
|
||||||
basePayload = fixGeminiCLIImageAspectRatio(req.Model, basePayload)
|
basePayload = fixGeminiCLIImageAspectRatio(req.Model, basePayload)
|
||||||
|
|
||||||
projectID := strings.TrimSpace(stringValue(auth.Metadata, "project_id"))
|
projectID := strings.TrimSpace(stringValue(auth.Metadata, "project_id"))
|
||||||
@@ -292,8 +300,12 @@ func (e *GeminiCLIExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.
|
|||||||
var lastStatus int
|
var lastStatus int
|
||||||
var lastBody []byte
|
var lastBody []byte
|
||||||
|
|
||||||
|
budgetOverride, includeOverride, hasOverride := util.GeminiThinkingFromMetadata(req.Metadata)
|
||||||
for _, attemptModel := range models {
|
for _, attemptModel := range models {
|
||||||
payload := sdktranslator.TranslateRequest(from, to, attemptModel, bytes.Clone(req.Payload), false)
|
payload := sdktranslator.TranslateRequest(from, to, attemptModel, bytes.Clone(req.Payload), false)
|
||||||
|
if hasOverride {
|
||||||
|
payload = util.ApplyGeminiCLIThinkingConfig(payload, budgetOverride, includeOverride)
|
||||||
|
}
|
||||||
payload = deleteJSONField(payload, "project")
|
payload = deleteJSONField(payload, "project")
|
||||||
payload = deleteJSONField(payload, "model")
|
payload = deleteJSONField(payload, "model")
|
||||||
payload = disableGeminiThinkingConfig(payload, attemptModel)
|
payload = disableGeminiThinkingConfig(payload, attemptModel)
|
||||||
|
|||||||
@@ -77,6 +77,9 @@ func (e *GeminiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, r
|
|||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini")
|
to := sdktranslator.FromString("gemini")
|
||||||
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||||
|
if budgetOverride, includeOverride, ok := util.GeminiThinkingFromMetadata(req.Metadata); ok {
|
||||||
|
body = util.ApplyGeminiThinkingConfig(body, budgetOverride, includeOverride)
|
||||||
|
}
|
||||||
body = disableGeminiThinkingConfig(body, req.Model)
|
body = disableGeminiThinkingConfig(body, req.Model)
|
||||||
body = fixGeminiImageAspectRatio(req.Model, body)
|
body = fixGeminiImageAspectRatio(req.Model, body)
|
||||||
|
|
||||||
@@ -136,6 +139,9 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
|||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini")
|
to := sdktranslator.FromString("gemini")
|
||||||
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
||||||
|
if budgetOverride, includeOverride, ok := util.GeminiThinkingFromMetadata(req.Metadata); ok {
|
||||||
|
body = util.ApplyGeminiThinkingConfig(body, budgetOverride, includeOverride)
|
||||||
|
}
|
||||||
body = disableGeminiThinkingConfig(body, req.Model)
|
body = disableGeminiThinkingConfig(body, req.Model)
|
||||||
body = fixGeminiImageAspectRatio(req.Model, body)
|
body = fixGeminiImageAspectRatio(req.Model, body)
|
||||||
|
|
||||||
@@ -208,6 +214,9 @@ func (e *GeminiExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
|
|||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini")
|
to := sdktranslator.FromString("gemini")
|
||||||
translatedReq := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
translatedReq := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||||
|
if budgetOverride, includeOverride, ok := util.GeminiThinkingFromMetadata(req.Metadata); ok {
|
||||||
|
translatedReq = util.ApplyGeminiThinkingConfig(translatedReq, budgetOverride, includeOverride)
|
||||||
|
}
|
||||||
translatedReq = disableGeminiThinkingConfig(translatedReq, req.Model)
|
translatedReq = disableGeminiThinkingConfig(translatedReq, req.Model)
|
||||||
translatedReq = fixGeminiImageAspectRatio(req.Model, translatedReq)
|
translatedReq = fixGeminiImageAspectRatio(req.Model, translatedReq)
|
||||||
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
||||||
|
|||||||
618
internal/store/objectstore.go
Normal file
618
internal/store/objectstore.go
Normal file
@@ -0,0 +1,618 @@
|
|||||||
|
package store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/minio/minio-go/v7"
|
||||||
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||||
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||||
|
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
objectStoreConfigKey = "config/config.yaml"
|
||||||
|
objectStoreAuthPrefix = "auths"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ObjectStoreConfig captures configuration for the object storage-backed token store.
|
||||||
|
type ObjectStoreConfig struct {
|
||||||
|
Endpoint string
|
||||||
|
Bucket string
|
||||||
|
AccessKey string
|
||||||
|
SecretKey string
|
||||||
|
Region string
|
||||||
|
Prefix string
|
||||||
|
LocalRoot string
|
||||||
|
UseSSL bool
|
||||||
|
PathStyle bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectTokenStore persists configuration and authentication metadata using an S3-compatible object storage backend.
|
||||||
|
// Files are mirrored to a local workspace so existing file-based flows continue to operate.
|
||||||
|
type ObjectTokenStore struct {
|
||||||
|
client *minio.Client
|
||||||
|
cfg ObjectStoreConfig
|
||||||
|
spoolRoot string
|
||||||
|
configPath string
|
||||||
|
authDir string
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObjectTokenStore initializes an object storage backed token store.
|
||||||
|
func NewObjectTokenStore(cfg ObjectStoreConfig) (*ObjectTokenStore, error) {
|
||||||
|
cfg.Endpoint = strings.TrimSpace(cfg.Endpoint)
|
||||||
|
cfg.Bucket = strings.TrimSpace(cfg.Bucket)
|
||||||
|
cfg.AccessKey = strings.TrimSpace(cfg.AccessKey)
|
||||||
|
cfg.SecretKey = strings.TrimSpace(cfg.SecretKey)
|
||||||
|
cfg.Prefix = strings.Trim(cfg.Prefix, "/")
|
||||||
|
|
||||||
|
if cfg.Endpoint == "" {
|
||||||
|
return nil, fmt.Errorf("object store: endpoint is required")
|
||||||
|
}
|
||||||
|
if cfg.Bucket == "" {
|
||||||
|
return nil, fmt.Errorf("object store: bucket is required")
|
||||||
|
}
|
||||||
|
if cfg.AccessKey == "" {
|
||||||
|
return nil, fmt.Errorf("object store: access key is required")
|
||||||
|
}
|
||||||
|
if cfg.SecretKey == "" {
|
||||||
|
return nil, fmt.Errorf("object store: secret key is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
root := strings.TrimSpace(cfg.LocalRoot)
|
||||||
|
if root == "" {
|
||||||
|
if cwd, err := os.Getwd(); err == nil {
|
||||||
|
root = filepath.Join(cwd, "objectstore")
|
||||||
|
} else {
|
||||||
|
root = filepath.Join(os.TempDir(), "objectstore")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
absRoot, err := filepath.Abs(root)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("object store: resolve spool directory: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
configDir := filepath.Join(absRoot, "config")
|
||||||
|
authDir := filepath.Join(absRoot, "auths")
|
||||||
|
|
||||||
|
if err = os.MkdirAll(configDir, 0o700); err != nil {
|
||||||
|
return nil, fmt.Errorf("object store: create config directory: %w", err)
|
||||||
|
}
|
||||||
|
if err = os.MkdirAll(authDir, 0o700); err != nil {
|
||||||
|
return nil, fmt.Errorf("object store: create auth directory: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
options := &minio.Options{
|
||||||
|
Creds: credentials.NewStaticV4(cfg.AccessKey, cfg.SecretKey, ""),
|
||||||
|
Secure: cfg.UseSSL,
|
||||||
|
Region: cfg.Region,
|
||||||
|
}
|
||||||
|
if cfg.PathStyle {
|
||||||
|
options.BucketLookup = minio.BucketLookupPath
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := minio.New(cfg.Endpoint, options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("object store: create client: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ObjectTokenStore{
|
||||||
|
client: client,
|
||||||
|
cfg: cfg,
|
||||||
|
spoolRoot: absRoot,
|
||||||
|
configPath: filepath.Join(configDir, "config.yaml"),
|
||||||
|
authDir: authDir,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBaseDir implements the optional interface used by authenticators; it is a no-op because
// the object store controls its own workspace layout.
func (s *ObjectTokenStore) SetBaseDir(string) {}
|
||||||
|
|
||||||
|
// ConfigPath returns the managed configuration file path inside the spool directory.
|
||||||
|
func (s *ObjectTokenStore) ConfigPath() string {
|
||||||
|
if s == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return s.configPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthDir returns the local directory containing mirrored auth files.
|
||||||
|
func (s *ObjectTokenStore) AuthDir() string {
|
||||||
|
if s == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return s.authDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bootstrap ensures the target bucket exists and synchronizes data from the object storage backend.
|
||||||
|
func (s *ObjectTokenStore) Bootstrap(ctx context.Context, exampleConfigPath string) error {
|
||||||
|
if s == nil {
|
||||||
|
return fmt.Errorf("object store: not initialized")
|
||||||
|
}
|
||||||
|
if err := s.ensureBucket(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.syncConfigFromBucket(ctx, exampleConfigPath); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.syncAuthFromBucket(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save persists authentication metadata to disk and uploads it to the object storage backend.
//
// The returned string is the local mirror path of the auth file. A disabled
// auth that has never been written is skipped and returns ("", nil). When the
// on-disk metadata already matches the new payload, both the local write and
// the remote upload are short-circuited.
func (s *ObjectTokenStore) Save(ctx context.Context, auth *cliproxyauth.Auth) (string, error) {
	if auth == nil {
		return "", fmt.Errorf("object store: auth is nil")
	}

	path, err := s.resolveAuthPath(auth)
	if err != nil {
		return "", err
	}
	if path == "" {
		return "", fmt.Errorf("object store: missing file path attribute for %s", auth.ID)
	}

	// Disabled auths without an existing file are not persisted at all.
	if auth.Disabled {
		if _, statErr := os.Stat(path); errors.Is(statErr, fs.ErrNotExist) {
			return "", nil
		}
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
		return "", fmt.Errorf("object store: create auth directory: %w", err)
	}

	switch {
	case auth.Storage != nil:
		// Provider-specific payloads serialize themselves.
		if err = auth.Storage.SaveTokenToFile(path); err != nil {
			return "", err
		}
	case auth.Metadata != nil:
		raw, errMarshal := json.Marshal(auth.Metadata)
		if errMarshal != nil {
			return "", fmt.Errorf("object store: marshal metadata: %w", errMarshal)
		}
		// Unchanged metadata short-circuits the write AND the upload below.
		if existing, errRead := os.ReadFile(path); errRead == nil {
			if jsonEqual(existing, raw) {
				return path, nil
			}
		} else if errRead != nil && !errors.Is(errRead, fs.ErrNotExist) {
			return "", fmt.Errorf("object store: read existing metadata: %w", errRead)
		}
		// Write-then-rename keeps the local mirror update atomic.
		tmp := path + ".tmp"
		if errWrite := os.WriteFile(tmp, raw, 0o600); errWrite != nil {
			return "", fmt.Errorf("object store: write temp auth file: %w", errWrite)
		}
		if errRename := os.Rename(tmp, path); errRename != nil {
			return "", fmt.Errorf("object store: rename auth file: %w", errRename)
		}
	default:
		return "", fmt.Errorf("object store: nothing to persist for %s", auth.ID)
	}

	// Record where the auth was persisted so later saves reuse the same path.
	if auth.Attributes == nil {
		auth.Attributes = make(map[string]string)
	}
	auth.Attributes["path"] = path

	if strings.TrimSpace(auth.FileName) == "" {
		auth.FileName = auth.ID
	}

	// Mirror the freshly written file to the remote bucket.
	if err = s.uploadAuth(ctx, path); err != nil {
		return "", err
	}
	return path, nil
}
|
||||||
|
|
||||||
|
// List enumerates auth JSON files from the mirrored workspace.
|
||||||
|
func (s *ObjectTokenStore) List(_ context.Context) ([]*cliproxyauth.Auth, error) {
|
||||||
|
dir := strings.TrimSpace(s.AuthDir())
|
||||||
|
if dir == "" {
|
||||||
|
return nil, fmt.Errorf("object store: auth directory not configured")
|
||||||
|
}
|
||||||
|
entries := make([]*cliproxyauth.Auth, 0, 32)
|
||||||
|
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, walkErr error) error {
|
||||||
|
if walkErr != nil {
|
||||||
|
return walkErr
|
||||||
|
}
|
||||||
|
if d.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !strings.HasSuffix(strings.ToLower(d.Name()), ".json") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
auth, err := s.readAuthFile(path, dir)
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Warnf("object store: skip auth %s", path)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if auth != nil {
|
||||||
|
entries = append(entries, auth)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("object store: walk auth directory: %w", err)
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an auth file locally and remotely.
|
||||||
|
func (s *ObjectTokenStore) Delete(ctx context.Context, id string) error {
|
||||||
|
id = strings.TrimSpace(id)
|
||||||
|
if id == "" {
|
||||||
|
return fmt.Errorf("object store: id is empty")
|
||||||
|
}
|
||||||
|
path, err := s.resolveDeletePath(id)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
if err = os.Remove(path); err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||||
|
return fmt.Errorf("object store: delete auth file: %w", err)
|
||||||
|
}
|
||||||
|
if err = s.deleteAuthObject(ctx, path); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistAuthFiles uploads the provided auth files to the object storage backend.
|
||||||
|
func (s *ObjectTokenStore) PersistAuthFiles(ctx context.Context, _ string, paths ...string) error {
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
for _, p := range paths {
|
||||||
|
trimmed := strings.TrimSpace(p)
|
||||||
|
if trimmed == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
abs := trimmed
|
||||||
|
if !filepath.IsAbs(abs) {
|
||||||
|
abs = filepath.Join(s.authDir, trimmed)
|
||||||
|
}
|
||||||
|
if err := s.uploadAuth(ctx, abs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PersistConfig uploads the local configuration file to the object storage backend.
|
||||||
|
func (s *ObjectTokenStore) PersistConfig(ctx context.Context) error {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
data, err := os.ReadFile(s.configPath)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
|
return s.deleteObject(ctx, objectStoreConfigKey)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("object store: read config file: %w", err)
|
||||||
|
}
|
||||||
|
if len(data) == 0 {
|
||||||
|
return s.deleteObject(ctx, objectStoreConfigKey)
|
||||||
|
}
|
||||||
|
return s.putObject(ctx, objectStoreConfigKey, data, "application/x-yaml")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) ensureBucket(ctx context.Context) error {
|
||||||
|
exists, err := s.client.BucketExists(ctx, s.cfg.Bucket)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("object store: check bucket: %w", err)
|
||||||
|
}
|
||||||
|
if exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err = s.client.MakeBucket(ctx, s.cfg.Bucket, minio.MakeBucketOptions{Region: s.cfg.Region}); err != nil {
|
||||||
|
return fmt.Errorf("object store: create bucket: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncConfigFromBucket reconciles the configuration file between the bucket
// and the local mirror. If the remote object exists it wins and is written
// locally (with line endings normalized to LF). If it does not exist, a local
// config is created (from the example template when provided, otherwise
// empty) and, when non-empty, uploaded to seed the bucket.
func (s *ObjectTokenStore) syncConfigFromBucket(ctx context.Context, example string) error {
	key := s.prefixedKey(objectStoreConfigKey)
	_, err := s.client.StatObject(ctx, s.cfg.Bucket, key, minio.StatObjectOptions{})
	switch {
	case err == nil:
		// Remote config exists: download it and overwrite the local mirror.
		object, errGet := s.client.GetObject(ctx, s.cfg.Bucket, key, minio.GetObjectOptions{})
		if errGet != nil {
			return fmt.Errorf("object store: fetch config: %w", errGet)
		}
		defer object.Close()
		data, errRead := io.ReadAll(object)
		if errRead != nil {
			return fmt.Errorf("object store: read config: %w", errRead)
		}
		if errWrite := os.WriteFile(s.configPath, normalizeLineEndingsBytes(data), 0o600); errWrite != nil {
			return fmt.Errorf("object store: write config: %w", errWrite)
		}
	case isObjectNotFound(err):
		// No remote config: ensure a local one exists, seeding from the
		// example template when available, otherwise creating an empty file.
		if _, statErr := os.Stat(s.configPath); errors.Is(statErr, fs.ErrNotExist) {
			if example != "" {
				if errCopy := misc.CopyConfigTemplate(example, s.configPath); errCopy != nil {
					return fmt.Errorf("object store: copy example config: %w", errCopy)
				}
			} else {
				if errCreate := os.MkdirAll(filepath.Dir(s.configPath), 0o700); errCreate != nil {
					return fmt.Errorf("object store: prepare config directory: %w", errCreate)
				}
				if errWrite := os.WriteFile(s.configPath, []byte{}, 0o600); errWrite != nil {
					return fmt.Errorf("object store: create empty config: %w", errWrite)
				}
			}
		}
		data, errRead := os.ReadFile(s.configPath)
		if errRead != nil {
			return fmt.Errorf("object store: read local config: %w", errRead)
		}
		// Only upload non-empty configs; putObject treats empty as delete.
		if len(data) > 0 {
			if errPut := s.putObject(ctx, objectStoreConfigKey, data, "application/x-yaml"); errPut != nil {
				return errPut
			}
		}
	default:
		return fmt.Errorf("object store: stat config: %w", err)
	}
	return nil
}
|
||||||
|
|
||||||
|
// syncAuthFromBucket replaces the local auth mirror with the bucket contents.
// The local auth directory is wiped and recreated, then every object under
// the auth prefix is downloaded. Keys that would escape the mirror directory
// (absolute or ".."-traversing after cleaning) are logged and skipped.
func (s *ObjectTokenStore) syncAuthFromBucket(ctx context.Context) error {
	// Reset the mirror so deletions in the bucket are reflected locally.
	if err := os.RemoveAll(s.authDir); err != nil {
		return fmt.Errorf("object store: reset auth directory: %w", err)
	}
	if err := os.MkdirAll(s.authDir, 0o700); err != nil {
		return fmt.Errorf("object store: recreate auth directory: %w", err)
	}

	prefix := s.prefixedKey(objectStoreAuthPrefix + "/")
	objectCh := s.client.ListObjects(ctx, s.cfg.Bucket, minio.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: true,
	})
	for object := range objectCh {
		if object.Err != nil {
			return fmt.Errorf("object store: list auth objects: %w", object.Err)
		}
		rel := strings.TrimPrefix(object.Key, prefix)
		// Skip prefix placeholders and directory markers.
		if rel == "" || strings.HasSuffix(rel, "/") {
			continue
		}
		relPath := filepath.FromSlash(rel)
		// Guard against keys that resolve outside the mirror directory.
		if filepath.IsAbs(relPath) {
			log.WithField("key", object.Key).Warn("object store: skip auth outside mirror")
			continue
		}
		cleanRel := filepath.Clean(relPath)
		if cleanRel == "." || cleanRel == ".." || strings.HasPrefix(cleanRel, ".."+string(os.PathSeparator)) {
			log.WithField("key", object.Key).Warn("object store: skip auth outside mirror")
			continue
		}
		local := filepath.Join(s.authDir, cleanRel)
		if err := os.MkdirAll(filepath.Dir(local), 0o700); err != nil {
			return fmt.Errorf("object store: prepare auth subdir: %w", err)
		}
		reader, errGet := s.client.GetObject(ctx, s.cfg.Bucket, object.Key, minio.GetObjectOptions{})
		if errGet != nil {
			return fmt.Errorf("object store: download auth %s: %w", object.Key, errGet)
		}
		data, errRead := io.ReadAll(reader)
		// Close explicitly (not deferred) to avoid accumulating open readers
		// across loop iterations.
		_ = reader.Close()
		if errRead != nil {
			return fmt.Errorf("object store: read auth %s: %w", object.Key, errRead)
		}
		if errWrite := os.WriteFile(local, data, 0o600); errWrite != nil {
			return fmt.Errorf("object store: write auth %s: %w", local, errWrite)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) uploadAuth(ctx context.Context, path string) error {
|
||||||
|
if path == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
rel, err := filepath.Rel(s.authDir, path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("object store: resolve auth relative path: %w", err)
|
||||||
|
}
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
|
return s.deleteAuthObject(ctx, path)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("object store: read auth file: %w", err)
|
||||||
|
}
|
||||||
|
if len(data) == 0 {
|
||||||
|
return s.deleteAuthObject(ctx, path)
|
||||||
|
}
|
||||||
|
key := objectStoreAuthPrefix + "/" + filepath.ToSlash(rel)
|
||||||
|
return s.putObject(ctx, key, data, "application/json")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) deleteAuthObject(ctx context.Context, path string) error {
|
||||||
|
if path == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
rel, err := filepath.Rel(s.authDir, path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("object store: resolve auth relative path: %w", err)
|
||||||
|
}
|
||||||
|
key := objectStoreAuthPrefix + "/" + filepath.ToSlash(rel)
|
||||||
|
return s.deleteObject(ctx, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) putObject(ctx context.Context, key string, data []byte, contentType string) error {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return s.deleteObject(ctx, key)
|
||||||
|
}
|
||||||
|
fullKey := s.prefixedKey(key)
|
||||||
|
reader := bytes.NewReader(data)
|
||||||
|
_, err := s.client.PutObject(ctx, s.cfg.Bucket, fullKey, reader, int64(len(data)), minio.PutObjectOptions{
|
||||||
|
ContentType: contentType,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("object store: put object %s: %w", fullKey, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) deleteObject(ctx context.Context, key string) error {
|
||||||
|
fullKey := s.prefixedKey(key)
|
||||||
|
err := s.client.RemoveObject(ctx, s.cfg.Bucket, fullKey, minio.RemoveObjectOptions{})
|
||||||
|
if err != nil {
|
||||||
|
if isObjectNotFound(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("object store: delete object %s: %w", fullKey, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) prefixedKey(key string) string {
|
||||||
|
key = strings.TrimLeft(key, "/")
|
||||||
|
if s.cfg.Prefix == "" {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
return strings.TrimLeft(s.cfg.Prefix+"/"+key, "/")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) resolveAuthPath(auth *cliproxyauth.Auth) (string, error) {
|
||||||
|
if auth == nil {
|
||||||
|
return "", fmt.Errorf("object store: auth is nil")
|
||||||
|
}
|
||||||
|
if auth.Attributes != nil {
|
||||||
|
if path := strings.TrimSpace(auth.Attributes["path"]); path != "" {
|
||||||
|
if filepath.IsAbs(path) {
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
return filepath.Join(s.authDir, path), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fileName := strings.TrimSpace(auth.FileName)
|
||||||
|
if fileName == "" {
|
||||||
|
fileName = strings.TrimSpace(auth.ID)
|
||||||
|
}
|
||||||
|
if fileName == "" {
|
||||||
|
return "", fmt.Errorf("object store: auth %s missing filename", auth.ID)
|
||||||
|
}
|
||||||
|
if !strings.HasSuffix(strings.ToLower(fileName), ".json") {
|
||||||
|
fileName += ".json"
|
||||||
|
}
|
||||||
|
return filepath.Join(s.authDir, fileName), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ObjectTokenStore) resolveDeletePath(id string) (string, error) {
|
||||||
|
id = strings.TrimSpace(id)
|
||||||
|
if id == "" {
|
||||||
|
return "", fmt.Errorf("object store: id is empty")
|
||||||
|
}
|
||||||
|
// Absolute paths are honored as-is; callers must ensure they point inside the mirror.
|
||||||
|
if filepath.IsAbs(id) {
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
// Treat any non-absolute id (including nested like "team/foo") as relative to the mirror authDir.
|
||||||
|
// Normalize separators and guard against path traversal.
|
||||||
|
clean := filepath.Clean(filepath.FromSlash(id))
|
||||||
|
if clean == "." || clean == ".." || strings.HasPrefix(clean, ".."+string(os.PathSeparator)) {
|
||||||
|
return "", fmt.Errorf("object store: invalid auth identifier %s", id)
|
||||||
|
}
|
||||||
|
// Ensure .json suffix.
|
||||||
|
if !strings.HasSuffix(strings.ToLower(clean), ".json") {
|
||||||
|
clean += ".json"
|
||||||
|
}
|
||||||
|
return filepath.Join(s.authDir, clean), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readAuthFile parses a mirrored auth JSON file into an Auth record. It
// returns (nil, nil) for empty files. The record's ID and FileName are the
// file's path relative to baseDir (normalized via normalizeAuthID), the
// provider comes from the JSON "type" field ("unknown" when absent), and the
// file's mtime stands in for creation/update timestamps.
func (s *ObjectTokenStore) readAuthFile(path, baseDir string) (*cliproxyauth.Auth, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read file: %w", err)
	}
	if len(data) == 0 {
		return nil, nil
	}
	metadata := make(map[string]any)
	if err = json.Unmarshal(data, &metadata); err != nil {
		return nil, fmt.Errorf("unmarshal auth json: %w", err)
	}
	provider := strings.TrimSpace(valueAsString(metadata["type"]))
	if provider == "" {
		provider = "unknown"
	}
	info, err := os.Stat(path)
	if err != nil {
		return nil, fmt.Errorf("stat auth file: %w", err)
	}
	// Fall back to the bare filename if a relative path cannot be computed.
	rel, errRel := filepath.Rel(baseDir, path)
	if errRel != nil {
		rel = filepath.Base(path)
	}
	rel = normalizeAuthID(rel)
	attr := map[string]string{"path": path}
	if email := strings.TrimSpace(valueAsString(metadata["email"])); email != "" {
		attr["email"] = email
	}
	auth := &cliproxyauth.Auth{
		ID:               rel,
		Provider:         provider,
		FileName:         rel,
		Label:            labelFor(metadata),
		Status:           cliproxyauth.StatusActive,
		Attributes:       attr,
		Metadata:         metadata,
		CreatedAt:        info.ModTime(),
		UpdatedAt:        info.ModTime(),
		LastRefreshedAt:  time.Time{},
		NextRefreshAfter: time.Time{},
	}
	return auth, nil
}
|
||||||
|
|
||||||
|
// normalizeLineEndingsBytes converts Windows (CRLF) and classic-Mac (bare CR)
// line endings to Unix (LF) line endings.
func normalizeLineEndingsBytes(data []byte) []byte {
	unixified := bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n"))
	return bytes.ReplaceAll(unixified, []byte("\r"), []byte("\n"))
}
|
||||||
|
|
||||||
|
func isObjectNotFound(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
resp := minio.ToErrorResponse(err)
|
||||||
|
if resp.StatusCode == http.StatusNotFound {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
switch resp.Code {
|
||||||
|
case "NoSuchKey", "NotFound", "NoSuchBucket":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
@@ -186,6 +186,12 @@ func ConvertClaudeRequestToCodex(modelName string, inputRawJSON []byte, _ bool)
|
|||||||
shortMap := buildShortNameMap(names)
|
shortMap := buildShortNameMap(names)
|
||||||
for i := 0; i < len(toolResults); i++ {
|
for i := 0; i < len(toolResults); i++ {
|
||||||
toolResult := toolResults[i]
|
toolResult := toolResults[i]
|
||||||
|
// Special handling: map Claude web search tool to Codex web_search
|
||||||
|
if toolResult.Get("type").String() == "web_search_20250305" {
|
||||||
|
// Replace the tool content entirely with {"type":"web_search"}
|
||||||
|
template, _ = sjson.SetRaw(template, "tools.-1", `{"type":"web_search"}`)
|
||||||
|
continue
|
||||||
|
}
|
||||||
tool := toolResult.Raw
|
tool := toolResult.Raw
|
||||||
tool, _ = sjson.Set(tool, "type", "function")
|
tool, _ = sjson.Set(tool, "type", "function")
|
||||||
// Apply shortened name if needed
|
// Apply shortened name if needed
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ func ConvertOpenAIResponsesRequestToCodex(modelName string, inputRawJSON []byte,
|
|||||||
inputResults = inputResult.Array()
|
inputResults = inputResult.Array()
|
||||||
} else if inputResult.Type == gjson.String {
|
} else if inputResult.Type == gjson.String {
|
||||||
newInput := `[{"type":"message","role":"user","content":[{"type":"input_text","text":""}]}]`
|
newInput := `[{"type":"message","role":"user","content":[{"type":"input_text","text":""}]}]`
|
||||||
newInput, _ = sjson.Set(newInput, "0.content.0.text", inputResult.String())
|
newInput, _ = sjson.SetRaw(newInput, "0.content.0.text", inputResult.Raw)
|
||||||
inputResults = gjson.Parse(newInput).Array()
|
inputResults = gjson.Parse(newInput).Array()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package responses
|
package responses
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -12,6 +11,7 @@ import (
|
|||||||
|
|
||||||
// ConvertCodexResponseToOpenAIResponses converts OpenAI Chat Completions streaming chunks
|
// ConvertCodexResponseToOpenAIResponses converts OpenAI Chat Completions streaming chunks
|
||||||
// to OpenAI Responses SSE events (response.*).
|
// to OpenAI Responses SSE events (response.*).
|
||||||
|
|
||||||
func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string {
|
func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string {
|
||||||
if bytes.HasPrefix(rawJSON, []byte("data:")) {
|
if bytes.HasPrefix(rawJSON, []byte("data:")) {
|
||||||
rawJSON = bytes.TrimSpace(rawJSON[5:])
|
rawJSON = bytes.TrimSpace(rawJSON[5:])
|
||||||
@@ -21,7 +21,8 @@ func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string
|
|||||||
rawJSON, _ = sjson.SetBytes(rawJSON, "response.instructions", gjson.GetBytes(originalRequestRawJSON, "instructions").String())
|
rawJSON, _ = sjson.SetBytes(rawJSON, "response.instructions", gjson.GetBytes(originalRequestRawJSON, "instructions").String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return []string{fmt.Sprintf("data: %s", string(rawJSON))}
|
out := fmt.Sprintf("data: %s", string(rawJSON))
|
||||||
|
return []string{out}
|
||||||
}
|
}
|
||||||
return []string{string(rawJSON)}
|
return []string{string(rawJSON)}
|
||||||
}
|
}
|
||||||
@@ -29,31 +30,13 @@ func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string
|
|||||||
// ConvertCodexResponseToOpenAIResponsesNonStream builds a single Responses JSON
|
// ConvertCodexResponseToOpenAIResponsesNonStream builds a single Responses JSON
|
||||||
// from a non-streaming OpenAI Chat Completions response.
|
// from a non-streaming OpenAI Chat Completions response.
|
||||||
func ConvertCodexResponseToOpenAIResponsesNonStream(_ context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
|
func ConvertCodexResponseToOpenAIResponsesNonStream(_ context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
|
||||||
scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
|
rootResult := gjson.ParseBytes(rawJSON)
|
||||||
buffer := make([]byte, 20_971_520)
|
// Verify this is a response.completed event
|
||||||
scanner.Buffer(buffer, 20_971_520)
|
if rootResult.Get("type").String() != "response.completed" {
|
||||||
dataTag := []byte("data:")
|
return ""
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Bytes()
|
|
||||||
|
|
||||||
if !bytes.HasPrefix(line, dataTag) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
line = bytes.TrimSpace(line[5:])
|
|
||||||
|
|
||||||
rootResult := gjson.ParseBytes(line)
|
|
||||||
// Verify this is a response.completed event
|
|
||||||
|
|
||||||
if rootResult.Get("type").String() != "response.completed" {
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
responseResult := rootResult.Get("response")
|
|
||||||
template := responseResult.Raw
|
|
||||||
|
|
||||||
template, _ = sjson.Set(template, "instructions", gjson.GetBytes(originalRequestRawJSON, "instructions").String())
|
|
||||||
|
|
||||||
return template
|
|
||||||
}
|
}
|
||||||
return ""
|
responseResult := rootResult.Get("response")
|
||||||
|
template := responseResult.Raw
|
||||||
|
template, _ = sjson.Set(template, "instructions", gjson.GetBytes(originalRequestRawJSON, "instructions").String())
|
||||||
|
return template
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,6 +37,8 @@ type ConvertOpenAIResponseToAnthropicParams struct {
|
|||||||
ContentBlocksStopped bool
|
ContentBlocksStopped bool
|
||||||
// Track if message_delta has been sent
|
// Track if message_delta has been sent
|
||||||
MessageDeltaSent bool
|
MessageDeltaSent bool
|
||||||
|
// Track if message_start has been sent
|
||||||
|
MessageStarted bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToolCallAccumulator holds the state for accumulating tool call data
|
// ToolCallAccumulator holds the state for accumulating tool call data
|
||||||
@@ -84,20 +86,12 @@ func ConvertOpenAIResponseToClaude(_ context.Context, _ string, originalRequestR
|
|||||||
return convertOpenAIDoneToAnthropic((*param).(*ConvertOpenAIResponseToAnthropicParams))
|
return convertOpenAIDoneToAnthropic((*param).(*ConvertOpenAIResponseToAnthropicParams))
|
||||||
}
|
}
|
||||||
|
|
||||||
root := gjson.ParseBytes(rawJSON)
|
streamResult := gjson.GetBytes(originalRequestRawJSON, "stream")
|
||||||
|
if !streamResult.Exists() || (streamResult.Exists() && streamResult.Type == gjson.False) {
|
||||||
// Check if this is a streaming chunk or non-streaming response
|
|
||||||
objectType := root.Get("object").String()
|
|
||||||
|
|
||||||
if objectType == "chat.completion.chunk" {
|
|
||||||
// Handle streaming response
|
|
||||||
return convertOpenAIStreamingChunkToAnthropic(rawJSON, (*param).(*ConvertOpenAIResponseToAnthropicParams))
|
|
||||||
} else if objectType == "chat.completion" {
|
|
||||||
// Handle non-streaming response
|
|
||||||
return convertOpenAINonStreamingToAnthropic(rawJSON)
|
return convertOpenAINonStreamingToAnthropic(rawJSON)
|
||||||
|
} else {
|
||||||
|
return convertOpenAIStreamingChunkToAnthropic(rawJSON, (*param).(*ConvertOpenAIResponseToAnthropicParams))
|
||||||
}
|
}
|
||||||
|
|
||||||
return []string{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// convertOpenAIStreamingChunkToAnthropic converts OpenAI streaming chunk to Anthropic streaming events
|
// convertOpenAIStreamingChunkToAnthropic converts OpenAI streaming chunk to Anthropic streaming events
|
||||||
@@ -118,7 +112,7 @@ func convertOpenAIStreamingChunkToAnthropic(rawJSON []byte, param *ConvertOpenAI
|
|||||||
|
|
||||||
// Check if this is the first chunk (has role)
|
// Check if this is the first chunk (has role)
|
||||||
if delta := root.Get("choices.0.delta"); delta.Exists() {
|
if delta := root.Get("choices.0.delta"); delta.Exists() {
|
||||||
if role := delta.Get("role"); role.Exists() && role.String() == "assistant" {
|
if role := delta.Get("role"); role.Exists() && role.String() == "assistant" && !param.MessageStarted {
|
||||||
// Send message_start event
|
// Send message_start event
|
||||||
messageStart := map[string]interface{}{
|
messageStart := map[string]interface{}{
|
||||||
"type": "message_start",
|
"type": "message_start",
|
||||||
@@ -138,6 +132,7 @@ func convertOpenAIStreamingChunkToAnthropic(rawJSON []byte, param *ConvertOpenAI
|
|||||||
}
|
}
|
||||||
messageStartJSON, _ := json.Marshal(messageStart)
|
messageStartJSON, _ := json.Marshal(messageStart)
|
||||||
results = append(results, "event: message_start\ndata: "+string(messageStartJSON)+"\n\n")
|
results = append(results, "event: message_start\ndata: "+string(messageStartJSON)+"\n\n")
|
||||||
|
param.MessageStarted = true
|
||||||
|
|
||||||
// Don't send content_block_start for text here - wait for actual content
|
// Don't send content_block_start for text here - wait for actual content
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -97,8 +97,8 @@ func ConvertOpenAIResponseToGemini(_ context.Context, _ string, originalRequestR
|
|||||||
var results []string
|
var results []string
|
||||||
|
|
||||||
choices.ForEach(func(choiceIndex, choice gjson.Result) bool {
|
choices.ForEach(func(choiceIndex, choice gjson.Result) bool {
|
||||||
// Base Gemini response template
|
// Base Gemini response template without finishReason; set when known
|
||||||
template := `{"candidates":[{"content":{"parts":[],"role":"model"},"finishReason":"STOP","index":0}]}`
|
template := `{"candidates":[{"content":{"parts":[],"role":"model"},"index":0}]}`
|
||||||
|
|
||||||
// Set model if available
|
// Set model if available
|
||||||
if model := root.Get("model"); model.Exists() {
|
if model := root.Get("model"); model.Exists() {
|
||||||
@@ -514,8 +514,8 @@ func tryParseNumber(s string) (interface{}, bool) {
|
|||||||
func ConvertOpenAIResponseToGeminiNonStream(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
|
func ConvertOpenAIResponseToGeminiNonStream(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
|
||||||
root := gjson.ParseBytes(rawJSON)
|
root := gjson.ParseBytes(rawJSON)
|
||||||
|
|
||||||
// Base Gemini response template
|
// Base Gemini response template without finishReason; set when known
|
||||||
out := `{"candidates":[{"content":{"parts":[],"role":"model"},"finishReason":"STOP","index":0}]}`
|
out := `{"candidates":[{"content":{"parts":[],"role":"model"},"index":0}]}`
|
||||||
|
|
||||||
// Set model if available
|
// Set model if available
|
||||||
if model := root.Get("model"); model.Exists() {
|
if model := root.Get("model"); model.Exists() {
|
||||||
|
|||||||
@@ -67,9 +67,20 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
|
|||||||
rawJSON = bytes.TrimSpace(rawJSON[5:])
|
rawJSON = bytes.TrimSpace(rawJSON[5:])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rawJSON = bytes.TrimSpace(rawJSON)
|
||||||
|
if len(rawJSON) == 0 {
|
||||||
|
return []string{}
|
||||||
|
}
|
||||||
|
if bytes.Equal(rawJSON, []byte("[DONE]")) {
|
||||||
|
return []string{}
|
||||||
|
}
|
||||||
|
|
||||||
root := gjson.ParseBytes(rawJSON)
|
root := gjson.ParseBytes(rawJSON)
|
||||||
obj := root.Get("object").String()
|
obj := root.Get("object")
|
||||||
if obj != "chat.completion.chunk" {
|
if obj.Exists() && obj.String() != "" && obj.String() != "chat.completion.chunk" {
|
||||||
|
return []string{}
|
||||||
|
}
|
||||||
|
if !root.Get("choices").Exists() || !root.Get("choices").IsArray() {
|
||||||
return []string{}
|
return []string{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
181
internal/util/gemini_thinking.go
Normal file
181
internal/util/gemini_thinking.go
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/tidwall/sjson"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
GeminiThinkingBudgetMetadataKey = "gemini_thinking_budget"
|
||||||
|
GeminiIncludeThoughtsMetadataKey = "gemini_include_thoughts"
|
||||||
|
GeminiOriginalModelMetadataKey = "gemini_original_model"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ParseGeminiThinkingSuffix(model string) (string, *int, *bool, bool) {
|
||||||
|
if model == "" {
|
||||||
|
return model, nil, nil, false
|
||||||
|
}
|
||||||
|
lower := strings.ToLower(model)
|
||||||
|
if !strings.HasPrefix(lower, "gemini-") {
|
||||||
|
return model, nil, nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasSuffix(lower, "-nothinking") {
|
||||||
|
base := model[:len(model)-len("-nothinking")]
|
||||||
|
budgetValue := 0
|
||||||
|
if strings.HasPrefix(lower, "gemini-2.5-pro") {
|
||||||
|
budgetValue = 128
|
||||||
|
}
|
||||||
|
include := false
|
||||||
|
return base, &budgetValue, &include, true
|
||||||
|
}
|
||||||
|
|
||||||
|
idx := strings.LastIndex(lower, "-thinking-")
|
||||||
|
if idx == -1 {
|
||||||
|
return model, nil, nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
digits := model[idx+len("-thinking-"):]
|
||||||
|
if digits == "" {
|
||||||
|
return model, nil, nil, false
|
||||||
|
}
|
||||||
|
end := len(digits)
|
||||||
|
for i := 0; i < len(digits); i++ {
|
||||||
|
if digits[i] < '0' || digits[i] > '9' {
|
||||||
|
end = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if end == 0 {
|
||||||
|
return model, nil, nil, false
|
||||||
|
}
|
||||||
|
valueStr := digits[:end]
|
||||||
|
value, err := strconv.Atoi(valueStr)
|
||||||
|
if err != nil {
|
||||||
|
return model, nil, nil, false
|
||||||
|
}
|
||||||
|
base := model[:idx]
|
||||||
|
budgetValue := value
|
||||||
|
return base, &budgetValue, nil, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func ApplyGeminiThinkingConfig(body []byte, budget *int, includeThoughts *bool) []byte {
|
||||||
|
if budget == nil && includeThoughts == nil {
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
updated := body
|
||||||
|
if budget != nil {
|
||||||
|
valuePath := "generationConfig.thinkingConfig.thinkingBudget"
|
||||||
|
rewritten, err := sjson.SetBytes(updated, valuePath, *budget)
|
||||||
|
if err == nil {
|
||||||
|
updated = rewritten
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if includeThoughts != nil {
|
||||||
|
valuePath := "generationConfig.thinkingConfig.include_thoughts"
|
||||||
|
rewritten, err := sjson.SetBytes(updated, valuePath, *includeThoughts)
|
||||||
|
if err == nil {
|
||||||
|
updated = rewritten
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return updated
|
||||||
|
}
|
||||||
|
|
||||||
|
func ApplyGeminiCLIThinkingConfig(body []byte, budget *int, includeThoughts *bool) []byte {
|
||||||
|
if budget == nil && includeThoughts == nil {
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
updated := body
|
||||||
|
if budget != nil {
|
||||||
|
valuePath := "request.generationConfig.thinkingConfig.thinkingBudget"
|
||||||
|
rewritten, err := sjson.SetBytes(updated, valuePath, *budget)
|
||||||
|
if err == nil {
|
||||||
|
updated = rewritten
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if includeThoughts != nil {
|
||||||
|
valuePath := "request.generationConfig.thinkingConfig.include_thoughts"
|
||||||
|
rewritten, err := sjson.SetBytes(updated, valuePath, *includeThoughts)
|
||||||
|
if err == nil {
|
||||||
|
updated = rewritten
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return updated
|
||||||
|
}
|
||||||
|
|
||||||
|
func GeminiThinkingFromMetadata(metadata map[string]any) (*int, *bool, bool) {
|
||||||
|
if len(metadata) == 0 {
|
||||||
|
return nil, nil, false
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
budgetPtr *int
|
||||||
|
includePtr *bool
|
||||||
|
matched bool
|
||||||
|
)
|
||||||
|
if rawBudget, ok := metadata[GeminiThinkingBudgetMetadataKey]; ok {
|
||||||
|
switch v := rawBudget.(type) {
|
||||||
|
case int:
|
||||||
|
budget := v
|
||||||
|
budgetPtr = &budget
|
||||||
|
matched = true
|
||||||
|
case int32:
|
||||||
|
budget := int(v)
|
||||||
|
budgetPtr = &budget
|
||||||
|
matched = true
|
||||||
|
case int64:
|
||||||
|
budget := int(v)
|
||||||
|
budgetPtr = &budget
|
||||||
|
matched = true
|
||||||
|
case float64:
|
||||||
|
budget := int(v)
|
||||||
|
budgetPtr = &budget
|
||||||
|
matched = true
|
||||||
|
case json.Number:
|
||||||
|
if val, err := v.Int64(); err == nil {
|
||||||
|
budget := int(val)
|
||||||
|
budgetPtr = &budget
|
||||||
|
matched = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if rawInclude, ok := metadata[GeminiIncludeThoughtsMetadataKey]; ok {
|
||||||
|
switch v := rawInclude.(type) {
|
||||||
|
case bool:
|
||||||
|
include := v
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
case string:
|
||||||
|
if parsed, err := strconv.ParseBool(v); err == nil {
|
||||||
|
include := parsed
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
}
|
||||||
|
case json.Number:
|
||||||
|
if val, err := v.Int64(); err == nil {
|
||||||
|
include := val != 0
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
}
|
||||||
|
case int:
|
||||||
|
include := v != 0
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
case int32:
|
||||||
|
include := v != 0
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
case int64:
|
||||||
|
include := v != 0
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
case float64:
|
||||||
|
include := v != 0
|
||||||
|
includePtr = &include
|
||||||
|
matched = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return budgetPtr, includePtr, matched
|
||||||
|
}
|
||||||
@@ -7,8 +7,9 @@
|
|||||||
package claude
|
package claude
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bufio"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
@@ -197,33 +198,65 @@ func (h *ClaudeCodeAPIHandler) handleStreamingResponse(c *gin.Context, rawJSON [
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h *ClaudeCodeAPIHandler) forwardClaudeStream(c *gin.Context, flusher http.Flusher, cancel func(error), data <-chan []byte, errs <-chan *interfaces.ErrorMessage) {
|
func (h *ClaudeCodeAPIHandler) forwardClaudeStream(c *gin.Context, flusher http.Flusher, cancel func(error), data <-chan []byte, errs <-chan *interfaces.ErrorMessage) {
|
||||||
|
// v6.1: Intelligent Buffered Streamer strategy
|
||||||
|
// Enhanced buffering with larger buffer size (16KB) and longer flush interval (120ms).
|
||||||
|
// Smart flush only when buffer is sufficiently filled (≥50%), dramatically reducing
|
||||||
|
// flush frequency from ~12.5Hz to ~5-8Hz while maintaining low latency.
|
||||||
|
writer := bufio.NewWriterSize(c.Writer, 16*1024) // 4KB → 16KB
|
||||||
|
ticker := time.NewTicker(120 * time.Millisecond) // 80ms → 120ms
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
var chunkIdx int
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-c.Request.Context().Done():
|
case <-c.Request.Context().Done():
|
||||||
|
// Context cancelled, flush any remaining data before exit
|
||||||
|
_ = writer.Flush()
|
||||||
cancel(c.Request.Context().Err())
|
cancel(c.Request.Context().Err())
|
||||||
return
|
return
|
||||||
|
|
||||||
|
case <-ticker.C:
|
||||||
|
// Smart flush: only flush when buffer has sufficient data (≥50% full)
|
||||||
|
// This reduces flush frequency while ensuring data flows naturally
|
||||||
|
buffered := writer.Buffered()
|
||||||
|
if buffered >= 8*1024 { // At least 8KB (50% of 16KB buffer)
|
||||||
|
if err := writer.Flush(); err != nil {
|
||||||
|
// Error flushing, cancel and return
|
||||||
|
cancel(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
flusher.Flush() // Also flush the underlying http.ResponseWriter
|
||||||
|
}
|
||||||
|
|
||||||
case chunk, ok := <-data:
|
case chunk, ok := <-data:
|
||||||
if !ok {
|
if !ok {
|
||||||
flusher.Flush()
|
// Stream ended, flush remaining data
|
||||||
|
_ = writer.Flush()
|
||||||
cancel(nil)
|
cancel(nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if bytes.HasPrefix(chunk, []byte("event:")) {
|
// Forward the complete SSE event block directly (already formatted by the translator).
|
||||||
_, _ = c.Writer.Write([]byte("\n"))
|
// The translator returns a complete SSE-compliant event block, including event:, data:, and separators.
|
||||||
|
// The handler just needs to forward it without reassembly.
|
||||||
|
if len(chunk) > 0 {
|
||||||
|
_, _ = writer.Write(chunk)
|
||||||
}
|
}
|
||||||
|
chunkIdx++
|
||||||
|
|
||||||
_, _ = c.Writer.Write(chunk)
|
|
||||||
_, _ = c.Writer.Write([]byte("\n"))
|
|
||||||
|
|
||||||
flusher.Flush()
|
|
||||||
case errMsg, ok := <-errs:
|
case errMsg, ok := <-errs:
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if errMsg != nil {
|
if errMsg != nil {
|
||||||
h.WriteErrorResponse(c, errMsg)
|
// An error occurred: emit as a proper SSE error event
|
||||||
flusher.Flush()
|
errorBytes, _ := json.Marshal(h.toClaudeError(errMsg))
|
||||||
|
_, _ = writer.WriteString("event: error\n")
|
||||||
|
_, _ = writer.WriteString("data: ")
|
||||||
|
_, _ = writer.Write(errorBytes)
|
||||||
|
_, _ = writer.WriteString("\n\n")
|
||||||
|
_ = writer.Flush()
|
||||||
}
|
}
|
||||||
var execErr error
|
var execErr error
|
||||||
if errMsg != nil {
|
if errMsg != nil {
|
||||||
@@ -231,7 +264,26 @@ func (h *ClaudeCodeAPIHandler) forwardClaudeStream(c *gin.Context, flusher http.
|
|||||||
}
|
}
|
||||||
cancel(execErr)
|
cancel(execErr)
|
||||||
return
|
return
|
||||||
case <-time.After(500 * time.Millisecond):
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type claudeErrorDetail struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type claudeErrorResponse struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Error claudeErrorDetail `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ClaudeCodeAPIHandler) toClaudeError(msg *interfaces.ErrorMessage) claudeErrorResponse {
|
||||||
|
return claudeErrorResponse{
|
||||||
|
Type: "error",
|
||||||
|
Error: claudeErrorDetail{
|
||||||
|
Type: "api_error",
|
||||||
|
Message: msg.Error.Error(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -133,20 +133,27 @@ func (h *BaseAPIHandler) GetContextWithCancel(handler interfaces.APIHandler, c *
|
|||||||
// ExecuteWithAuthManager executes a non-streaming request via the core auth manager.
|
// ExecuteWithAuthManager executes a non-streaming request via the core auth manager.
|
||||||
// This path is the only supported execution route.
|
// This path is the only supported execution route.
|
||||||
func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
|
func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
|
||||||
providers := util.GetProviderName(modelName)
|
normalizedModel, metadata := normalizeModelMetadata(modelName)
|
||||||
|
providers := util.GetProviderName(normalizedModel)
|
||||||
if len(providers) == 0 {
|
if len(providers) == 0 {
|
||||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||||
}
|
}
|
||||||
req := coreexecutor.Request{
|
req := coreexecutor.Request{
|
||||||
Model: modelName,
|
Model: normalizedModel,
|
||||||
Payload: cloneBytes(rawJSON),
|
Payload: cloneBytes(rawJSON),
|
||||||
}
|
}
|
||||||
|
if cloned := cloneMetadata(metadata); cloned != nil {
|
||||||
|
req.Metadata = cloned
|
||||||
|
}
|
||||||
opts := coreexecutor.Options{
|
opts := coreexecutor.Options{
|
||||||
Stream: false,
|
Stream: false,
|
||||||
Alt: alt,
|
Alt: alt,
|
||||||
OriginalRequest: cloneBytes(rawJSON),
|
OriginalRequest: cloneBytes(rawJSON),
|
||||||
SourceFormat: sdktranslator.FromString(handlerType),
|
SourceFormat: sdktranslator.FromString(handlerType),
|
||||||
}
|
}
|
||||||
|
if cloned := cloneMetadata(metadata); cloned != nil {
|
||||||
|
opts.Metadata = cloned
|
||||||
|
}
|
||||||
resp, err := h.AuthManager.Execute(ctx, providers, req, opts)
|
resp, err := h.AuthManager.Execute(ctx, providers, req, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
||||||
@@ -157,20 +164,27 @@ func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType
|
|||||||
// ExecuteCountWithAuthManager executes a non-streaming request via the core auth manager.
|
// ExecuteCountWithAuthManager executes a non-streaming request via the core auth manager.
|
||||||
// This path is the only supported execution route.
|
// This path is the only supported execution route.
|
||||||
func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
|
func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
|
||||||
providers := util.GetProviderName(modelName)
|
normalizedModel, metadata := normalizeModelMetadata(modelName)
|
||||||
|
providers := util.GetProviderName(normalizedModel)
|
||||||
if len(providers) == 0 {
|
if len(providers) == 0 {
|
||||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||||
}
|
}
|
||||||
req := coreexecutor.Request{
|
req := coreexecutor.Request{
|
||||||
Model: modelName,
|
Model: normalizedModel,
|
||||||
Payload: cloneBytes(rawJSON),
|
Payload: cloneBytes(rawJSON),
|
||||||
}
|
}
|
||||||
|
if cloned := cloneMetadata(metadata); cloned != nil {
|
||||||
|
req.Metadata = cloned
|
||||||
|
}
|
||||||
opts := coreexecutor.Options{
|
opts := coreexecutor.Options{
|
||||||
Stream: false,
|
Stream: false,
|
||||||
Alt: alt,
|
Alt: alt,
|
||||||
OriginalRequest: cloneBytes(rawJSON),
|
OriginalRequest: cloneBytes(rawJSON),
|
||||||
SourceFormat: sdktranslator.FromString(handlerType),
|
SourceFormat: sdktranslator.FromString(handlerType),
|
||||||
}
|
}
|
||||||
|
if cloned := cloneMetadata(metadata); cloned != nil {
|
||||||
|
opts.Metadata = cloned
|
||||||
|
}
|
||||||
resp, err := h.AuthManager.ExecuteCount(ctx, providers, req, opts)
|
resp, err := h.AuthManager.ExecuteCount(ctx, providers, req, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
||||||
@@ -181,7 +195,8 @@ func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handle
|
|||||||
// ExecuteStreamWithAuthManager executes a streaming request via the core auth manager.
|
// ExecuteStreamWithAuthManager executes a streaming request via the core auth manager.
|
||||||
// This path is the only supported execution route.
|
// This path is the only supported execution route.
|
||||||
func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) (<-chan []byte, <-chan *interfaces.ErrorMessage) {
|
func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) (<-chan []byte, <-chan *interfaces.ErrorMessage) {
|
||||||
providers := util.GetProviderName(modelName)
|
normalizedModel, metadata := normalizeModelMetadata(modelName)
|
||||||
|
providers := util.GetProviderName(normalizedModel)
|
||||||
if len(providers) == 0 {
|
if len(providers) == 0 {
|
||||||
errChan := make(chan *interfaces.ErrorMessage, 1)
|
errChan := make(chan *interfaces.ErrorMessage, 1)
|
||||||
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||||
@@ -189,15 +204,21 @@ func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handl
|
|||||||
return nil, errChan
|
return nil, errChan
|
||||||
}
|
}
|
||||||
req := coreexecutor.Request{
|
req := coreexecutor.Request{
|
||||||
Model: modelName,
|
Model: normalizedModel,
|
||||||
Payload: cloneBytes(rawJSON),
|
Payload: cloneBytes(rawJSON),
|
||||||
}
|
}
|
||||||
|
if cloned := cloneMetadata(metadata); cloned != nil {
|
||||||
|
req.Metadata = cloned
|
||||||
|
}
|
||||||
opts := coreexecutor.Options{
|
opts := coreexecutor.Options{
|
||||||
Stream: true,
|
Stream: true,
|
||||||
Alt: alt,
|
Alt: alt,
|
||||||
OriginalRequest: cloneBytes(rawJSON),
|
OriginalRequest: cloneBytes(rawJSON),
|
||||||
SourceFormat: sdktranslator.FromString(handlerType),
|
SourceFormat: sdktranslator.FromString(handlerType),
|
||||||
}
|
}
|
||||||
|
if cloned := cloneMetadata(metadata); cloned != nil {
|
||||||
|
opts.Metadata = cloned
|
||||||
|
}
|
||||||
chunks, err := h.AuthManager.ExecuteStream(ctx, providers, req, opts)
|
chunks, err := h.AuthManager.ExecuteStream(ctx, providers, req, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errChan := make(chan *interfaces.ErrorMessage, 1)
|
errChan := make(chan *interfaces.ErrorMessage, 1)
|
||||||
@@ -232,6 +253,34 @@ func cloneBytes(src []byte) []byte {
|
|||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func normalizeModelMetadata(modelName string) (string, map[string]any) {
|
||||||
|
baseModel, budget, include, matched := util.ParseGeminiThinkingSuffix(modelName)
|
||||||
|
if !matched {
|
||||||
|
return baseModel, nil
|
||||||
|
}
|
||||||
|
metadata := map[string]any{
|
||||||
|
util.GeminiOriginalModelMetadataKey: modelName,
|
||||||
|
}
|
||||||
|
if budget != nil {
|
||||||
|
metadata[util.GeminiThinkingBudgetMetadataKey] = *budget
|
||||||
|
}
|
||||||
|
if include != nil {
|
||||||
|
metadata[util.GeminiIncludeThoughtsMetadataKey] = *include
|
||||||
|
}
|
||||||
|
return baseModel, metadata
|
||||||
|
}
|
||||||
|
|
||||||
|
func cloneMetadata(src map[string]any) map[string]any {
|
||||||
|
if len(src) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dst := make(map[string]any, len(src))
|
||||||
|
for k, v := range src {
|
||||||
|
dst[k] = v
|
||||||
|
}
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
// WriteErrorResponse writes an error message to the response writer using the HTTP status embedded in the message.
|
// WriteErrorResponse writes an error message to the response writer using the HTTP status embedded in the message.
|
||||||
func (h *BaseAPIHandler) WriteErrorResponse(c *gin.Context, msg *interfaces.ErrorMessage) {
|
func (h *BaseAPIHandler) WriteErrorResponse(c *gin.Context, msg *interfaces.ErrorMessage) {
|
||||||
status := http.StatusInternalServerError
|
status := http.StatusInternalServerError
|
||||||
|
|||||||
Reference in New Issue
Block a user