From c809df75fefc5b99740a7822ca8962b9652229bf Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 15 Dec 2024 00:53:41 +0800 Subject: [PATCH 001/167] feat: model info --- service/aiproxy/model/configkey.go | 140 +++++ service/aiproxy/model/modelconfig.go | 80 +-- service/aiproxy/model/owner.go | 36 ++ .../aiproxy/relay/adaptor/ali/constants.go | 523 ++++++++++-------- .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 23 +- .../relay/adaptor/baiduv2/constants.go | 221 ++++---- .../relay/adaptor/deepseek/constants.go | 14 +- .../aiproxy/relay/adaptor/doubao/constants.go | 95 ++-- .../relay/adaptor/doubaoaudio/constants.go | 12 +- .../relay/adaptor/minimax/constants.go | 52 +- .../relay/adaptor/moonshot/constants.go | 21 +- .../aiproxy/relay/adaptor/openai/constants.go | 116 ++-- .../relay/adaptor/tencent/constants.go | 83 ++- .../aiproxy/relay/adaptor/xunfei/constants.go | 39 +- .../aiproxy/relay/adaptor/zhipu/constants.go | 146 ++--- 15 files changed, 935 insertions(+), 666 deletions(-) create mode 100644 service/aiproxy/model/configkey.go create mode 100644 service/aiproxy/model/owner.go diff --git a/service/aiproxy/model/configkey.go b/service/aiproxy/model/configkey.go new file mode 100644 index 00000000000..c826cdf4398 --- /dev/null +++ b/service/aiproxy/model/configkey.go @@ -0,0 +1,140 @@ +package model + +import "reflect" + +//nolint:revive +type ModelConfigKey string + +const ( + ModelConfigMaxContextTokensKey ModelConfigKey = "max_context_tokens" + ModelConfigMaxInputTokensKey ModelConfigKey = "max_input_tokens" + ModelConfigMaxOutputTokensKey ModelConfigKey = "max_output_tokens" + ModelConfigVisionKey ModelConfigKey = "vision" + ModelConfigToolChoiceKey ModelConfigKey = "tool_choice" + ModelConfigSupportFormatsKey ModelConfigKey = "support_formats" + ModelConfigSupportVoicesKey ModelConfigKey = "support_voices" +) + +//nolint:revive +type ModelConfigOption func(config map[ModelConfigKey]any) + +func WithModelConfigMaxContextTokens(maxContextTokens int) 
ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigMaxContextTokensKey] = maxContextTokens + } +} + +func WithModelConfigMaxInputTokens(maxInputTokens int) ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigMaxInputTokensKey] = maxInputTokens + } +} + +func WithModelConfigMaxOutputTokens(maxOutputTokens int) ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigMaxOutputTokensKey] = maxOutputTokens + } +} + +func WithModelConfigVision(vision bool) ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigVisionKey] = vision + } +} + +func WithModelConfigToolChoice(toolChoice bool) ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigToolChoiceKey] = toolChoice + } +} + +func WithModelConfigSupportFormats(supportFormats []string) ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigSupportFormatsKey] = supportFormats + } +} + +func WithModelConfigSupportVoices(supportVoices []string) ModelConfigOption { + return func(config map[ModelConfigKey]any) { + config[ModelConfigSupportVoicesKey] = supportVoices + } +} + +func NewModelConfig(opts ...ModelConfigOption) map[ModelConfigKey]any { + config := make(map[ModelConfigKey]any) + for _, opt := range opts { + opt(config) + } + return config +} + +func GetModelConfigInt(config map[ModelConfigKey]any, key ModelConfigKey) (int, bool) { + if v, ok := config[key]; ok { + value := reflect.ValueOf(v) + if value.CanInt() { + return int(value.Int()), true + } + if value.CanFloat() { + return int(value.Float()), true + } + } + return 0, false +} + +func GetModelConfigUint(config map[ModelConfigKey]any, key ModelConfigKey) (uint64, bool) { + if v, ok := config[key]; ok { + value := reflect.ValueOf(v) + if value.CanUint() { + return value.Uint(), true + } + if value.CanFloat() { + return uint64(value.Float()), true + } + } + return 0, 
false +} + +func GetModelConfigFloat(config map[ModelConfigKey]any, key ModelConfigKey) (float64, bool) { + if v, ok := config[key]; ok { + value := reflect.ValueOf(v) + if value.CanFloat() { + return value.Float(), true + } + if value.CanInt() { + return float64(value.Int()), true + } + if value.CanUint() { + return float64(value.Uint()), true + } + } + return 0, false +} + +func GetModelConfigStringSlice(config map[ModelConfigKey]any, key ModelConfigKey) ([]string, bool) { + v, ok := config[key] + if !ok { + return nil, false + } + if slice, ok := v.([]string); ok { + return slice, true + } + if slice, ok := v.([]any); ok { + result := make([]string, len(slice)) + for i, v := range slice { + if s, ok := v.(string); ok { + result[i] = s + continue + } + return nil, false + } + return result, true + } + return nil, false +} + +func GetModelConfigBool(config map[ModelConfigKey]any, key ModelConfigKey) (bool, bool) { + if v, ok := config[key].(bool); ok { + return v, true + } + return false, false +} diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index df8c92528a6..f513e592e1f 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -10,67 +10,19 @@ import ( "gorm.io/gorm" ) -//nolint:revive -type ModelConfigKey string - -const ( - ModelConfigMaxContextTokensKey ModelConfigKey = "max_context_tokens" - ModelConfigMaxInputTokensKey ModelConfigKey = "max_input_tokens" - ModelConfigMaxOutputTokensKey ModelConfigKey = "max_output_tokens" - ModelConfigToolChoiceKey ModelConfigKey = "tool_choice" - ModelConfigFunctionCallingKey ModelConfigKey = "function_calling" - ModelConfigSupportFormatsKey ModelConfigKey = "support_formats" - ModelConfigSupportVoicesKey ModelConfigKey = "support_voices" -) - -//nolint:revive -type ModelOwner string - -const ( - ModelOwnerOpenAI ModelOwner = "openai" - ModelOwnerAlibaba ModelOwner = "alibaba" - ModelOwnerTencent ModelOwner = "tencent" - ModelOwnerXunfei 
ModelOwner = "xunfei" - ModelOwnerDeepSeek ModelOwner = "deepseek" - ModelOwnerMoonshot ModelOwner = "moonshot" - ModelOwnerMiniMax ModelOwner = "minimax" - ModelOwnerBaidu ModelOwner = "baidu" - ModelOwnerGoogle ModelOwner = "google" - ModelOwnerBAAI ModelOwner = "baai" - ModelOwnerFunAudioLLM ModelOwner = "funaudiollm" - ModelOwnerDoubao ModelOwner = "doubao" - ModelOwnerFishAudio ModelOwner = "fishaudio" - ModelOwnerChatGLM ModelOwner = "chatglm" - ModelOwnerStabilityAI ModelOwner = "stabilityai" - ModelOwnerNetease ModelOwner = "netease" - ModelOwnerAI360 ModelOwner = "ai360" - ModelOwnerAnthropic ModelOwner = "anthropic" - ModelOwnerMeta ModelOwner = "meta" - ModelOwnerBaichuan ModelOwner = "baichuan" - ModelOwnerMistral ModelOwner = "mistral" - ModelOwnerOpenChat ModelOwner = "openchat" - ModelOwnerMicrosoft ModelOwner = "microsoft" - ModelOwnerDefog ModelOwner = "defog" - ModelOwnerNexusFlow ModelOwner = "nexusflow" - ModelOwnerCohere ModelOwner = "cohere" - ModelOwnerHuggingFace ModelOwner = "huggingface" - ModelOwnerLingyiWanwu ModelOwner = "lingyiwanwu" - ModelOwnerStepFun ModelOwner = "stepfun" -) - //nolint:revive type ModelConfig struct { CreatedAt time.Time `gorm:"index;autoCreateTime" json:"created_at"` UpdatedAt time.Time `gorm:"index;autoUpdateTime" json:"updated_at"` Config map[ModelConfigKey]any `gorm:"serializer:fastjson;type:text" json:"config,omitempty"` - ImagePrices map[string]float64 `gorm:"serializer:fastjson" json:"image_prices"` + ImagePrices map[string]float64 `gorm:"serializer:fastjson" json:"image_prices,omitempty"` Model string `gorm:"primaryKey" json:"model"` Owner ModelOwner `gorm:"type:varchar(255);index" json:"owner"` - ImageMaxBatchSize int `json:"image_batch_size"` + ImageMaxBatchSize int `json:"image_batch_size,omitempty"` // relaymode/define.go Type int `json:"type"` - InputPrice float64 `json:"input_price"` - OutputPrice float64 `json:"output_price"` + InputPrice float64 `json:"input_price,omitempty"` + OutputPrice float64 
`json:"output_price,omitempty"` } func (c *ModelConfig) MarshalJSON() ([]byte, error) { @@ -86,6 +38,30 @@ func (c *ModelConfig) MarshalJSON() ([]byte, error) { }) } +func (c *ModelConfig) MaxContextTokens() (int, bool) { + return GetModelConfigInt(c.Config, ModelConfigMaxContextTokensKey) +} + +func (c *ModelConfig) MaxInputTokens() (int, bool) { + return GetModelConfigInt(c.Config, ModelConfigMaxInputTokensKey) +} + +func (c *ModelConfig) MaxOutputTokens() (int, bool) { + return GetModelConfigInt(c.Config, ModelConfigMaxOutputTokensKey) +} + +func (c *ModelConfig) SupportVoices() ([]string, bool) { + return GetModelConfigStringSlice(c.Config, ModelConfigSupportVoicesKey) +} + +func (c *ModelConfig) ToolChoice() (bool, bool) { + return GetModelConfigBool(c.Config, ModelConfigToolChoiceKey) +} + +func (c *ModelConfig) SupportFormats() ([]string, bool) { + return GetModelConfigStringSlice(c.Config, ModelConfigSupportFormatsKey) +} + func GetModelConfigs(startIdx int, num int, model string) (configs []*ModelConfig, total int64, err error) { tx := DB.Model(&ModelConfig{}) if model != "" { diff --git a/service/aiproxy/model/owner.go b/service/aiproxy/model/owner.go new file mode 100644 index 00000000000..4d562120bff --- /dev/null +++ b/service/aiproxy/model/owner.go @@ -0,0 +1,36 @@ +package model + +//nolint:revive +type ModelOwner string + +const ( + ModelOwnerOpenAI ModelOwner = "openai" + ModelOwnerAlibaba ModelOwner = "alibaba" + ModelOwnerTencent ModelOwner = "tencent" + ModelOwnerXunfei ModelOwner = "xunfei" + ModelOwnerDeepSeek ModelOwner = "deepseek" + ModelOwnerMoonshot ModelOwner = "moonshot" + ModelOwnerMiniMax ModelOwner = "minimax" + ModelOwnerBaidu ModelOwner = "baidu" + ModelOwnerGoogle ModelOwner = "google" + ModelOwnerBAAI ModelOwner = "baai" + ModelOwnerFunAudioLLM ModelOwner = "funaudiollm" + ModelOwnerDoubao ModelOwner = "doubao" + ModelOwnerFishAudio ModelOwner = "fishaudio" + ModelOwnerChatGLM ModelOwner = "chatglm" + ModelOwnerStabilityAI 
ModelOwner = "stabilityai" + ModelOwnerNetease ModelOwner = "netease" + ModelOwnerAI360 ModelOwner = "ai360" + ModelOwnerAnthropic ModelOwner = "anthropic" + ModelOwnerMeta ModelOwner = "meta" + ModelOwnerBaichuan ModelOwner = "baichuan" + ModelOwnerMistral ModelOwner = "mistral" + ModelOwnerOpenChat ModelOwner = "openchat" + ModelOwnerMicrosoft ModelOwner = "microsoft" + ModelOwnerDefog ModelOwner = "defog" + ModelOwnerNexusFlow ModelOwner = "nexusflow" + ModelOwnerCohere ModelOwner = "cohere" + ModelOwnerHuggingFace ModelOwner = "huggingface" + ModelOwnerLingyiWanwu ModelOwner = "lingyiwanwu" + ModelOwnerStepFun ModelOwner = "stepfun" +) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index cd4629c5722..e30510f767c 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -15,11 +15,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32768, - model.ModelConfigMaxInputTokensKey: 30720, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(30720), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-max-latest", @@ -27,11 +28,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32768, - model.ModelConfigMaxInputTokensKey: 30720, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(30720), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, // 
通义千问-Plus @@ -41,11 +43,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0008, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-plus-latest", @@ -53,11 +56,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0008, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 8000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(8000), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问-Turbo @@ -67,11 +71,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-turbo-latest", @@ -79,11 +84,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 1000000, - model.ModelConfigMaxInputTokensKey: 1000000, - model.ModelConfigMaxOutputTokensKey: 
8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1000000), + model.WithModelConfigMaxInputTokens(1000000), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, // Qwen-Long @@ -93,11 +99,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0005, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 10000000, - model.ModelConfigMaxInputTokensKey: 10000000, - model.ModelConfigMaxOutputTokensKey: 6000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(10000000), + model.WithModelConfigMaxInputTokens(10000000), + model.WithModelConfigMaxOutputTokens(6000), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问VL @@ -107,11 +114,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.02, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-vl-max-latest", @@ -119,11 +128,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.02, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-vl-plus", 
@@ -131,11 +142,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.008, OutputPrice: 0.008, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8000, - model.ModelConfigMaxInputTokensKey: 6000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigMaxInputTokens(6000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-vl-plus-latest", @@ -143,11 +156,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.008, OutputPrice: 0.008, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问OCR @@ -157,11 +172,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.005, OutputPrice: 0.005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 34096, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(34096), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigVision(true), + ), }, { Model: "qwen-vl-ocr-latest", @@ -169,11 +185,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.005, OutputPrice: 0.005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 34096, - model.ModelConfigMaxInputTokensKey: 
30000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(34096), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigVision(true), + ), }, // 通义千问Math @@ -183,11 +200,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-math-plus-latest", @@ -195,11 +213,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-math-turbo", @@ -207,11 +226,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-math-turbo-latest", @@ -219,11 +239,12 @@ var 
ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问Coder @@ -233,11 +254,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-coder-plus-latest", @@ -245,11 +267,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-coder-turbo", @@ -257,11 +280,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: 
model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-coder-turbo-latest", @@ -269,11 +293,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问2.5 @@ -283,11 +308,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2.5-32b-instruct", @@ -295,11 +321,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2.5-14b-instruct", @@ -307,11 +334,12 @@ var ModelList = 
[]*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2.5-7b-instruct", @@ -319,11 +347,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问2 @@ -333,11 +362,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 6144, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(128000), + model.WithModelConfigMaxOutputTokens(6144), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2-57b-a14b-instruct", @@ -345,11 +375,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 65536, - model.ModelConfigMaxInputTokensKey: 63488, - model.ModelConfigMaxOutputTokensKey: 6144, - }, + Config: model.NewModelConfig( 
+ model.WithModelConfigMaxContextTokens(65536), + model.WithModelConfigMaxInputTokens(63488), + model.WithModelConfigMaxOutputTokens(6144), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2-7b-instruct", @@ -357,11 +388,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 6144, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(128000), + model.WithModelConfigMaxOutputTokens(6144), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问1.5 @@ -371,11 +403,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.007, OutputPrice: 0.014, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 8000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(8000), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen1.5-72b-chat", @@ -383,11 +416,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.005, OutputPrice: 0.01, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 8000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(8000), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen1.5-32b-chat", @@ -395,11 +429,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 
0.0035, OutputPrice: 0.007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 8000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + model.WithModelConfigMaxOutputTokens(8000), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen1.5-14b-chat", @@ -407,11 +442,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.004, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8000, - model.ModelConfigMaxInputTokensKey: 6000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigMaxInputTokens(6000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen1.5-7b-chat", @@ -419,11 +455,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8000, - model.ModelConfigMaxInputTokensKey: 6000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigMaxInputTokens(6000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问 @@ -433,11 +470,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.02, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - model.ModelConfigMaxInputTokensKey: 30000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigMaxInputTokens(30000), + 
model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-14b-chat", @@ -445,11 +483,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.008, OutputPrice: 0.008, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8000, - model.ModelConfigMaxInputTokensKey: 6000, - model.ModelConfigMaxOutputTokensKey: 2000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigMaxInputTokens(6000), + model.WithModelConfigMaxOutputTokens(2000), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen-7b-chat", @@ -457,11 +496,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.006, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 7500, - model.ModelConfigMaxInputTokensKey: 6000, - model.ModelConfigMaxOutputTokensKey: 1500, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(7500), + model.WithModelConfigMaxInputTokens(6000), + model.WithModelConfigMaxOutputTokens(1500), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问数学模型 @@ -471,11 +511,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2.5-math-7b-instruct", @@ -483,11 +524,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, 
- model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2-math-72b-instruct", @@ -495,11 +537,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2-math-7b-instruct", @@ -507,11 +550,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxInputTokensKey: 3072, - model.ModelConfigMaxOutputTokensKey: 3072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxInputTokens(3072), + model.WithModelConfigMaxOutputTokens(3072), + model.WithModelConfigToolChoice(true), + ), }, // 通义千问Coder @@ -521,11 +565,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { 
Model: "qwen2.5-coder-14b-instruct", @@ -533,11 +578,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "qwen2.5-coder-7b-instruct", @@ -545,11 +591,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 131072, - model.ModelConfigMaxInputTokensKey: 129024, - model.ModelConfigMaxOutputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, // stable-diffusion @@ -578,10 +625,10 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.AudioSpeech, Owner: model.ModelOwnerAlibaba, InputPrice: 0.1, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 10000, - model.ModelConfigSupportFormatsKey: []string{"mp3", "wav", "pcm"}, - model.ModelConfigSupportVoicesKey: []string{ + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(10000), + model.WithModelConfigSupportFormats([]string{"mp3", "wav", "pcm"}), + model.WithModelConfigSupportVoices([]string{ "zhinan", "zhiqi", "zhichu", @@ -623,28 +670,28 @@ var ModelList = []*model.ModelConfig{ "donna", "brian", "waan", - }, - }, + }), + ), }, { Model: "paraformer-realtime-v2", Type: relaymode.AudioTranscription, Owner: model.ModelOwnerAlibaba, - Config: map[model.ModelConfigKey]any{ - 
model.ModelConfigMaxInputTokensKey: 10000, - model.ModelConfigSupportFormatsKey: []string{"pcm", "wav", "opus", "speex", "aac", "amr"}, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(10000), + model.WithModelConfigSupportFormats([]string{"pcm", "wav", "opus", "speex", "aac", "amr"}), + ), }, { Model: "gte-rerank", Type: relaymode.Rerank, Owner: model.ModelOwnerAlibaba, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4000, - model.ModelConfigMaxInputTokensKey: 4000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4000), + model.WithModelConfigMaxInputTokens(4000), + ), }, { @@ -652,26 +699,26 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerAlibaba, InputPrice: 0.0007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(2048), + ), }, { Model: "text-embedding-v2", Type: relaymode.Embeddings, Owner: model.ModelOwnerAlibaba, InputPrice: 0.0007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(2048), + ), }, { Model: "text-embedding-v3", Type: relaymode.Embeddings, Owner: model.ModelOwnerAlibaba, InputPrice: 0.0007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + ), }, } diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index f1c1c487f18..805de27b268 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -25,25 +25,8 @@ const ( // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Fm2vrveyu var v2ModelMap = map[string]string{ - "ERNIE-4.0-8K-Latest": "ernie-4.0-8k-latest", - 
"ERNIE-4.0-8K-Preview": "ernie-4.0-8k-preview", - "ERNIE-4.0-8K": "ernie-4.0-8k", - "ERNIE-4.0-Turbo-8K-Latest": "ernie-4.0-turbo-8k-latest", - "ERNIE-4.0-Turbo-8K-Preview": "ernie-4.0-turbo-8k-preview", - "ERNIE-4.0-Turbo-8K": "ernie-4.0-turbo-8k", - "ERNIE-4.0-Turbo-128K": "ernie-4.0-turbo-128k", - "ERNIE-3.5-8K-Preview": "ernie-3.5-8k-preview", - "ERNIE-3.5-8K": "ernie-3.5-8k", - "ERNIE-3.5-128K": "ernie-3.5-128k", - "ERNIE-Speed-8K": "ernie-speed-8k", - "ERNIE-Speed-128K": "ernie-speed-128k", - "ERNIE-Speed-Pro-128K": "ernie-speed-pro-128k", - "ERNIE-Lite-8K": "ernie-lite-8k", - "ERNIE-Lite-Pro-128K": "ernie-lite-pro-128k", - "ERNIE-Tiny-8K": "ernie-tiny-8k", "ERNIE-Character-8K": "ernie-char-8k", "ERNIE-Character-Fiction-8K": "ernie-char-fiction-8k", - "ERNIE-Novel-8K": "ernie-novel-8k", } func toV2ModelName(modelName string) string { @@ -80,8 +63,10 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade case relaymode.ChatCompletions: actModel := meta.ActualModelName v2Model := toV2ModelName(actModel) - meta.ActualModelName = v2Model - defer func() { meta.ActualModelName = actModel }() + if v2Model != actModel { + meta.ActualModelName = v2Model + defer func() { meta.ActualModelName = actModel }() + } return openai.ConvertRequest(meta, req) default: return nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) diff --git a/service/aiproxy/relay/adaptor/baiduv2/constants.go b/service/aiproxy/relay/adaptor/baiduv2/constants.go index 7d11339a440..738df56db67 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/constants.go +++ b/service/aiproxy/relay/adaptor/baiduv2/constants.go @@ -5,78 +5,86 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) +// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Fm2vrveyu + var ModelList = []*model.ModelConfig{ { - Model: "ERNIE-4.0-8K-Preview", + Model: "ERNIE-4.0-8K-Latest", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.03, OutputPrice: 0.09, - 
Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 5120, - model.ModelConfigMaxInputTokensKey: 5120, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(5120), + model.WithModelConfigMaxInputTokens(5120), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "ERNIE-4.0-8K", + Model: "ERNIE-4.0-8K-Preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.03, OutputPrice: 0.09, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 5120, - model.ModelConfigMaxInputTokensKey: 5120, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(5120), + model.WithModelConfigMaxInputTokens(5120), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "ERNIE-4.0-8K-Latest", + Model: "ERNIE-4.0-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.03, OutputPrice: 0.09, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 5120, - model.ModelConfigMaxInputTokensKey: 5120, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(5120), + model.WithModelConfigMaxInputTokens(5120), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "ERNIE-4.0-Turbo-8K", + Model: "ERNIE-4.0-Turbo-8K-Latest", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + model.WithModelConfigMaxInputTokens(6144), + 
model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "ERNIE-4.0-Turbo-8K-Latest", + Model: "ERNIE-4.0-Turbo-8K-Preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "ERNIE-4.0-Turbo-8K-Preview", + Model: "ERNIE-4.0-Turbo-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { Model: "ERNIE-4.0-Turbo-128K", @@ -84,24 +92,25 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 126976, - model.ModelConfigMaxInputTokensKey: 126976, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(126976), + model.WithModelConfigMaxInputTokens(126976), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, - { Model: "ERNIE-3.5-8K-Preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.0008, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - 
model.ModelConfigMaxContextTokensKey: 5120, - model.ModelConfigMaxInputTokensKey: 5120, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(5120), + model.WithModelConfigMaxInputTokens(5120), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { Model: "ERNIE-3.5-8K", @@ -109,11 +118,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0008, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 5120, - model.ModelConfigMaxInputTokensKey: 5120, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(5120), + model.WithModelConfigMaxInputTokens(5120), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigToolChoice(true), + ), }, { Model: "ERNIE-3.5-128K", @@ -121,24 +131,24 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0008, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 126976, - model.ModelConfigMaxInputTokensKey: 126976, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(126976), + model.WithModelConfigMaxInputTokens(126976), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, - { Model: "ERNIE-Speed-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 7168, - model.ModelConfigMaxInputTokensKey: 7168, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(7168), + model.WithModelConfigMaxInputTokens(7168), + model.WithModelConfigMaxOutputTokens(2048), + ), }, { Model: "ERNIE-Speed-128K", @@ 
-146,11 +156,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 126976, - model.ModelConfigMaxInputTokensKey: 126976, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(126976), + model.WithModelConfigMaxInputTokens(126976), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { Model: "ERNIE-Speed-Pro-128K", @@ -158,24 +168,23 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 126976, - model.ModelConfigMaxInputTokensKey: 126976, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(126976), + model.WithModelConfigMaxInputTokens(126976), + model.WithModelConfigMaxOutputTokens(4096), + ), }, - { Model: "ERNIE-Lite-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + ), }, { Model: "ERNIE-Lite-Pro-128K", @@ -183,37 +192,36 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0002, OutputPrice: 0.0004, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 126976, - model.ModelConfigMaxInputTokensKey: 126976, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(126976), + model.WithModelConfigMaxInputTokens(126976), + 
model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, - { Model: "ERNIE-Tiny-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + ), }, - { Model: "ERNIE-Character-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + ), }, { Model: "ERNIE-Character-Fiction-8K", @@ -221,23 +229,22 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 5120, - model.ModelConfigMaxInputTokensKey: 5120, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(5120), + model.WithModelConfigMaxInputTokens(5120), + model.WithModelConfigMaxOutputTokens(2048), + ), }, - { Model: "ERNIE-Novel-8K", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerBaidu, InputPrice: 0.04, OutputPrice: 0.12, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 6144, - model.ModelConfigMaxInputTokensKey: 6144, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(6144), + 
model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + ), }, } diff --git a/service/aiproxy/relay/adaptor/deepseek/constants.go b/service/aiproxy/relay/adaptor/deepseek/constants.go index 6271cb354eb..8a5f73ca8b2 100644 --- a/service/aiproxy/relay/adaptor/deepseek/constants.go +++ b/service/aiproxy/relay/adaptor/deepseek/constants.go @@ -12,14 +12,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDeepSeek, InputPrice: 0.001, OutputPrice: 0.002, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 64000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, - }, - { - Model: "deepseek-coder", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerDeepSeek, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(64000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, } diff --git a/service/aiproxy/relay/adaptor/doubao/constants.go b/service/aiproxy/relay/adaptor/doubao/constants.go index d90289c852d..33d11138748 100644 --- a/service/aiproxy/relay/adaptor/doubao/constants.go +++ b/service/aiproxy/relay/adaptor/doubao/constants.go @@ -8,16 +8,37 @@ import ( // https://console.volcengine.com/ark/region:ark+cn-beijing/model var ModelList = []*model.ModelConfig{ + { + Model: "Doubao-vision-lite-32k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDoubao, + InputPrice: 0.008, + OutputPrice: 0.008, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(32768), + ), + }, + { + Model: "Doubao-vision-pro-32k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDoubao, + InputPrice: 0.02, + OutputPrice: 0.02, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(32768), + ), + }, { Model: "Doubao-pro-256k", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerDoubao, InputPrice: 0.0050, OutputPrice: 0.0090, - Config: map[model.ModelConfigKey]any{ - 
model.ModelConfigMaxContextTokensKey: 256000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(256000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "Doubao-pro-128k", @@ -25,10 +46,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0050, OutputPrice: 0.0090, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(128000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "Doubao-pro-32k", @@ -36,10 +58,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0008, OutputPrice: 0.0020, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32768, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "Doubao-pro-4k", @@ -47,10 +70,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0008, OutputPrice: 0.0020, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "Doubao-lite-128k", @@ -58,10 +82,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0008, OutputPrice: 0.0010, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: 
model.NewModelConfig( + model.WithModelConfigMaxContextTokens(128000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "Doubao-lite-32k", @@ -69,10 +94,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32768, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "Doubao-lite-4k", @@ -80,10 +106,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0003, OutputPrice: 0.0006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { @@ -91,17 +118,17 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerDoubao, InputPrice: 0.0005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(4096), + ), }, { Model: "Doubao-embedding-large", Type: relaymode.Embeddings, Owner: model.ModelOwnerDoubao, InputPrice: 0.0007, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(4096), + ), }, } diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/constants.go b/service/aiproxy/relay/adaptor/doubaoaudio/constants.go index 0dcc1566aca..18dc46c58d7 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/constants.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/constants.go @@ 
-13,14 +13,14 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.AudioSpeech, Owner: model.ModelOwnerDoubao, InputPrice: 0.5, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigSupportFormatsKey: []string{ + Config: model.NewModelConfig( + model.WithModelConfigSupportFormats([]string{ "pcm", "mp3", "wav", "ogg_opus", - }, - model.ModelConfigSupportVoicesKey: []string{ + }), + model.WithModelConfigSupportVoices([]string{ "zh_female_cancan_mars_bigtts", "zh_female_qingxinnvsheng_mars_bigtts", "zh_female_shuangkuaisisi_moon_bigtts", @@ -92,8 +92,8 @@ var ModelList = []*model.ModelConfig{ "zh_male_qingcang_mars_bigtts", "zh_female_gufengshaoyu_mars_bigtts", "zh_female_wenroushunv_mars_bigtts", - }, - }, + }), + ), }, // { diff --git a/service/aiproxy/relay/adaptor/minimax/constants.go b/service/aiproxy/relay/adaptor/minimax/constants.go index 468b0ac0e84..fac1a9bd56f 100644 --- a/service/aiproxy/relay/adaptor/minimax/constants.go +++ b/service/aiproxy/relay/adaptor/minimax/constants.go @@ -14,9 +14,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.01, OutputPrice: 0.01, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 245760, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(245760), + model.WithModelConfigToolChoice(true), + ), }, { Model: "abab6.5s-chat", @@ -24,9 +25,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.001, OutputPrice: 0.001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 245760, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(245760), + model.WithModelConfigToolChoice(true), + ), }, { Model: "abab6.5g-chat", @@ -34,9 +36,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.005, OutputPrice: 0.005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8192, - }, + Config: 
model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "abab6.5t-chat", @@ -44,9 +47,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.005, OutputPrice: 0.005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "abab5.5s-chat", @@ -54,9 +58,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.005, OutputPrice: 0.005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "abab5.5-chat", @@ -64,9 +69,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.015, OutputPrice: 0.015, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(16384), + model.WithModelConfigToolChoice(true), + ), }, { @@ -74,9 +80,9 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.AudioSpeech, Owner: model.ModelOwnerMiniMax, InputPrice: 0.2, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigSupportFormatsKey: []string{"pcm", "wav", "flac", "mp3"}, - model.ModelConfigSupportVoicesKey: []string{ + Config: model.NewModelConfig( + model.WithModelConfigSupportFormats([]string{"pcm", "wav", "flac", "mp3"}), + model.WithModelConfigSupportVoices([]string{ "male-qn-qingse", "male-qn-jingying", "male-qn-badao", "male-qn-daxuesheng", "female-shaonv", "female-yujie", "female-chengshu", "female-tianmei", "presenter_male", "presenter_female", @@ -90,7 +96,7 @@ var ModelList = []*model.ModelConfig{ "Santa_Claus", "Grinch", "Rudolph", "Arnold", "Charming_Santa", 
"Charming_Lady", "Sweet_Girl", "Cute_Elf", "Attractive_Girl", "Serene_Woman", - }, - }, + }), + ), }, } diff --git a/service/aiproxy/relay/adaptor/moonshot/constants.go b/service/aiproxy/relay/adaptor/moonshot/constants.go index 2a77f4cef50..11b8cf6a793 100644 --- a/service/aiproxy/relay/adaptor/moonshot/constants.go +++ b/service/aiproxy/relay/adaptor/moonshot/constants.go @@ -12,9 +12,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMoonshot, InputPrice: 0.012, OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "moonshot-v1-32k", @@ -22,9 +23,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMoonshot, InputPrice: 0.024, OutputPrice: 0.024, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 32768, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(32768), + model.WithModelConfigToolChoice(true), + ), }, { Model: "moonshot-v1-128k", @@ -32,8 +34,9 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMoonshot, InputPrice: 0.06, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 131072, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(131072), + model.WithModelConfigToolChoice(true), + ), }, } diff --git a/service/aiproxy/relay/adaptor/openai/constants.go b/service/aiproxy/relay/adaptor/openai/constants.go index bbdb1f728f1..a72ce8e8649 100644 --- a/service/aiproxy/relay/adaptor/openai/constants.go +++ b/service/aiproxy/relay/adaptor/openai/constants.go @@ -12,26 +12,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.022, OutputPrice: 0.044, - }, - { - Model: "gpt-3.5-turbo-0301", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: 
"gpt-3.5-turbo-0613", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-3.5-turbo-1106", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-3.5-turbo-0125", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "gpt-3.5-turbo-16k", @@ -39,11 +23,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.022, OutputPrice: 0.044, - }, - { - Model: "gpt-3.5-turbo-16k-0613", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(16384), + model.WithModelConfigToolChoice(true), + ), }, { Model: "gpt-3.5-turbo-instruct", @@ -56,26 +39,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.22, OutputPrice: 0.44, - }, - { - Model: "gpt-4-0314", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-4-0613", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-4-1106-preview", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-4-0125-preview", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { Model: "gpt-4-32k", @@ -83,21 +50,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.44, OutputPrice: 0.88, - }, - { - Model: "gpt-4-32k-0314", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-4-32k-0613", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-4-turbo-preview", - Type: relaymode.ChatCompletions, - Owner: 
model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigToolChoice(true), + ), }, { Model: "gpt-4-turbo", @@ -105,11 +61,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.071, OutputPrice: 0.213, - }, - { - Model: "gpt-4-turbo-2024-04-09", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "gpt-4o", @@ -117,16 +72,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.01775, OutputPrice: 0.071, - }, - { - Model: "gpt-4o-2024-05-13", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, - }, - { - Model: "gpt-4o-2024-08-06", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, { Model: "chatgpt-4o-latest", @@ -139,11 +89,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.001065, OutputPrice: 0.00426, - }, - { - Model: "gpt-4o-mini-2024-07-18", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerOpenAI, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "gpt-4-vision-preview", @@ -156,6 +105,9 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.0213, OutputPrice: 0.0852, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + ), }, { Model: "o1-preview", @@ -163,7 +115,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerOpenAI, InputPrice: 0.1065, OutputPrice: 0.426, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + ), }, + { Model: 
"text-embedding-ada-002", Type: relaymode.Embeddings, diff --git a/service/aiproxy/relay/adaptor/tencent/constants.go b/service/aiproxy/relay/adaptor/tencent/constants.go index da5770960d9..1768b104c3c 100644 --- a/service/aiproxy/relay/adaptor/tencent/constants.go +++ b/service/aiproxy/relay/adaptor/tencent/constants.go @@ -5,11 +5,33 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) +// https://cloud.tencent.com/document/product/1729/104753 + var ModelList = []*model.ModelConfig{ { - Model: "hunyuan-lite", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerTencent, + Model: "hunyuan-lite", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerTencent, + InputPrice: 0.0001, + OutputPrice: 0.0001, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(262144), + model.WithModelConfigMaxInputTokens(256000), + model.WithModelConfigMaxOutputTokens(6144), + ), + }, + { + Model: "hunyuan-turbo-latest", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerTencent, + InputPrice: 0.015, + OutputPrice: 0.05, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(28672), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "hunyuan-turbo", @@ -17,13 +39,26 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.015, OutputPrice: 0.05, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(28672), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, + { Model: "hunyuan-pro", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerTencent, InputPrice: 0.03, OutputPrice: 0.10, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(28672), + model.WithModelConfigMaxOutputTokens(4096), + 
model.WithModelConfigToolChoice(true), + ), }, { Model: "hunyuan-large", @@ -31,6 +66,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.012, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(28672), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "hunyuan-large-longcontext", @@ -38,6 +79,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.006, OutputPrice: 0.018, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(6144), + ), }, { Model: "hunyuan-standard", @@ -45,6 +90,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.0008, OutputPrice: 0.002, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxOutputTokens(2048), + ), }, // { // Model: "hunyuan-standard-256K", @@ -59,6 +108,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.008, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { Model: "hunyuan-functioncall", @@ -66,6 +119,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.008, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "hunyuan-code", @@ -73,6 +131,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.008, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigMaxInputTokens(4096), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { Model: 
"hunyuan-turbo-vision", @@ -80,6 +143,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.08, OutputPrice: 0.08, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, { Model: "hunyuan-vision", @@ -87,6 +157,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.018, OutputPrice: 0.018, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigMaxInputTokens(6144), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), + ), }, { diff --git a/service/aiproxy/relay/adaptor/xunfei/constants.go b/service/aiproxy/relay/adaptor/xunfei/constants.go index 3f6ad14ccec..ee0b4158502 100644 --- a/service/aiproxy/relay/adaptor/xunfei/constants.go +++ b/service/aiproxy/relay/adaptor/xunfei/constants.go @@ -12,9 +12,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.14, OutputPrice: 0.14, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "SparkDesk-Lite", @@ -22,9 +23,9 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.001, OutputPrice: 0.001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + ), }, { Model: "SparkDesk-Max", @@ -32,9 +33,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.06, OutputPrice: 0.06, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 
128000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigToolChoice(true), + ), }, { Model: "SparkDesk-Max-32k", @@ -42,9 +44,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.09, OutputPrice: 0.09, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigToolChoice(true), + ), }, { Model: "SparkDesk-Pro", @@ -52,9 +55,9 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.014, OutputPrice: 0.014, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + ), }, { Model: "SparkDesk-Pro-128K", @@ -62,8 +65,8 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.026, OutputPrice: 0.026, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + ), }, } diff --git a/service/aiproxy/relay/adaptor/zhipu/constants.go b/service/aiproxy/relay/adaptor/zhipu/constants.go index bd001273f38..24e36e9df44 100644 --- a/service/aiproxy/relay/adaptor/zhipu/constants.go +++ b/service/aiproxy/relay/adaptor/zhipu/constants.go @@ -12,10 +12,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.001, OutputPrice: 0.001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { Model: "glm-4", @@ -23,10 +23,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.1, OutputPrice: 0.1, 
- Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4-plus", @@ -34,10 +35,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.05, OutputPrice: 0.05, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4-air", @@ -45,10 +47,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.001, OutputPrice: 0.001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4-airx", @@ -56,10 +59,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.01, OutputPrice: 0.01, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8192, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4-long", @@ -67,10 +71,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.001, OutputPrice: 0.001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 1000000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + 
Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1024000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4-flashx", @@ -78,10 +83,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4-flash", @@ -89,10 +95,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { Model: "glm-4v-flash", @@ -100,10 +107,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 8192, - model.ModelConfigMaxOutputTokensKey: 1024, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + model.WithModelConfigMaxOutputTokens(1024), + model.WithModelConfigVision(true), + ), }, { Model: "glm-4v", @@ -111,10 +119,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.05, OutputPrice: 0.05, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 2048, - model.ModelConfigMaxOutputTokensKey: 1024, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(2048), + model.WithModelConfigMaxOutputTokens(1024), + 
model.WithModelConfigVision(true), + ), }, { Model: "glm-4v-plus", @@ -122,10 +131,11 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.01, OutputPrice: 0.01, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 8192, - model.ModelConfigMaxOutputTokensKey: 1024, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + model.WithModelConfigMaxOutputTokens(1024), + model.WithModelConfigVision(true), + ), }, { @@ -134,10 +144,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.015, OutputPrice: 0.015, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4096, - model.ModelConfigMaxOutputTokensKey: 2048, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4096), + model.WithModelConfigMaxOutputTokens(2048), + ), }, { Model: "emohaa", @@ -145,10 +155,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.015, OutputPrice: 0.015, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 8192, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8192), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { Model: "codegeex-4", @@ -156,10 +166,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 128000, - model.ModelConfigMaxOutputTokensKey: 4096, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { @@ -167,18 +177,18 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerChatGLM, InputPrice: 0.0005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 8192, - }, + Config: 
model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + ), }, { Model: "embedding-3", Type: relaymode.Embeddings, Owner: model.ModelOwnerChatGLM, InputPrice: 0.0005, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxInputTokensKey: 8192, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + ), }, { @@ -189,9 +199,9 @@ var ModelList = []*model.ModelConfig{ ImagePrices: map[string]float64{ "1024x1024": 0.1, }, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxOutputTokensKey: 1024, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxOutputTokens(1024), + ), }, { Model: "cogview-3-plus", @@ -207,8 +217,8 @@ var ModelList = []*model.ModelConfig{ "1440x720": 0.06, "720x1440": 0.06, }, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxOutputTokensKey: 1024, - }, + Config: model.NewModelConfig( + model.WithModelConfigMaxOutputTokens(1024), + ), }, } From 76cb09bdec02df7e92da85e44ec90c88d3ea0add Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 15 Dec 2024 10:42:37 +0800 Subject: [PATCH 002/167] fix: model config vision --- service/aiproxy/model/modelconfig.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index f513e592e1f..09924a613d9 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -50,11 +50,15 @@ func (c *ModelConfig) MaxOutputTokens() (int, bool) { return GetModelConfigInt(c.Config, ModelConfigMaxOutputTokensKey) } +func (c *ModelConfig) SupportVision() (bool, bool) { + return GetModelConfigBool(c.Config, ModelConfigVisionKey) +} + func (c *ModelConfig) SupportVoices() ([]string, bool) { return GetModelConfigStringSlice(c.Config, ModelConfigSupportVoicesKey) } -func (c *ModelConfig) ToolChoice() (bool, bool) { +func (c *ModelConfig) SupportToolChoice() (bool, bool) { return GetModelConfigBool(c.Config, ModelConfigToolChoiceKey) } 
From 4ffa8f99160e9054e27706d49e278e6e3adf82b8 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 16 Dec 2024 14:45:02 +0800 Subject: [PATCH 003/167] feat: aiproxy dashboard api --- service/aiproxy/controller/dashboard.go | 42 ++++++++ service/aiproxy/controller/group.go | 15 --- service/aiproxy/model/log.go | 127 +++++++++++++++++++----- service/aiproxy/router/api.go | 5 + 4 files changed, 150 insertions(+), 39 deletions(-) create mode 100644 service/aiproxy/controller/dashboard.go diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go new file mode 100644 index 00000000000..d63bd117c04 --- /dev/null +++ b/service/aiproxy/controller/dashboard.go @@ -0,0 +1,42 @@ +package controller + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" +) + +func GetGroupDashboard(c *gin.Context) { + id := c.Param("id") + if id == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + + end := time.Now() + var start time.Time + switch c.Query("type") { + case "month": + start = end.AddDate(0, 0, -30) + case "two_week": + start = end.AddDate(0, 0, -14) + case "week": + start = end.AddDate(0, 0, -7) + case "day": + fallthrough + default: + start = end.AddDate(0, 0, -1) + } + tokenName := c.Query("token_name") + modelName := c.Query("model") + + dashboards, err := model.GetDashboardData(id, start, end, tokenName, modelName) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "failed to get statistics") + return + } + middleware.SuccessResponse(c, dashboards) +} diff --git a/service/aiproxy/controller/group.go b/service/aiproxy/controller/group.go index b52e8a79e20..0389e52ddc2 100644 --- a/service/aiproxy/controller/group.go +++ b/service/aiproxy/controller/group.go @@ -3,7 +3,6 @@ package controller import ( "net/http" "strconv" - "time" json "github.com/json-iterator/go" @@ -78,20 
+77,6 @@ func GetGroup(c *gin.Context) { middleware.SuccessResponse(c, group) } -func GetGroupDashboard(c *gin.Context) { - id := c.Param("id") - now := time.Now() - startOfDay := now.Truncate(24*time.Hour).AddDate(0, 0, -6).Unix() - endOfDay := now.Truncate(24 * time.Hour).Add(24*time.Hour - time.Second).Unix() - - dashboards, err := model.SearchLogsByDayAndModel(id, time.Unix(startOfDay, 0), time.Unix(endOfDay, 0)) - if err != nil { - middleware.ErrorResponse(c, http.StatusOK, "failed to get statistics") - return - } - middleware.SuccessResponse(c, dashboards) -} - type UpdateGroupQPMRequest struct { QPM int64 `json:"qpm"` } diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 66cb6e95d17..7aeb62cb845 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -567,36 +567,115 @@ func DeleteGroupLogs(groupID string) (int64, error) { return result.RowsAffected, result.Error } -type LogStatistic struct { - Day string `gorm:"column:day"` - Model string `gorm:"column:model"` - RequestCount int `gorm:"column:request_count"` - PromptTokens int `gorm:"column:prompt_tokens"` - CompletionTokens int `gorm:"column:completion_tokens"` +type HourlyChartData struct { + Timestamp int64 `json:"timestamp"` + RequestCount int64 `json:"request_count"` + TotalCost float64 `json:"total_cost"` + ExceptionCount int64 `json:"exception_count"` } -func SearchLogsByDayAndModel(group string, start time.Time, end time.Time) (logStatistics []*LogStatistic, err error) { - groupSelect := "DATE_FORMAT(FROM_UNIXTIME(created_at), '%Y-%m-%d') as day" +type DashboardResponse struct { + ChartData []*HourlyChartData `json:"chart_data"` + TokenNames []string `json:"token_names"` + ModelNames []string `json:"model_names"` + TotalCount int64 `json:"total_count"` + ExceptionCount int64 `json:"exception_count"` +} - if common.UsingPostgreSQL { - groupSelect = "TO_CHAR(date_trunc('day', to_timestamp(created_at)), 'YYYY-MM-DD') as day" +func getHourTimestamp() 
string { + switch { + case common.UsingMySQL: + return "UNIX_TIMESTAMP(DATE_FORMAT(request_at, '%Y-%m-%d %H:00:00'))" + case common.UsingPostgreSQL: + return "EXTRACT(EPOCH FROM date_trunc('hour', request_at))" + case common.UsingSQLite: + return "STRFTIME('%s', STRFTIME('%Y-%m-%d %H:00:00', request_at))" + default: + return "" + } +} + +func getChartData(group string, start, end time.Time, tokenName, modelName string) ([]*HourlyChartData, error) { + var chartData []*HourlyChartData + + hourTimestamp := getHourTimestamp() + if hourTimestamp == "" { + return nil, errors.New("unsupported hour format") + } + + query := LogDB.Table("logs"). + Select(hourTimestamp+" as timestamp, count(*) as request_count, sum(price) as total_cost, sum(case when code != 200 then 1 else 0 end) as exception_count"). + Where("group_id = ? AND request_at BETWEEN ? AND ?", group, start, end). + Group("timestamp"). + Order("timestamp ASC") + + if tokenName != "" { + query = query.Where("token_name = ?", tokenName) + } + if modelName != "" { + query = query.Where("model = ?", modelName) + } + + err := query.Scan(&chartData).Error + return chartData, err +} + +func getGroupLogDistinctValues[T any](field string, group string, start, end time.Time) ([]T, error) { + var values []T + err := LogDB. + Model(&Log{}). + Distinct(field). + Where("group_id = ? AND request_at BETWEEN ? AND ?", group, start, end). 
+ Pluck(field, &values).Error + return values, err +} + +func sumTotalCount(chartData []*HourlyChartData) int64 { + var count int64 + for _, data := range chartData { + count += data.RequestCount + } + return count +} + +func sumExceptionCount(chartData []*HourlyChartData) int64 { + var count int64 + for _, data := range chartData { + count += data.ExceptionCount + } + return count +} + +func GetDashboardData(group string, start, end time.Time, tokenName string, modelName string) (*DashboardResponse, error) { + if end.IsZero() { + end = time.Now() + } else if end.Before(start) { + return nil, errors.New("end time is before start time") + } + + chartData, err := getChartData(group, start, end, tokenName, modelName) + if err != nil { + return nil, err } - if common.UsingSQLite { - groupSelect = "strftime('%Y-%m-%d', datetime(created_at, 'unixepoch')) as day" + tokenNames, err := getGroupLogDistinctValues[string]("token_name", group, start, end) + if err != nil { + return nil, err + } + + modelNames, err := getGroupLogDistinctValues[string]("model", group, start, end) + if err != nil { + return nil, err } - err = LogDB.Raw(` - SELECT `+groupSelect+`, - model, count(1) as request_count, - sum(prompt_tokens) as prompt_tokens, - sum(completion_tokens) as completion_tokens - FROM logs - WHERE group_id = ? - AND created_at BETWEEN ? AND ? 
- GROUP BY day, model - ORDER BY day, model - `, group, start, end).Scan(&logStatistics).Error + totalCount := sumTotalCount(chartData) + exceptionCount := sumExceptionCount(chartData) - return logStatistics, err + return &DashboardResponse{ + ChartData: chartData, + TokenNames: tokenNames, + ModelNames: modelNames, + TotalCount: totalCount, + ExceptionCount: exceptionCount, + }, nil } diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 0e6d36486c2..251a986ce85 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -33,6 +33,11 @@ func SetAPIRouter(router *gin.Engine) { modelsRoute.GET("/default/:type", controller.ChannelDefaultModelsAndMappingByType) } + dashboardRoute := apiRouter.Group("/dashboard") + { + dashboardRoute.GET("/:id", controller.GetGroupDashboard) + } + groupsRoute := apiRouter.Group("/groups") { groupsRoute.GET("/", controller.GetGroups) From 2ef25c010a63dc9b99400252d59ff235ae4ffcea Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 16 Dec 2024 14:52:26 +0800 Subject: [PATCH 004/167] fix: two week and pg hour format --- service/aiproxy/controller/dashboard.go | 2 +- service/aiproxy/model/log.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index d63bd117c04..2a010717805 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -22,7 +22,7 @@ func GetGroupDashboard(c *gin.Context) { case "month": start = end.AddDate(0, 0, -30) case "two_week": - start = end.AddDate(0, 0, -14) + start = end.AddDate(0, 0, -15) case "week": start = end.AddDate(0, 0, -7) case "day": diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 7aeb62cb845..3024964d4cb 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -587,7 +587,7 @@ func getHourTimestamp() string { case common.UsingMySQL: return 
"UNIX_TIMESTAMP(DATE_FORMAT(request_at, '%Y-%m-%d %H:00:00'))" case common.UsingPostgreSQL: - return "EXTRACT(EPOCH FROM date_trunc('hour', request_at))" + return "FLOOR(EXTRACT(EPOCH FROM date_trunc('hour', request_at)))" case common.UsingSQLite: return "STRFTIME('%s', STRFTIME('%Y-%m-%d %H:00:00', request_at))" default: From 8a5142f22eb6b560c945bbe283ae5cfe9e0264ce Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 16 Dec 2024 14:58:08 +0800 Subject: [PATCH 005/167] fix: model tag name --- service/aiproxy/model/log.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 3024964d4cb..7ba8f797b87 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -577,7 +577,7 @@ type HourlyChartData struct { type DashboardResponse struct { ChartData []*HourlyChartData `json:"chart_data"` TokenNames []string `json:"token_names"` - ModelNames []string `json:"model_names"` + Models []string `json:"models"` TotalCount int64 `json:"total_count"` ExceptionCount int64 `json:"exception_count"` } @@ -663,7 +663,7 @@ func GetDashboardData(group string, start, end time.Time, tokenName string, mode return nil, err } - modelNames, err := getGroupLogDistinctValues[string]("model", group, start, end) + models, err := getGroupLogDistinctValues[string]("model", group, start, end) if err != nil { return nil, err } @@ -674,7 +674,7 @@ func GetDashboardData(group string, start, end time.Time, tokenName string, mode return &DashboardResponse{ ChartData: chartData, TokenNames: tokenNames, - ModelNames: modelNames, + Models: models, TotalCount: totalCount, ExceptionCount: exceptionCount, }, nil From 52036b201793099e8f775b204ff3b4da6fe823dc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 17 Dec 2024 16:06:43 +0800 Subject: [PATCH 006/167] feat: model rpm limit --- service/aiproxy/common/config/config.go | 10 ---- service/aiproxy/controller/group.go | 18 +++---- 
service/aiproxy/middleware/auth.go | 14 ----- service/aiproxy/middleware/distributor.go | 35 ++++++++++++ service/aiproxy/middleware/rate-limit.go | 12 ++--- service/aiproxy/model/cache.go | 40 +++++++------- service/aiproxy/model/channel.go | 5 +- service/aiproxy/model/group.go | 10 ++-- service/aiproxy/model/modelconfig.go | 8 +++ service/aiproxy/model/option.go | 11 +--- .../aiproxy/relay/adaptor/ali/constants.go | 54 +++++++++++++++++++ .../aiproxy/relay/adaptor/baidu/constants.go | 11 ++-- .../relay/adaptor/baiduv2/constants.go | 19 +++++++ .../relay/adaptor/deepseek/constants.go | 1 + .../aiproxy/relay/adaptor/doubao/constants.go | 12 +++++ .../relay/adaptor/doubaoaudio/constants.go | 1 + .../relay/adaptor/minimax/constants.go | 7 +++ .../relay/adaptor/moonshot/constants.go | 3 ++ .../relay/adaptor/siliconflow/constants.go | 9 ++-- .../relay/adaptor/tencent/constants.go | 16 ++++++ .../aiproxy/relay/adaptor/xunfei/constants.go | 8 +++ .../aiproxy/relay/adaptor/zhipu/constants.go | 34 +++++++----- service/aiproxy/router/api.go | 2 +- 23 files changed, 244 insertions(+), 96 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index d68f359b1c8..d47a9f7cc8d 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -107,7 +107,6 @@ var ( globalAPIRateLimitNum atomic.Int64 defaultChannelModels atomic.Value defaultChannelModelMapping atomic.Value - defaultGroupQPM atomic.Int64 groupMaxTokenNum atomic.Int32 ) @@ -125,15 +124,6 @@ func SetGlobalAPIRateLimitNum(num int64) { globalAPIRateLimitNum.Store(num) } -// group默认qpm,如果group没有设置qpm,则使用该qpm -func GetDefaultGroupQPM() int64 { - return defaultGroupQPM.Load() -} - -func SetDefaultGroupQPM(qpm int64) { - defaultGroupQPM.Store(qpm) -} - func GetDefaultChannelModels() map[int][]string { return defaultChannelModels.Load().(map[int][]string) } diff --git a/service/aiproxy/controller/group.go 
b/service/aiproxy/controller/group.go index 0389e52ddc2..82575da9a6c 100644 --- a/service/aiproxy/controller/group.go +++ b/service/aiproxy/controller/group.go @@ -77,23 +77,23 @@ func GetGroup(c *gin.Context) { middleware.SuccessResponse(c, group) } -type UpdateGroupQPMRequest struct { - QPM int64 `json:"qpm"` +type UpdateGroupRPMRequest struct { + RPMRatio float64 `json:"rpm_ratio"` } -func UpdateGroupQPM(c *gin.Context) { +func UpdateGroupRPM(c *gin.Context) { id := c.Param("id") if id == "" { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } - req := UpdateGroupQPMRequest{} + req := UpdateGroupRPMRequest{} err := json.NewDecoder(c.Request.Body).Decode(&req) if err != nil { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } - err = model.UpdateGroupQPM(id, req.QPM) + err = model.UpdateGroupRPM(id, req.RPMRatio) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -155,8 +155,8 @@ func DeleteGroups(c *gin.Context) { } type CreateGroupRequest struct { - ID string `json:"id"` - QPM int64 `json:"qpm"` + ID string `json:"id"` + RPMRatio float64 `json:"rpm_ratio"` } func CreateGroup(c *gin.Context) { @@ -167,8 +167,8 @@ func CreateGroup(c *gin.Context) { return } if err := model.CreateGroup(&model.Group{ - ID: group.ID, - QPM: group.QPM, + ID: group.ID, + RPMRatio: group.RPMRatio, }); err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index a8408f1abcd..e63528bb499 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -4,7 +4,6 @@ import ( "fmt" "net/http" "strings" - "time" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/config" @@ -47,7 +46,6 @@ func AdminAuth(c *gin.Context) { func TokenAuth(c *gin.Context) { log := GetLogger(c) - ctx := c.Request.Context() key := c.Request.Header.Get("Authorization") key = 
strings.TrimPrefix( strings.TrimPrefix(key, "Bearer "), @@ -86,18 +84,6 @@ func TokenAuth(c *gin.Context) { if len(token.Models) == 0 { token.Models = model.CacheGetEnabledModels() } - if group.QPM <= 0 { - group.QPM = config.GetDefaultGroupQPM() - } - if group.QPM > 0 { - ok := ForceRateLimit(ctx, "group_qpm:"+group.ID, int(group.QPM), time.Minute) - if !ok { - abortWithMessage(c, http.StatusTooManyRequests, - group.ID+" is requesting too frequently", - ) - return - } - } c.Set(ctxkey.Group, group) c.Set(ctxkey.Token, token) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index c0501d35be4..3084594205c 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "slices" + "time" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/config" @@ -15,6 +16,10 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) +const ( + groupModelRPMKey = "group_model_rpm:%s:%s" +) + type ModelRequest struct { Model string `form:"model" json:"model"` } @@ -27,6 +32,8 @@ func Distribute(c *gin.Context) { log := GetLogger(c) + group := c.MustGet(ctxkey.Group).(*model.GroupCache) + requestModel, err := getRequestModel(c) if err != nil { abortWithMessage(c, http.StatusBadRequest, err.Error()) @@ -55,6 +62,34 @@ func Distribute(c *gin.Context) { return } + mc, ok := model.CacheGetModelConfig(requestModel) + if !ok { + abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") + return + } + modelRPM := mc.RPM + if modelRPM <= 0 { + abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" rpm is not available, please contact the administrator") + return + } + groupRPMRatio := group.RPMRatio + if groupRPMRatio <= 0 { + groupRPMRatio = 1 + } + modelRPM = int64(float64(modelRPM) * float64(groupRPMRatio)) + ok = ForceRateLimit( + c.Request.Context(), + fmt.Sprintf(groupModelRPMKey, 
group.ID, requestModel), + modelRPM, + time.Minute, + ) + if !ok { + abortWithMessage(c, http.StatusTooManyRequests, + group.ID+" is requesting too frequently", + ) + return + } + c.Set(string(ctxkey.OriginalModel), requestModel) ctx := context.WithValue(c.Request.Context(), ctxkey.OriginalModel, requestModel) c.Request = c.Request.WithContext(ctx) diff --git a/service/aiproxy/middleware/rate-limit.go b/service/aiproxy/middleware/rate-limit.go index 12ebfafc4b5..f947ae04bba 100644 --- a/service/aiproxy/middleware/rate-limit.go +++ b/service/aiproxy/middleware/rate-limit.go @@ -44,7 +44,7 @@ else end ` -func redisRateLimitRequest(ctx context.Context, key string, maxRequestNum int, duration time.Duration) (bool, error) { +func redisRateLimitRequest(ctx context.Context, key string, maxRequestNum int64, duration time.Duration) (bool, error) { rdb := common.RDB currentTime := time.Now().UnixMilli() result, err := rdb.Eval(ctx, luaScript, []string{key}, maxRequestNum, duration.Milliseconds(), currentTime).Int64() @@ -54,7 +54,7 @@ func redisRateLimitRequest(ctx context.Context, key string, maxRequestNum int, d return result == 1, nil } -func RateLimit(ctx context.Context, key string, maxRequestNum int, duration time.Duration) (bool, error) { +func RateLimit(ctx context.Context, key string, maxRequestNum int64, duration time.Duration) (bool, error) { if maxRequestNum == 0 { return true, nil } @@ -65,7 +65,7 @@ func RateLimit(ctx context.Context, key string, maxRequestNum int, duration time } // ignore redis error -func ForceRateLimit(ctx context.Context, key string, maxRequestNum int, duration time.Duration) bool { +func ForceRateLimit(ctx context.Context, key string, maxRequestNum int64, duration time.Duration) bool { if maxRequestNum == 0 { return true } @@ -79,10 +79,10 @@ func ForceRateLimit(ctx context.Context, key string, maxRequestNum int, duration return MemoryRateLimit(ctx, key, maxRequestNum, duration) } -func MemoryRateLimit(_ context.Context, key string, 
maxRequestNum int, duration time.Duration) bool { +func MemoryRateLimit(_ context.Context, key string, maxRequestNum int64, duration time.Duration) bool { // It's safe to call multi times. inMemoryRateLimiter.Init(config.RateLimitKeyExpirationDuration) - return inMemoryRateLimiter.Request(key, maxRequestNum, duration) + return inMemoryRateLimiter.Request(key, int(maxRequestNum), duration) } func GlobalAPIRateLimit(c *gin.Context) { @@ -91,7 +91,7 @@ func GlobalAPIRateLimit(c *gin.Context) { c.Next() return } - ok := ForceRateLimit(c.Request.Context(), "global_qpm", int(globalAPIRateLimitNum), time.Minute) + ok := ForceRateLimit(c.Request.Context(), "global_qpm", globalAPIRateLimitNum, time.Minute) if !ok { c.Status(http.StatusTooManyRequests) c.Abort() diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 858c02f9101..3186090207b 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -133,30 +133,30 @@ func CacheGetTokenByKey(key string) (*TokenCache, error) { } var updateTokenUsedAmountScript = redis.NewScript(` - if redis.call("HExists", KEYS[1], "used_amount") then - redis.call("HSet", KEYS[1], "used_amount", ARGV[1]) + if redis.call("HExists", KEYS[1], "ua") then + redis.call("HSet", KEYS[1], "ua", ARGV[1]) end return redis.status_reply("ok") `) var updateTokenUsedAmountOnlyIncreaseScript = redis.NewScript(` - local used_amount = redis.call("HGet", KEYS[1], "used_amount") + local used_amount = redis.call("HGet", KEYS[1], "ua") if used_amount == false then return redis.status_reply("ok") end if ARGV[1] < used_amount then return redis.status_reply("ok") end - redis.call("HSet", KEYS[1], "used_amount", ARGV[1]) + redis.call("HSet", KEYS[1], "ua", ARGV[1]) return redis.status_reply("ok") `) var increaseTokenUsedAmountScript = redis.NewScript(` - local used_amount = redis.call("HGet", KEYS[1], "used_amount") + local used_amount = redis.call("HGet", KEYS[1], "ua") if used_amount == false then return 
redis.status_reply("ok") end - redis.call("HSet", KEYS[1], "used_amount", used_amount + ARGV[1]) + redis.call("HSet", KEYS[1], "ua", used_amount + ARGV[1]) return redis.status_reply("ok") `) @@ -182,16 +182,16 @@ func CacheIncreaseTokenUsedAmount(key string, amount float64) error { } type GroupCache struct { - ID string `json:"-" redis:"-"` - Status int `json:"status" redis:"st"` - QPM int64 `json:"qpm" redis:"q"` + ID string `json:"-" redis:"-"` + Status int `json:"status" redis:"st"` + RPMRatio float64 `json:"rpm_ratio" redis:"rpm"` } func (g *Group) ToGroupCache() *GroupCache { return &GroupCache{ - ID: g.ID, - Status: g.Status, - QPM: g.QPM, + ID: g.ID, + Status: g.Status, + RPMRatio: g.RPMRatio, } } @@ -202,23 +202,23 @@ func CacheDeleteGroup(id string) error { return common.RedisDel(fmt.Sprintf(GroupCacheKey, id)) } -var updateGroupQPMScript = redis.NewScript(` - if redis.call("HExists", KEYS[1], "qpm") then - redis.call("HSet", KEYS[1], "qpm", ARGV[1]) +var updateGroupRPMScript = redis.NewScript(` + if redis.call("HExists", KEYS[1], "rpm") then + redis.call("HSet", KEYS[1], "rpm", ARGV[1]) end return redis.status_reply("ok") `) -func CacheUpdateGroupQPM(id string, qpm int64) error { +func CacheUpdateGroupRPM(id string, rpmRatio float64) error { if !common.RedisEnabled { return nil } - return updateGroupQPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, qpm).Err() + return updateGroupRPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, rpmRatio).Err() } var updateGroupStatusScript = redis.NewScript(` - if redis.call("HExists", KEYS[1], "status") then - redis.call("HSet", KEYS[1], "status", ARGV[1]) + if redis.call("HExists", KEYS[1], "st") then + redis.call("HSet", KEYS[1], "st", ARGV[1]) end return redis.status_reply("ok") `) @@ -444,7 +444,7 @@ func initializeChannelModels(channel *Channel) { if len(missingModels) > 0 { slices.Sort(missingModels) - log.Errorf("model config not 
found: %v", missingModels) + log.Errorf("model config not found or rpm less than 0: %v", missingModels) } slices.Sort(findedModels) channel.Models = findedModels diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index de5ab2d3501..5656d50cfc3 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -59,11 +59,12 @@ func (c *Channel) BeforeSave(tx *gorm.DB) (err error) { return err } if len(missingModels) > 0 { - return fmt.Errorf("model config not found: %v", missingModels) + return fmt.Errorf("model config not found or rpm less than 0: %v", missingModels) } return nil } +// check model config exist and rpm greater than 0 func CheckModelConfig(models []string) ([]string, []string, error) { return checkModelConfig(DB, models) } @@ -73,7 +74,7 @@ func checkModelConfig(tx *gorm.DB, models []string) ([]string, []string, error) return models, nil, nil } - where := tx.Model(&ModelConfig{}).Where("model IN ?", models) + where := tx.Model(&ModelConfig{}).Where("model IN ? 
AND rpm > 0", models) var count int64 if err := where.Count(&count).Error; err != nil { return nil, nil, err diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index 9e33745c43f..766f9b6fd7a 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -30,7 +30,7 @@ type Group struct { Tokens []*Token `gorm:"foreignKey:GroupID" json:"-"` Status int `gorm:"default:1;index" json:"status"` UsedAmount float64 `gorm:"index" json:"used_amount"` - QPM int64 `gorm:"index" json:"qpm"` + RPMRatio float64 `gorm:"index" json:"rpm_ratio"` RequestCount int `gorm:"index" json:"request_count"` } @@ -168,15 +168,15 @@ func UpdateGroupRequestCount(id string, count int) error { return HandleUpdateResult(result, ErrGroupNotFound) } -func UpdateGroupQPM(id string, qpm int64) (err error) { +func UpdateGroupRPM(id string, rpmRatio float64) (err error) { defer func() { if err == nil { - if err := CacheUpdateGroupQPM(id, qpm); err != nil { - log.Error("cache update group qpm failed: " + err.Error()) + if err := CacheUpdateGroupRPM(id, rpmRatio); err != nil { + log.Error("cache update group rpm failed: " + err.Error()) } } }() - result := DB.Model(&Group{}).Where("id = ?", id).Update("qpm", qpm) + result := DB.Model(&Group{}).Where("id = ?", id).Update("rpm_ratio", rpmRatio) return HandleUpdateResult(result, ErrGroupNotFound) } diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index 09924a613d9..ece76af65f0 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -23,6 +23,14 @@ type ModelConfig struct { Type int `json:"type"` InputPrice float64 `json:"input_price,omitempty"` OutputPrice float64 `json:"output_price,omitempty"` + RPM int64 `json:"rpm"` +} + +func (c *ModelConfig) BeforeSave(_ *gorm.DB) error { + if c.RPM <= 0 { + return fmt.Errorf("%s rpm must be greater than 0", c.Model) + } + return nil } func (c *ModelConfig) MarshalJSON() ([]byte, error) { diff 
--git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 8be3fcb6854..e377efebd7a 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -39,7 +39,6 @@ func InitOptionMap() error { config.OptionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) config.OptionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) config.OptionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) - config.OptionMap["DefaultGroupQPM"] = strconv.FormatInt(config.GetDefaultGroupQPM(), 10) defaultChannelModelsJSON, _ := json.Marshal(config.GetDefaultChannelModels()) config.OptionMap["DefaultChannelModels"] = conv.BytesToString(defaultChannelModelsJSON) defaultChannelModelMappingJSON, _ := json.Marshal(config.GetDefaultChannelModelMapping()) @@ -173,12 +172,6 @@ func updateOptionMap(key string, value string, isInit bool) (err error) { return err } config.SetGlobalAPIRateLimitNum(globalAPIRateLimitNum) - case "DefaultGroupQPM": - defaultGroupQPM, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return err - } - config.SetDefaultGroupQPM(defaultGroupQPM) case "DefaultChannelModels": var newModels map[int][]string err := json.Unmarshal(conv.StringToBytes(value), &newModels) @@ -202,11 +195,11 @@ func updateOptionMap(key string, value string, isInit bool) (err error) { } if !isInit && len(missingModels) > 0 { sort.Strings(missingModels) - return fmt.Errorf("model config not found: %v", missingModels) + return fmt.Errorf("model config not found or rpm less than 0: %v", missingModels) } if len(missingModels) > 0 { sort.Strings(missingModels) - log.Errorf("model config not found: %v", missingModels) + log.Errorf("model config not found or rpm less than 0: %v", missingModels) } allowedNewModels := make(map[int][]string) for t, ms := range newModels { diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index 
e30510f767c..09833eed78f 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -15,6 +15,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.06, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxInputTokens(30720), @@ -28,6 +29,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.06, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxInputTokens(30720), @@ -43,6 +45,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0008, OutputPrice: 0.002, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -56,6 +59,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0008, OutputPrice: 0.002, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -71,6 +75,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -84,6 +89,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(1000000), model.WithModelConfigMaxInputTokens(1000000), @@ -99,6 +105,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0005, OutputPrice: 0.002, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(10000000), model.WithModelConfigMaxInputTokens(10000000), @@ -114,6 +121,7 @@ var 
ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.02, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -128,6 +136,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.02, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -142,6 +151,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.008, OutputPrice: 0.008, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8000), model.WithModelConfigMaxInputTokens(6000), @@ -156,6 +166,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.008, OutputPrice: 0.008, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -172,6 +183,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.005, OutputPrice: 0.005, + RPM: 600, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(34096), model.WithModelConfigMaxInputTokens(30000), @@ -185,6 +197,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.005, OutputPrice: 0.005, + RPM: 600, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(34096), model.WithModelConfigMaxInputTokens(30000), @@ -200,6 +213,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -213,6 +227,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 1200, Config: model.NewModelConfig( 
model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -226,6 +241,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -239,6 +255,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -254,6 +271,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -267,6 +285,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -280,6 +299,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -293,6 +313,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -308,6 +329,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -321,6 +343,7 @@ var ModelList = []*model.ModelConfig{ Owner: 
model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -334,6 +357,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -347,6 +371,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -362,6 +387,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(128000), @@ -375,6 +401,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(65536), model.WithModelConfigMaxInputTokens(63488), @@ -388,6 +415,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(128000), @@ -403,6 +431,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.007, OutputPrice: 0.014, + RPM: 10, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -416,6 +445,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.005, OutputPrice: 0.01, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), 
model.WithModelConfigMaxInputTokens(30000), @@ -429,6 +459,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, + RPM: 10, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -442,6 +473,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.004, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8000), model.WithModelConfigMaxInputTokens(6000), @@ -455,6 +487,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8000), model.WithModelConfigMaxInputTokens(6000), @@ -470,6 +503,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.02, OutputPrice: 0.02, + RPM: 80, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32000), model.WithModelConfigMaxInputTokens(30000), @@ -483,6 +517,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.008, OutputPrice: 0.008, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8000), model.WithModelConfigMaxInputTokens(6000), @@ -496,6 +531,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.006, OutputPrice: 0.006, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(7500), model.WithModelConfigMaxInputTokens(6000), @@ -511,6 +547,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -524,6 +561,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 
1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -537,6 +575,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 10, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -550,6 +589,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 10, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxInputTokens(3072), @@ -565,6 +605,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.0035, OutputPrice: 0.007, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -578,6 +619,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.002, OutputPrice: 0.006, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -591,6 +633,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerAlibaba, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxInputTokens(129024), @@ -604,27 +647,33 @@ var ModelList = []*model.ModelConfig{ Model: "stable-diffusion-xl", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerStabilityAI, + RPM: 2, }, { Model: "stable-diffusion-v1.5", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerStabilityAI, + RPM: 2, }, { Model: "stable-diffusion-3.5-large", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerStabilityAI, + RPM: 2, }, { Model: "stable-diffusion-3.5-large-turbo", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerStabilityAI, + RPM: 2, }, + { Model: 
"sambert-v1", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerAlibaba, InputPrice: 0.1, + RPM: 20, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(10000), model.WithModelConfigSupportFormats([]string{"mp3", "wav", "pcm"}), @@ -678,6 +727,7 @@ var ModelList = []*model.ModelConfig{ Model: "paraformer-realtime-v2", Type: relaymode.AudioTranscription, Owner: model.ModelOwnerAlibaba, + RPM: 20, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(10000), model.WithModelConfigSupportFormats([]string{"pcm", "wav", "opus", "speex", "aac", "amr"}), @@ -688,6 +738,7 @@ var ModelList = []*model.ModelConfig{ Model: "gte-rerank", Type: relaymode.Rerank, Owner: model.ModelOwnerAlibaba, + RPM: 5, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4000), model.WithModelConfigMaxInputTokens(4000), @@ -699,6 +750,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerAlibaba, InputPrice: 0.0007, + RPM: 30, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(2048), ), @@ -708,6 +760,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerAlibaba, InputPrice: 0.0007, + RPM: 30, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(2048), ), @@ -717,6 +770,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerAlibaba, InputPrice: 0.0007, + RPM: 30, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(8192), ), diff --git a/service/aiproxy/relay/adaptor/baidu/constants.go b/service/aiproxy/relay/adaptor/baidu/constants.go index 23a188be6d3..55a9c3d616e 100644 --- a/service/aiproxy/relay/adaptor/baidu/constants.go +++ b/service/aiproxy/relay/adaptor/baidu/constants.go @@ -12,9 +12,9 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.004, OutputPrice: 0.004, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 4800, - }, + 
Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(4800), + ), }, { @@ -23,6 +23,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0005, OutputPrice: 0, + RPM: 1200, }, { Model: "bge-large-zh", @@ -30,6 +31,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBAAI, InputPrice: 0.0005, OutputPrice: 0, + RPM: 1200, }, { Model: "bge-large-en", @@ -37,6 +39,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBAAI, InputPrice: 0.0005, OutputPrice: 0, + RPM: 1200, }, { Model: "tao-8k", @@ -44,6 +47,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0005, OutputPrice: 0, + RPM: 1200, }, { @@ -52,6 +56,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0005, OutputPrice: 0, + RPM: 1200, }, { diff --git a/service/aiproxy/relay/adaptor/baiduv2/constants.go b/service/aiproxy/relay/adaptor/baiduv2/constants.go index 738df56db67..75d2fc52a5d 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/constants.go +++ b/service/aiproxy/relay/adaptor/baiduv2/constants.go @@ -14,6 +14,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.03, OutputPrice: 0.09, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(5120), model.WithModelConfigMaxInputTokens(5120), @@ -27,6 +28,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.03, OutputPrice: 0.09, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(5120), model.WithModelConfigMaxInputTokens(5120), @@ -40,6 +42,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.03, OutputPrice: 0.09, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(5120), model.WithModelConfigMaxInputTokens(5120), @@ -53,6 +56,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 
0.06, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), model.WithModelConfigMaxInputTokens(6144), @@ -66,6 +70,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), model.WithModelConfigMaxInputTokens(6144), @@ -79,6 +84,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), model.WithModelConfigMaxInputTokens(6144), @@ -92,6 +98,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.02, OutputPrice: 0.06, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(126976), model.WithModelConfigMaxInputTokens(126976), @@ -105,6 +112,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0008, OutputPrice: 0.002, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(5120), model.WithModelConfigMaxInputTokens(5120), @@ -118,6 +126,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0008, OutputPrice: 0.002, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(5120), model.WithModelConfigMaxInputTokens(5120), @@ -131,6 +140,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0008, OutputPrice: 0.002, + RPM: 5000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(126976), model.WithModelConfigMaxInputTokens(126976), @@ -144,6 +154,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 500, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(7168), model.WithModelConfigMaxInputTokens(7168), @@ -156,6 +167,7 @@ var ModelList = []*model.ModelConfig{ 
Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 500, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(126976), model.WithModelConfigMaxInputTokens(126976), @@ -168,6 +180,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(126976), model.WithModelConfigMaxInputTokens(126976), @@ -180,6 +193,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 500, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), model.WithModelConfigMaxInputTokens(6144), @@ -192,6 +206,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0002, OutputPrice: 0.0004, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(126976), model.WithModelConfigMaxInputTokens(126976), @@ -205,6 +220,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), model.WithModelConfigMaxInputTokens(6144), @@ -217,6 +233,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), model.WithModelConfigMaxInputTokens(6144), @@ -229,6 +246,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(5120), model.WithModelConfigMaxInputTokens(5120), @@ -241,6 +259,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBaidu, InputPrice: 0.04, OutputPrice: 0.12, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(6144), 
model.WithModelConfigMaxInputTokens(6144), diff --git a/service/aiproxy/relay/adaptor/deepseek/constants.go b/service/aiproxy/relay/adaptor/deepseek/constants.go index 8a5f73ca8b2..4aa765c8c25 100644 --- a/service/aiproxy/relay/adaptor/deepseek/constants.go +++ b/service/aiproxy/relay/adaptor/deepseek/constants.go @@ -12,6 +12,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDeepSeek, InputPrice: 0.001, OutputPrice: 0.002, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(64000), model.WithModelConfigMaxOutputTokens(4096), diff --git a/service/aiproxy/relay/adaptor/doubao/constants.go b/service/aiproxy/relay/adaptor/doubao/constants.go index 33d11138748..c5db8903351 100644 --- a/service/aiproxy/relay/adaptor/doubao/constants.go +++ b/service/aiproxy/relay/adaptor/doubao/constants.go @@ -14,8 +14,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.008, OutputPrice: 0.008, + RPM: 15000, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(32768), + model.WithModelConfigVision(true), ), }, { @@ -24,8 +26,10 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.02, OutputPrice: 0.02, + RPM: 15000, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(32768), + model.WithModelConfigVision(true), ), }, { @@ -46,6 +50,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0050, OutputPrice: 0.0090, + RPM: 1000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(128000), model.WithModelConfigMaxOutputTokens(4096), @@ -58,6 +63,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0008, OutputPrice: 0.0020, + RPM: 15000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxOutputTokens(4096), @@ -70,6 +76,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0008, 
OutputPrice: 0.0020, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxOutputTokens(4096), @@ -82,6 +89,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0008, OutputPrice: 0.0010, + RPM: 15000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(128000), model.WithModelConfigMaxOutputTokens(4096), @@ -94,6 +102,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 15000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxOutputTokens(4096), @@ -106,6 +115,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerDoubao, InputPrice: 0.0003, OutputPrice: 0.0006, + RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxOutputTokens(4096), @@ -118,6 +128,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerDoubao, InputPrice: 0.0005, + RPM: 1200, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(4096), ), @@ -127,6 +138,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerDoubao, InputPrice: 0.0007, + RPM: 1000, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(4096), ), diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/constants.go b/service/aiproxy/relay/adaptor/doubaoaudio/constants.go index 18dc46c58d7..f56d43db20d 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/constants.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/constants.go @@ -13,6 +13,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.AudioSpeech, Owner: model.ModelOwnerDoubao, InputPrice: 0.5, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigSupportFormats([]string{ "pcm", diff --git a/service/aiproxy/relay/adaptor/minimax/constants.go 
b/service/aiproxy/relay/adaptor/minimax/constants.go index fac1a9bd56f..efc9edf1f95 100644 --- a/service/aiproxy/relay/adaptor/minimax/constants.go +++ b/service/aiproxy/relay/adaptor/minimax/constants.go @@ -14,6 +14,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.01, OutputPrice: 0.01, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(245760), model.WithModelConfigToolChoice(true), @@ -25,6 +26,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.001, OutputPrice: 0.001, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(245760), model.WithModelConfigToolChoice(true), @@ -36,6 +38,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.005, OutputPrice: 0.005, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigToolChoice(true), @@ -47,6 +50,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.005, OutputPrice: 0.005, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigToolChoice(true), @@ -58,6 +62,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.005, OutputPrice: 0.005, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigToolChoice(true), @@ -69,6 +74,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMiniMax, InputPrice: 0.015, OutputPrice: 0.015, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(16384), model.WithModelConfigToolChoice(true), @@ -80,6 +86,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.AudioSpeech, Owner: model.ModelOwnerMiniMax, InputPrice: 0.2, + RPM: 20, Config: model.NewModelConfig( model.WithModelConfigSupportFormats([]string{"pcm", "wav", "flac", "mp3"}), 
model.WithModelConfigSupportVoices([]string{ diff --git a/service/aiproxy/relay/adaptor/moonshot/constants.go b/service/aiproxy/relay/adaptor/moonshot/constants.go index 11b8cf6a793..fabdc824768 100644 --- a/service/aiproxy/relay/adaptor/moonshot/constants.go +++ b/service/aiproxy/relay/adaptor/moonshot/constants.go @@ -12,6 +12,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMoonshot, InputPrice: 0.012, OutputPrice: 0.012, + RPM: 500, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(8192), model.WithModelConfigToolChoice(true), @@ -23,6 +24,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMoonshot, InputPrice: 0.024, OutputPrice: 0.024, + RPM: 500, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(32768), model.WithModelConfigToolChoice(true), @@ -34,6 +36,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerMoonshot, InputPrice: 0.06, OutputPrice: 0.06, + RPM: 500, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(131072), model.WithModelConfigToolChoice(true), diff --git a/service/aiproxy/relay/adaptor/siliconflow/constants.go b/service/aiproxy/relay/adaptor/siliconflow/constants.go index 7fbb0b7855c..a3e3c942946 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/constants.go +++ b/service/aiproxy/relay/adaptor/siliconflow/constants.go @@ -14,12 +14,15 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerBAAI, InputPrice: 0.0009, OutputPrice: 0, + RPM: 2000, }, { - Model: "BAAI/bge-large-zh-v1.5", - Type: relaymode.Embeddings, - Owner: model.ModelOwnerBAAI, + Model: "BAAI/bge-large-zh-v1.5", + Type: relaymode.Embeddings, + Owner: model.ModelOwnerBAAI, + InputPrice: 0.0005, + RPM: 2000, }, { diff --git a/service/aiproxy/relay/adaptor/tencent/constants.go b/service/aiproxy/relay/adaptor/tencent/constants.go index 1768b104c3c..84bea4faf09 100644 --- a/service/aiproxy/relay/adaptor/tencent/constants.go +++ 
b/service/aiproxy/relay/adaptor/tencent/constants.go @@ -14,6 +14,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(262144), model.WithModelConfigMaxInputTokens(256000), @@ -26,6 +27,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.015, OutputPrice: 0.05, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxInputTokens(28672), @@ -39,6 +41,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.015, OutputPrice: 0.05, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxInputTokens(28672), @@ -53,6 +56,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.03, OutputPrice: 0.10, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxInputTokens(28672), @@ -66,6 +70,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.012, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxInputTokens(28672), @@ -79,6 +84,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.006, OutputPrice: 0.018, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(6144), @@ -90,6 +96,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.0008, OutputPrice: 0.002, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxOutputTokens(2048), @@ -108,6 +115,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.008, + 
RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxOutputTokens(4096), @@ -119,6 +127,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.008, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigMaxOutputTokens(4096), @@ -131,6 +140,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.004, OutputPrice: 0.008, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigMaxInputTokens(4096), @@ -143,6 +153,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.08, OutputPrice: 0.08, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigMaxInputTokens(6144), @@ -157,6 +168,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerTencent, InputPrice: 0.018, OutputPrice: 0.018, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigMaxInputTokens(6144), @@ -171,5 +183,9 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerTencent, InputPrice: 0.0007, + RPM: 300, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(1024), + ), }, } diff --git a/service/aiproxy/relay/adaptor/xunfei/constants.go b/service/aiproxy/relay/adaptor/xunfei/constants.go index ee0b4158502..6937e3b23d5 100644 --- a/service/aiproxy/relay/adaptor/xunfei/constants.go +++ b/service/aiproxy/relay/adaptor/xunfei/constants.go @@ -5,6 +5,8 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) +// https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E + var ModelList = []*model.ModelConfig{ { Model: "SparkDesk-4.0-Ultra", @@ -12,6 +14,7 @@ var ModelList = 
[]*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.14, OutputPrice: 0.14, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigToolChoice(true), @@ -23,6 +26,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.001, OutputPrice: 0.001, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), ), @@ -33,6 +37,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.06, OutputPrice: 0.06, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigToolChoice(true), @@ -44,6 +49,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.09, OutputPrice: 0.09, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigToolChoice(true), @@ -55,6 +61,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.014, OutputPrice: 0.014, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), ), @@ -65,6 +72,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerXunfei, InputPrice: 0.026, OutputPrice: 0.026, + RPM: 120, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), ), diff --git a/service/aiproxy/relay/adaptor/zhipu/constants.go b/service/aiproxy/relay/adaptor/zhipu/constants.go index 24e36e9df44..bd501e73dfc 100644 --- a/service/aiproxy/relay/adaptor/zhipu/constants.go +++ b/service/aiproxy/relay/adaptor/zhipu/constants.go @@ -12,6 +12,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.001, OutputPrice: 0.001, + RPM: 300, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -23,6 +24,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.1, 
OutputPrice: 0.1, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -35,6 +37,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.05, OutputPrice: 0.05, + RPM: 600, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -47,6 +50,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.001, OutputPrice: 0.001, + RPM: 900, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -59,6 +63,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.01, OutputPrice: 0.01, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigMaxOutputTokens(4096), @@ -71,6 +76,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.001, OutputPrice: 0.001, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(1024000), model.WithModelConfigMaxOutputTokens(4096), @@ -83,6 +89,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 600, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -95,6 +102,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 1800, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -107,6 +115,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(8192), model.WithModelConfigMaxOutputTokens(1024), @@ -119,6 +128,7 @@ var 
ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.05, OutputPrice: 0.05, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(2048), model.WithModelConfigMaxOutputTokens(1024), @@ -131,6 +141,7 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerChatGLM, InputPrice: 0.01, OutputPrice: 0.01, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(8192), model.WithModelConfigMaxOutputTokens(1024), @@ -139,33 +150,24 @@ var ModelList = []*model.ModelConfig{ }, { - Model: "charglm-3", + Model: "charglm-4", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerChatGLM, - InputPrice: 0.015, - OutputPrice: 0.015, + InputPrice: 0.001, + OutputPrice: 0.001, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigMaxOutputTokens(2048), ), }, - { - Model: "emohaa", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerChatGLM, - InputPrice: 0.015, - OutputPrice: 0.015, - Config: model.NewModelConfig( - model.WithModelConfigMaxContextTokens(8192), - model.WithModelConfigMaxOutputTokens(4096), - ), - }, { Model: "codegeex-4", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerChatGLM, InputPrice: 0.0001, OutputPrice: 0.0001, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigMaxOutputTokens(4096), @@ -177,6 +179,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerChatGLM, InputPrice: 0.0005, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(8192), ), @@ -186,6 +189,7 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerChatGLM, InputPrice: 0.0005, + RPM: 600, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(8192), ), @@ -199,6 +203,7 @@ var ModelList = []*model.ModelConfig{ ImagePrices: map[string]float64{ "1024x1024": 0.1, }, + RPM: 60, Config: 
model.NewModelConfig( model.WithModelConfigMaxOutputTokens(1024), ), @@ -217,6 +222,7 @@ var ModelList = []*model.ModelConfig{ "1440x720": 0.06, "720x1440": 0.06, }, + RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxOutputTokens(1024), ), diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 251a986ce85..b860358cae9 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -50,7 +50,7 @@ func SetAPIRouter(router *gin.Engine) { groupRoute.GET("/:id", controller.GetGroup) groupRoute.DELETE("/:id", controller.DeleteGroup) groupRoute.POST("/:id/status", controller.UpdateGroupStatus) - groupRoute.POST("/:id/qpm", controller.UpdateGroupQPM) + groupRoute.POST("/:id/rpm", controller.UpdateGroupRPM) } optionRoute := apiRouter.Group("/option") From 1ff997f14e6d273690839096fe04669797087986 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 17 Dec 2024 16:11:22 +0800 Subject: [PATCH 007/167] fix: ci --- service/aiproxy/middleware/distributor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 3084594205c..5be742c9119 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -76,7 +76,7 @@ func Distribute(c *gin.Context) { if groupRPMRatio <= 0 { groupRPMRatio = 1 } - modelRPM = int64(float64(modelRPM) * float64(groupRPMRatio)) + modelRPM = int64(float64(modelRPM) * groupRPMRatio) ok = ForceRateLimit( c.Request.Context(), fmt.Sprintf(groupModelRPMKey, group.ID, requestModel), From 35cd9175ae03d91576053bcbf9b144a76b5dc3bf Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 17 Dec 2024 17:32:38 +0800 Subject: [PATCH 008/167] feat: search log with code type --- service/aiproxy/controller/log.go | 36 +++---- service/aiproxy/model/log.go | 151 +++++++++--------------------- 2 files changed, 58 insertions(+), 129 deletions(-) diff --git 
a/service/aiproxy/controller/log.go b/service/aiproxy/controller/log.go index 651aa610452..184a93c02a2 100644 --- a/service/aiproxy/controller/log.go +++ b/service/aiproxy/controller/log.go @@ -22,7 +22,6 @@ func GetLogs(c *gin.Context) { } else if perPage > 100 { perPage = 100 } - code, _ := strconv.Atoi(c.Query("code")) startTimestamp, _ := strconv.ParseInt(c.Query("start_timestamp"), 10, 64) endTimestamp, _ := strconv.ParseInt(c.Query("end_timestamp"), 10, 64) var startTimestampTime time.Time @@ -35,18 +34,17 @@ func GetLogs(c *gin.Context) { } tokenName := c.Query("token_name") modelName := c.Query("model_name") - channel, _ := strconv.Atoi(c.Query("channel")) + channelID, _ := strconv.Atoi(c.Query("channel")) group := c.Query("group") endpoint := c.Query("endpoint") - content := c.Query("content") tokenID, _ := strconv.Atoi(c.Query("token_id")) order := c.Query("order") requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) + codeType := c.Query("code_type") logs, total, err := model.GetLogs( startTimestampTime, endTimestampTime, - code, modelName, group, requestID, @@ -54,11 +52,11 @@ func GetLogs(c *gin.Context) { tokenName, p*perPage, perPage, - channel, + channelID, endpoint, - content, order, mode, + model.CodeType(codeType), ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -82,7 +80,6 @@ func GetGroupLogs(c *gin.Context) { } else if perPage > 100 { perPage = 100 } - code, _ := strconv.Atoi(c.Query("code")) startTimestamp, _ := strconv.ParseInt(c.Query("start_timestamp"), 10, 64) endTimestamp, _ := strconv.ParseInt(c.Query("end_timestamp"), 10, 64) var startTimestampTime time.Time @@ -95,30 +92,29 @@ func GetGroupLogs(c *gin.Context) { } tokenName := c.Query("token_name") modelName := c.Query("model_name") - channel, _ := strconv.Atoi(c.Query("channel")) + channelID, _ := strconv.Atoi(c.Query("channel")) group := c.Param("group") endpoint := c.Query("endpoint") - content := c.Query("content") tokenID, _ := 
strconv.Atoi(c.Query("token_id")) order := c.Query("order") requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) + codeType := c.Query("code_type") logs, total, err := model.GetGroupLogs( group, startTimestampTime, endTimestampTime, - code, modelName, requestID, tokenID, tokenName, p*perPage, perPage, - channel, + channelID, endpoint, - content, order, mode, + model.CodeType(codeType), ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -139,14 +135,12 @@ func SearchLogs(c *gin.Context) { } else if perPage > 100 { perPage = 100 } - code, _ := strconv.Atoi(c.Query("code")) endpoint := c.Query("endpoint") tokenName := c.Query("token_name") modelName := c.Query("model_name") - content := c.Query("content") groupID := c.Query("group_id") tokenID, _ := strconv.Atoi(c.Query("token_id")) - channel, _ := strconv.Atoi(c.Query("channel")) + channelID, _ := strconv.Atoi(c.Query("channel")) startTimestamp, _ := strconv.ParseInt(c.Query("start_timestamp"), 10, 64) endTimestamp, _ := strconv.ParseInt(c.Query("end_timestamp"), 10, 64) var startTimestampTime time.Time @@ -160,23 +154,23 @@ func SearchLogs(c *gin.Context) { order := c.Query("order") requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) + codeType := c.Query("code_type") logs, total, err := model.SearchLogs( keyword, p, perPage, - code, endpoint, groupID, requestID, tokenID, tokenName, modelName, - content, startTimestampTime, endTimestampTime, - channel, + channelID, order, mode, + model.CodeType(codeType), ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -198,11 +192,9 @@ func SearchGroupLogs(c *gin.Context) { perPage = 100 } group := c.Param("group") - code, _ := strconv.Atoi(c.Query("code")) endpoint := c.Query("endpoint") tokenName := c.Query("token_name") modelName := c.Query("model_name") - content := c.Query("content") tokenID, _ := strconv.Atoi(c.Query("token_id")) channelID, _ := 
strconv.Atoi(c.Query("channel")) startTimestamp, _ := strconv.ParseInt(c.Query("start_timestamp"), 10, 64) @@ -218,23 +210,23 @@ func SearchGroupLogs(c *gin.Context) { order := c.Query("order") requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) + codeType := c.Query("code_type") logs, total, err := model.SearchGroupLogs( group, keyword, p, perPage, - code, endpoint, requestID, tokenID, tokenName, modelName, - content, startTimestampTime, endTimestampTime, channelID, order, mode, + model.CodeType(codeType), ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 7ba8f797b87..1e0156f5439 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -127,7 +127,15 @@ func getLogOrder(order string) string { } } -func GetLogs(startTimestamp time.Time, endTimestamp time.Time, code int, modelName string, group string, requestID string, tokenID int, tokenName string, startIdx int, num int, channelID int, endpoint string, content string, order string, mode int) (logs []*Log, total int64, err error) { +type CodeType string + +const ( + CodeTypeAll CodeType = "all" + CodeTypeSuccess CodeType = "success" + CodeTypeError CodeType = "error" +) + +func GetLogs(startTimestamp time.Time, endTimestamp time.Time, modelName string, group string, requestID string, tokenID int, tokenName string, startIdx int, num int, channelID int, endpoint string, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { tx := LogDB.Model(&Log{}) if group != "" { tx = tx.Where("group_id = ?", group) @@ -159,11 +167,11 @@ func GetLogs(startTimestamp time.Time, endTimestamp time.Time, code int, modelNa if endpoint != "" { tx = tx.Where("endpoint = ?", endpoint) } - if content != "" { - tx = tx.Where("content = ?", content) - } - if code != 0 { - tx = tx.Where("code = ?", code) + switch codeType { + case CodeTypeSuccess: + tx = tx.Where("code 
= 200") + case CodeTypeError: + tx = tx.Where("code != 200") } err = tx.Count(&total).Error if err != nil { @@ -182,7 +190,7 @@ func GetLogs(startTimestamp time.Time, endTimestamp time.Time, code int, modelNa return logs, total, err } -func GetGroupLogs(group string, startTimestamp time.Time, endTimestamp time.Time, code int, modelName string, requestID string, tokenID int, tokenName string, startIdx int, num int, channelID int, endpoint string, content string, order string, mode int) (logs []*Log, total int64, err error) { +func GetGroupLogs(group string, startTimestamp time.Time, endTimestamp time.Time, modelName string, requestID string, tokenID int, tokenName string, startIdx int, num int, channelID int, endpoint string, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { tx := LogDB.Model(&Log{}).Where("group_id = ?", group) if !startTimestamp.IsZero() { tx = tx.Where("request_at >= ?", startTimestamp) @@ -211,11 +219,11 @@ func GetGroupLogs(group string, startTimestamp time.Time, endTimestamp time.Time if endpoint != "" { tx = tx.Where("endpoint = ?", endpoint) } - if content != "" { - tx = tx.Where("content = ?", content) - } - if code != 0 { - tx = tx.Where("code = ?", code) + switch codeType { + case CodeTypeSuccess: + tx = tx.Where("code = 200") + case CodeTypeError: + tx = tx.Where("code != 200") } err = tx.Count(&total).Error if err != nil { @@ -234,7 +242,7 @@ func GetGroupLogs(group string, startTimestamp time.Time, endTimestamp time.Time return logs, total, err } -func SearchLogs(keyword string, page int, perPage int, code int, endpoint string, groupID string, requestID string, tokenID int, tokenName string, modelName string, content string, startTimestamp time.Time, endTimestamp time.Time, channelID int, order string, mode int) (logs []*Log, total int64, err error) { +func SearchLogs(keyword string, page int, perPage int, endpoint string, groupID string, requestID string, tokenID int, tokenName string, modelName 
string, startTimestamp time.Time, endTimestamp time.Time, channelID int, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { tx := LogDB.Model(&Log{}) // Handle exact match conditions for non-zero values @@ -259,21 +267,21 @@ func SearchLogs(keyword string, page int, perPage int, code int, endpoint string if tokenID != 0 { tx = tx.Where("token_id = ?", tokenID) } - if code != 0 { - tx = tx.Where("code = ?", code) - } if endpoint != "" { tx = tx.Where("endpoint = ?", endpoint) } if requestID != "" { tx = tx.Where("request_id = ?", requestID) } - if content != "" { - tx = tx.Where("content = ?", content) - } if channelID != 0 { tx = tx.Where("channel_id = ?", channelID) } + switch codeType { + case CodeTypeSuccess: + tx = tx.Where("code = 200") + case CodeTypeError: + tx = tx.Where("code != 200") + } // Handle keyword search for zero value fields if keyword != "" { @@ -281,10 +289,6 @@ func SearchLogs(keyword string, page int, perPage int, code int, endpoint string var values []interface{} if num := helper.String2Int(keyword); num != 0 { - if code == 0 { - conditions = append(conditions, "code = ?") - values = append(values, num) - } if channelID == 0 { conditions = append(conditions, "channel_id = ?") values = append(values, num) @@ -335,14 +339,12 @@ func SearchLogs(keyword string, page int, perPage int, code int, endpoint string } values = append(values, "%"+keyword+"%") } - if content == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "content ILIKE ?") - } else { - conditions = append(conditions, "content LIKE ?") - } - values = append(values, "%"+keyword+"%") + if common.UsingPostgreSQL { + conditions = append(conditions, "content ILIKE ?") + } else { + conditions = append(conditions, "content LIKE ?") } + values = append(values, "%"+keyword+"%") if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) 
@@ -370,7 +372,7 @@ func SearchLogs(keyword string, page int, perPage int, code int, endpoint string return logs, total, err } -func SearchGroupLogs(group string, keyword string, page int, perPage int, code int, endpoint string, requestID string, tokenID int, tokenName string, modelName string, content string, startTimestamp time.Time, endTimestamp time.Time, channelID int, order string, mode int) (logs []*Log, total int64, err error) { +func SearchGroupLogs(group string, keyword string, page int, perPage int, endpoint string, requestID string, tokenID int, tokenName string, modelName string, startTimestamp time.Time, endTimestamp time.Time, channelID int, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { if group == "" { return nil, 0, errors.New("group is empty") } @@ -389,9 +391,6 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, code i if modelName != "" { tx = tx.Where("model = ?", modelName) } - if code != 0 { - tx = tx.Where("code = ?", code) - } if mode != 0 { tx = tx.Where("mode = ?", mode) } @@ -404,12 +403,15 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, code i if tokenID != 0 { tx = tx.Where("token_id = ?", tokenID) } - if content != "" { - tx = tx.Where("content = ?", content) - } if channelID != 0 { tx = tx.Where("channel_id = ?", channelID) } + switch codeType { + case CodeTypeSuccess: + tx = tx.Where("code = 200") + case CodeTypeError: + tx = tx.Where("code != 200") + } // Handle keyword search for zero value fields if keyword != "" { @@ -417,10 +419,6 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, code i var values []interface{} if num := helper.String2Int(keyword); num != 0 { - if code == 0 { - conditions = append(conditions, "code = ?") - values = append(values, num) - } if channelID == 0 { conditions = append(conditions, "channel_id = ?") values = append(values, num) @@ -462,14 +460,12 @@ func SearchGroupLogs(group string, 
keyword string, page int, perPage int, code i } values = append(values, "%"+keyword+"%") } - if content == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "content ILIKE ?") - } else { - conditions = append(conditions, "content LIKE ?") - } - values = append(values, "%"+keyword+"%") + if common.UsingPostgreSQL { + conditions = append(conditions, "content ILIKE ?") + } else { + conditions = append(conditions, "content LIKE ?") } + values = append(values, "%"+keyword+"%") if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) @@ -498,65 +494,6 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, code i return logs, total, err } -func SumUsedQuota(startTimestamp time.Time, endTimestamp time.Time, modelName string, group string, tokenName string, channel int, endpoint string) (quota int64) { - ifnull := "ifnull" - if common.UsingPostgreSQL { - ifnull = "COALESCE" - } - tx := LogDB.Table("logs").Select(ifnull + "(sum(quota),0)") - if group != "" { - tx = tx.Where("group_id = ?", group) - } - if tokenName != "" { - tx = tx.Where("token_name = ?", tokenName) - } - if !startTimestamp.IsZero() { - tx = tx.Where("request_at >= ?", startTimestamp) - } - if !endTimestamp.IsZero() { - tx = tx.Where("request_at <= ?", endTimestamp) - } - if modelName != "" { - tx = tx.Where("model = ?", modelName) - } - if channel != 0 { - tx = tx.Where("channel_id = ?", channel) - } - if endpoint != "" { - tx = tx.Where("endpoint = ?", endpoint) - } - tx.Scan("a) - return quota -} - -func SumUsedToken(startTimestamp time.Time, endTimestamp time.Time, modelName string, group string, tokenName string, endpoint string) (token int) { - ifnull := "ifnull" - if common.UsingPostgreSQL { - ifnull = "COALESCE" - } - tx := LogDB.Table("logs").Select(fmt.Sprintf("%s(sum(prompt_tokens),0) + %s(sum(completion_tokens),0)", ifnull, ifnull)) - if group != "" { - tx = tx.Where("group_id = ?", group) - } - if tokenName 
!= "" { - tx = tx.Where("token_name = ?", tokenName) - } - if !startTimestamp.IsZero() { - tx = tx.Where("request_at >= ?", startTimestamp) - } - if !endTimestamp.IsZero() { - tx = tx.Where("request_at <= ?", endTimestamp) - } - if modelName != "" { - tx = tx.Where("model = ?", modelName) - } - if endpoint != "" { - tx = tx.Where("endpoint = ?", endpoint) - } - tx.Scan(&token) - return token -} - func DeleteOldLog(timestamp time.Time) (int64, error) { result := LogDB.Where("request_at < ?", timestamp).Delete(&Log{}) return result.RowsAffected, result.Error From edd526e2bd733c45085d1a3a06228c781c44951c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 18 Dec 2024 10:37:53 +0800 Subject: [PATCH 009/167] feat: resp detail buf use pool --- service/aiproxy/relay/controller/helper.go | 33 ++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index f8b2d7579a0..f930911f0b2 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -190,6 +190,31 @@ func (rw *responseWriter) WriteString(s string) (int, error) { return rw.ResponseWriter.WriteString(s) } +const ( + // 0.5MB + defaultBufferSize = 512 * 1024 + // 3MB + maxBufferSize = 3 * 1024 * 1024 +) + +var bufferPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, defaultBufferSize)) + }, +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func putBuffer(buf *bytes.Buffer) { + buf.Reset() + if buf.Cap() > maxBufferSize { + return + } + bufferPool.Put(buf) +} + func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.Usage, *model.RequestDetail, *relaymodel.ErrorWithStatusCode) { log := middleware.GetLogger(c) @@ -248,9 +273,12 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U return nil, &detail, utils.RelayErrorHandler(meta, resp) 
} + buf := getBuffer() + defer putBuffer(buf) + rw := &responseWriter{ ResponseWriter: c.Writer, - body: bytes.NewBuffer(nil), + body: buf, } rawWriter := c.Writer defer func() { c.Writer = rawWriter }() @@ -258,7 +286,8 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U c.Header("Content-Type", resp.Header.Get("Content-Type")) usage, relayErr := a.DoResponse(meta, c, resp) - detail.ResponseBody = conv.BytesToString(rw.body.Bytes()) + // copy buf to detail.ResponseBody + detail.ResponseBody = rw.body.String() if relayErr != nil { if detail.ResponseBody == "" { respData, err := json.Marshal(gin.H{ From 1eab8afe6fded7106049fa4b3bf0b2ecbd6a86d9 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 18 Dec 2024 16:23:43 +0800 Subject: [PATCH 010/167] feat: no need init client, use ctx --- service/aiproxy/common/client/init.go | 63 ------------------- service/aiproxy/common/config/config.go | 11 ---- service/aiproxy/common/image/image.go | 6 +- service/aiproxy/common/image/image_test.go | 7 --- service/aiproxy/controller/relay.go | 14 +---- service/aiproxy/main.go | 2 - service/aiproxy/relay/adaptor/baidu/token.go | 4 +- .../aiproxy/relay/adaptor/openai/balance.go | 5 +- .../relay/adaptor/siliconflow/balance.go | 3 +- service/aiproxy/relay/controller/helper.go | 4 ++ service/aiproxy/relay/utils/utils.go | 3 +- 11 files changed, 15 insertions(+), 107 deletions(-) delete mode 100644 service/aiproxy/common/client/init.go diff --git a/service/aiproxy/common/client/init.go b/service/aiproxy/common/client/init.go deleted file mode 100644 index 18a45c9d70f..00000000000 --- a/service/aiproxy/common/client/init.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - "net/url" - "time" - - "github.com/labring/sealos/service/aiproxy/common/config" - log "github.com/sirupsen/logrus" -) - -var ( - HTTPClient *http.Client - ImpatientHTTPClient *http.Client - UserContentRequestHTTPClient *http.Client -) - -func Init() { - if 
config.UserContentRequestProxy != "" { - log.Info(fmt.Sprintf("using %s as proxy to fetch user content", config.UserContentRequestProxy)) - proxyURL, err := url.Parse(config.UserContentRequestProxy) - if err != nil { - log.Fatal("USER_CONTENT_REQUEST_PROXY set but invalid: " + config.UserContentRequestProxy) - } - transport := &http.Transport{ - Proxy: http.ProxyURL(proxyURL), - } - UserContentRequestHTTPClient = &http.Client{ - Transport: transport, - Timeout: time.Second * time.Duration(config.UserContentRequestTimeout), - } - } else { - UserContentRequestHTTPClient = &http.Client{} - } - var transport http.RoundTripper - if config.RelayProxy != "" { - log.Info(fmt.Sprintf("using %s as api relay proxy", config.RelayProxy)) - proxyURL, err := url.Parse(config.RelayProxy) - if err != nil { - log.Fatal("USER_CONTENT_REQUEST_PROXY set but invalid: " + config.UserContentRequestProxy) - } - transport = &http.Transport{ - Proxy: http.ProxyURL(proxyURL), - } - } - - if config.RelayTimeout == 0 { - HTTPClient = &http.Client{ - Transport: transport, - } - } else { - HTTPClient = &http.Client{ - Timeout: time.Duration(config.RelayTimeout) * time.Second, - Transport: transport, - } - } - - ImpatientHTTPClient = &http.Client{ - Timeout: 5 * time.Second, - Transport: transport, - } -} diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index d47a9f7cc8d..dcb1b0b57f8 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -86,21 +86,10 @@ func SetRetryTimes(times int64) { var DisableAutoMigrateDB = os.Getenv("DISABLE_AUTO_MIGRATE_DB") == "true" -var RelayTimeout = env.Int("RELAY_TIMEOUT", 0) // unit is second - var RateLimitKeyExpirationDuration = 20 * time.Minute var OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false) -var ( - // 代理地址 - RelayProxy = env.String("RELAY_PROXY", "") - // 用户内容请求代理地址 - UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "") - // 用户内容请求超时时间,单位为秒 
- UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30) -) - var AdminKey = env.String("ADMIN_KEY", "") var ( diff --git a/service/aiproxy/common/image/image.go b/service/aiproxy/common/image/image.go index 505d63f9414..ca2a5511b29 100644 --- a/service/aiproxy/common/image/image.go +++ b/service/aiproxy/common/image/image.go @@ -21,8 +21,6 @@ import ( // import webp decoder _ "golang.org/x/image/webp" - - "github.com/labring/sealos/service/aiproxy/common/client" ) // Regex to match data URL pattern @@ -37,7 +35,7 @@ func GetImageSizeFromURL(url string) (width int, height int, err error) { if err != nil { return 0, 0, err } - resp, err := client.UserContentRequestHTTPClient.Do(req) + resp, err := http.DefaultClient.Do(req) if err != nil { return } @@ -70,7 +68,7 @@ func GetImageFromURL(ctx context.Context, url string) (string, string, error) { if err != nil { return "", "", err } - resp, err := client.UserContentRequestHTTPClient.Do(req) + resp, err := http.DefaultClient.Do(req) if err != nil { return "", "", err } diff --git a/service/aiproxy/common/image/image_test.go b/service/aiproxy/common/image/image_test.go index 7dad94a0c5a..f5abb3f1271 100644 --- a/service/aiproxy/common/image/image_test.go +++ b/service/aiproxy/common/image/image_test.go @@ -12,8 +12,6 @@ import ( "strings" "testing" - "github.com/labring/sealos/service/aiproxy/common/client" - img "github.com/labring/sealos/service/aiproxy/common/image" "github.com/stretchr/testify/assert" @@ -44,11 +42,6 @@ var cases = []struct { {"https://upload.wikimedia.org/wikipedia/commons/6/62/102Cervus.jpg", "jpeg", 270, 230}, } -func TestMain(m *testing.M) { - client.Init() - m.Run() -} - func TestDecode(t *testing.T) { // Bytes read: varies sometimes // jpeg: 1063892 diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 1d30cef78af..a4bcd22a007 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -95,19 +95,11 @@ 
func Relay(c *gin.Context) { } func shouldRetry(_ *gin.Context, statusCode int) bool { - if statusCode == http.StatusTooManyRequests { + if statusCode == http.StatusTooManyRequests || + statusCode == http.StatusGatewayTimeout { return true } - if statusCode/100 == 5 { - return true - } - if statusCode == http.StatusBadRequest { - return false - } - if statusCode/100 == 2 { - return false - } - return true + return false } func RelayNotImplemented(c *gin.Context) { diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 38a7f392766..4ad69c1ffb5 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -17,7 +17,6 @@ import ( _ "github.com/joho/godotenv/autoload" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/balance" - "github.com/labring/sealos/service/aiproxy/common/client" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -43,7 +42,6 @@ func initializeServices() error { return err } - client.Init() return nil } diff --git a/service/aiproxy/relay/adaptor/baidu/token.go b/service/aiproxy/relay/adaptor/baidu/token.go index d1305dc2c75..605a203ae3e 100644 --- a/service/aiproxy/relay/adaptor/baidu/token.go +++ b/service/aiproxy/relay/adaptor/baidu/token.go @@ -10,7 +10,7 @@ import ( "time" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/client" + "github.com/labring/sealos/service/aiproxy/relay/utils" log "github.com/sirupsen/logrus" ) @@ -66,7 +66,7 @@ func getBaiduAccessTokenHelper(ctx context.Context, apiKey string) (*AccessToken } req.Header.Add("Content-Type", "application/json") req.Header.Add("Accept", "application/json") - res, err := client.ImpatientHTTPClient.Do(req) + res, err := utils.DoRequest(req) if err != nil { return nil, err } diff --git a/service/aiproxy/relay/adaptor/openai/balance.go 
b/service/aiproxy/relay/adaptor/openai/balance.go index e6f7efee34f..f5bb886fa97 100644 --- a/service/aiproxy/relay/adaptor/openai/balance.go +++ b/service/aiproxy/relay/adaptor/openai/balance.go @@ -6,7 +6,6 @@ import ( "time" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/client" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) @@ -29,7 +28,7 @@ func GetBalance(channel *model.Channel) (float64, error) { return 0, err } req1.Header.Set("Authorization", "Bearer "+channel.Key) - res1, err := client.HTTPClient.Do(req1) + res1, err := http.DefaultClient.Do(req1) if err != nil { return 0, err } @@ -51,7 +50,7 @@ func GetBalance(channel *model.Channel) (float64, error) { return 0, err } req2.Header.Set("Authorization", "Bearer "+channel.Key) - res2, err := client.HTTPClient.Do(req2) + res2, err := http.DefaultClient.Do(req2) if err != nil { return 0, err } diff --git a/service/aiproxy/relay/adaptor/siliconflow/balance.go b/service/aiproxy/relay/adaptor/siliconflow/balance.go index 74172143d73..0fd310964c4 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/balance.go +++ b/service/aiproxy/relay/adaptor/siliconflow/balance.go @@ -7,7 +7,6 @@ import ( "net/http" "strconv" - "github.com/labring/sealos/service/aiproxy/common/client" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) @@ -25,7 +24,7 @@ func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error) { return 0, err } req.Header.Set("Authorization", "Bearer "+channel.Key) - res, err := client.HTTPClient.Do(req) + res, err := http.DefaultClient.Do(req) if err != nil { return 0, err } diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index f930911f0b2..525881276ab 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -4,6 +4,7 @@ import ( 
"bytes" "context" "encoding/json" + "errors" "io" "net/http" "sync" @@ -260,6 +261,9 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U resp, err := a.DoRequest(meta, c, req) if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "do_request_failed", http.StatusGatewayTimeout) + } return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "do_request_failed", http.StatusBadRequest) } diff --git a/service/aiproxy/relay/utils/utils.go b/service/aiproxy/relay/utils/utils.go index aebd317a721..2c0d8f69b5a 100644 --- a/service/aiproxy/relay/utils/utils.go +++ b/service/aiproxy/relay/utils/utils.go @@ -5,7 +5,6 @@ import ( "strings" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/client" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -55,7 +54,7 @@ func UnmarshalMap(req *http.Request) (map[string]any, error) { } func DoRequest(req *http.Request) (*http.Response, error) { - resp, err := client.HTTPClient.Do(req) + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } From 7b8efa7679dc47b7d81f60c125987a336994550b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 18 Dec 2024 16:28:04 +0800 Subject: [PATCH 011/167] fix: lint --- service/aiproxy/main.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 4ad69c1ffb5..491355c9ed9 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -38,11 +38,7 @@ func initializeServices() error { return err } - if err := initializeCaches(); err != nil { - return err - } - - return nil + return initializeCaches() } func initializeBalance() error { From 9f63f43da62cd8f6b118de5077e7f4159bbd33fa Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 18 Dec 2024 17:25:44 +0800 Subject: [PATCH 012/167] 
feat: admin api log filed --- service/aiproxy/controller/dashboard.go | 6 +++--- service/aiproxy/controller/group.go | 26 ++++++++++++------------- service/aiproxy/middleware/auth.go | 7 +++++++ service/aiproxy/router/api.go | 10 +++++----- 4 files changed, 28 insertions(+), 21 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 2a010717805..edfd40f215d 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -10,8 +10,8 @@ import ( ) func GetGroupDashboard(c *gin.Context) { - id := c.Param("id") - if id == "" { + group := c.Param("group") + if group == "" { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } @@ -33,7 +33,7 @@ func GetGroupDashboard(c *gin.Context) { tokenName := c.Query("token_name") modelName := c.Query("model") - dashboards, err := model.GetDashboardData(id, start, end, tokenName, modelName) + dashboards, err := model.GetDashboardData(group, start, end, tokenName, modelName) if err != nil { middleware.ErrorResponse(c, http.StatusOK, "failed to get statistics") return diff --git a/service/aiproxy/controller/group.go b/service/aiproxy/controller/group.go index 82575da9a6c..b134688ddd1 100644 --- a/service/aiproxy/controller/group.go +++ b/service/aiproxy/controller/group.go @@ -64,17 +64,17 @@ func SearchGroups(c *gin.Context) { } func GetGroup(c *gin.Context) { - id := c.Param("id") - if id == "" { + group := c.Param("group") + if group == "" { middleware.ErrorResponse(c, http.StatusOK, "group id is empty") return } - group, err := model.GetGroupByID(id) + _group, err := model.GetGroupByID(group) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, group) + middleware.SuccessResponse(c, _group) } type UpdateGroupRPMRequest struct { @@ -82,8 +82,8 @@ type UpdateGroupRPMRequest struct { } func UpdateGroupRPM(c *gin.Context) { - id := c.Param("id") - if id == "" { 
+ group := c.Param("group") + if group == "" { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } @@ -93,7 +93,7 @@ func UpdateGroupRPM(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } - err = model.UpdateGroupRPM(id, req.RPMRatio) + err = model.UpdateGroupRPM(group, req.RPMRatio) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -106,8 +106,8 @@ type UpdateGroupStatusRequest struct { } func UpdateGroupStatus(c *gin.Context) { - id := c.Param("id") - if id == "" { + group := c.Param("group") + if group == "" { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } @@ -117,7 +117,7 @@ func UpdateGroupStatus(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } - err = model.UpdateGroupStatus(id, req.Status) + err = model.UpdateGroupStatus(group, req.Status) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -126,12 +126,12 @@ func UpdateGroupStatus(c *gin.Context) { } func DeleteGroup(c *gin.Context) { - id := c.Param("id") - if id == "" { + group := c.Param("group") + if group == "" { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } - err := model.DeleteGroupByID(id) + err := model.DeleteGroupByID(group) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index e63528bb499..3bcde94e124 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -41,6 +41,13 @@ func AdminAuth(c *gin.Context) { c.Abort() return } + + group := c.Param("group") + if group != "" { + log := GetLogger(c) + log.Data["gid"] = group + } + c.Next() } diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index b860358cae9..0da7fd2de63 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -35,7 +35,7 @@ 
func SetAPIRouter(router *gin.Engine) { dashboardRoute := apiRouter.Group("/dashboard") { - dashboardRoute.GET("/:id", controller.GetGroupDashboard) + dashboardRoute.GET("/:group", controller.GetGroupDashboard) } groupsRoute := apiRouter.Group("/groups") @@ -47,10 +47,10 @@ func SetAPIRouter(router *gin.Engine) { groupRoute := apiRouter.Group("/group") { groupRoute.POST("/", controller.CreateGroup) - groupRoute.GET("/:id", controller.GetGroup) - groupRoute.DELETE("/:id", controller.DeleteGroup) - groupRoute.POST("/:id/status", controller.UpdateGroupStatus) - groupRoute.POST("/:id/rpm", controller.UpdateGroupRPM) + groupRoute.GET("/:group", controller.GetGroup) + groupRoute.DELETE("/:group", controller.DeleteGroup) + groupRoute.POST("/:group/status", controller.UpdateGroupStatus) + groupRoute.POST("/:group/rpm", controller.UpdateGroupRPM) } optionRoute := apiRouter.Group("/option") From 0341ad08fdda2403506e64144167a1619c031053 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 18 Dec 2024 17:46:27 +0800 Subject: [PATCH 013/167] feat: log usage --- service/aiproxy/relay/controller/helper.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 525881276ab..8c850323c2c 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -314,5 +314,8 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U if usage.TotalTokens == 0 { usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens } + log.Data["t_input"] = usage.PromptTokens + log.Data["t_output"] = usage.CompletionTokens + log.Data["t_total"] = usage.TotalTokens return usage, &detail, nil } From 938c29e618c6ea303015a8c345e2fd9b3b44cd43 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 19 Dec 2024 15:28:57 +0800 Subject: [PATCH 014/167] feat: auto retry --- service/aiproxy/common/config/config.go | 43 +++++++---- 
service/aiproxy/common/ctxkey/key.go | 13 +--- service/aiproxy/common/helper/helper.go | 41 ---------- service/aiproxy/common/helper/key.go | 7 -- service/aiproxy/common/helper/time.go | 9 --- service/aiproxy/controller/channel-test.go | 3 +- service/aiproxy/controller/relay.go | 57 ++++++++------ service/aiproxy/main.go | 2 +- service/aiproxy/middleware/distributor.go | 55 ++++++-------- service/aiproxy/middleware/request-id.go | 23 ++++-- service/aiproxy/middleware/utils.go | 8 +- service/aiproxy/model/cache.go | 29 ++++++- service/aiproxy/model/channel.go | 9 +-- service/aiproxy/model/consumeerr.go | 3 +- service/aiproxy/model/log.go | 5 +- service/aiproxy/model/modelconfig.go | 7 -- service/aiproxy/model/option.go | 75 +++++++++++-------- service/aiproxy/model/utils.go | 12 +++ service/aiproxy/relay/adaptor/ali/image.go | 3 +- .../aiproxy/relay/adaptor/anthropic/main.go | 6 +- .../aiproxy/relay/adaptor/aws/claude/main.go | 4 +- .../aiproxy/relay/adaptor/aws/llama3/main.go | 15 ++-- service/aiproxy/relay/adaptor/cohere/main.go | 11 ++- service/aiproxy/relay/adaptor/coze/main.go | 13 ++-- .../aiproxy/relay/adaptor/gemini/adaptor.go | 10 ++- service/aiproxy/relay/adaptor/gemini/main.go | 18 ++--- service/aiproxy/relay/adaptor/ollama/main.go | 16 ++-- service/aiproxy/relay/controller/helper.go | 19 ++++- 28 files changed, 265 insertions(+), 251 deletions(-) delete mode 100644 service/aiproxy/common/helper/helper.go delete mode 100644 service/aiproxy/common/helper/key.go delete mode 100644 service/aiproxy/common/helper/time.go diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index dcb1b0b57f8..d2a59c0f9ad 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -4,18 +4,12 @@ import ( "os" "slices" "strconv" - "sync" "sync/atomic" "time" "github.com/labring/sealos/service/aiproxy/common/env" ) -var ( - OptionMap map[string]string - OptionMapRWMutex sync.RWMutex -) - var ( 
DebugEnabled, _ = strconv.ParseBool(os.Getenv("DEBUG")) DebugSQLEnabled, _ = strconv.ParseBool(os.Getenv("DEBUG_SQL")) @@ -28,14 +22,39 @@ var ( automaticEnableChannelWhenTestSucceedEnabled atomic.Bool // 是否近似计算token approximateTokenEnabled atomic.Bool - // 重试次数 - retryTimes atomic.Int64 // 暂停服务 disableServe atomic.Bool // log detail 存储时间(小时) logDetailStorageHours int64 = 3 * 24 ) +var ( + // 重试次数 + retryTimes atomic.Int64 + // 模型类型超时时间,单位秒 + timeoutWithModelType atomic.Value +) + +func GetRetryTimes() int64 { + return retryTimes.Load() +} + +func SetRetryTimes(times int64) { + retryTimes.Store(times) +} + +func init() { + timeoutWithModelType.Store(make(map[int]int64)) +} + +func GetTimeoutWithModelType() map[int]int64 { + return timeoutWithModelType.Load().(map[int]int64) +} + +func SetTimeoutWithModelType(timeout map[int]int64) { + timeoutWithModelType.Store(timeout) +} + func GetLogDetailStorageHours() int64 { return atomic.LoadInt64(&logDetailStorageHours) } @@ -76,14 +95,6 @@ func SetApproximateTokenEnabled(enabled bool) { approximateTokenEnabled.Store(enabled) } -func GetRetryTimes() int64 { - return retryTimes.Load() -} - -func SetRetryTimes(times int64) { - retryTimes.Store(times) -} - var DisableAutoMigrateDB = os.Getenv("DISABLE_AUTO_MIGRATE_DB") == "true" var RateLimitKeyExpirationDuration = 20 * time.Minute diff --git a/service/aiproxy/common/ctxkey/key.go b/service/aiproxy/common/ctxkey/key.go index 488a856ca52..a986a4ebc18 100644 --- a/service/aiproxy/common/ctxkey/key.go +++ b/service/aiproxy/common/ctxkey/key.go @@ -1,13 +1,8 @@ package ctxkey -type OriginalModelKey string - -const ( - OriginalModel OriginalModelKey = "original_model" -) - const ( - Channel = "channel" - Group = "group" - Token = "token" + Group = "group" + Token = "token" + OriginalModel = "original_model" + RequestID = "X-Request-Id" ) diff --git a/service/aiproxy/common/helper/helper.go b/service/aiproxy/common/helper/helper.go deleted file mode 100644 index 
8d882b60c68..00000000000 --- a/service/aiproxy/common/helper/helper.go +++ /dev/null @@ -1,41 +0,0 @@ -package helper - -import ( - "fmt" - "strconv" - "time" - - "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common/random" -) - -func GenRequestID() string { - return strconv.FormatInt(time.Now().UnixMilli(), 10) + random.GetRandomNumberString(4) -} - -func GetResponseID(c *gin.Context) string { - logID := c.GetString(string(RequestIDKey)) - return "chatcmpl-" + logID -} - -func AssignOrDefault(value string, defaultValue string) string { - if len(value) != 0 { - return value - } - return defaultValue -} - -func MessageWithRequestID(message string, id string) string { - return fmt.Sprintf("%s (request id: %s)", message, id) -} - -func String2Int(keyword string) int { - if keyword == "" { - return 0 - } - i, err := strconv.Atoi(keyword) - if err != nil { - return 0 - } - return i -} diff --git a/service/aiproxy/common/helper/key.go b/service/aiproxy/common/helper/key.go deleted file mode 100644 index bc9c949eb9c..00000000000 --- a/service/aiproxy/common/helper/key.go +++ /dev/null @@ -1,7 +0,0 @@ -package helper - -type Key string - -const ( - RequestIDKey Key = "X-Request-Id" -) diff --git a/service/aiproxy/common/helper/time.go b/service/aiproxy/common/helper/time.go deleted file mode 100644 index 757e56af23d..00000000000 --- a/service/aiproxy/common/helper/time.go +++ /dev/null @@ -1,9 +0,0 @@ -package helper - -import ( - "time" -) - -func GetTimestamp() int64 { - return time.Now().Unix() -} diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 5c431ff8792..50ce196aae0 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -16,7 +16,6 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" 
"github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -42,7 +41,7 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe Body: io.NopCloser(body), Header: make(http.Header), } - newc.Set(string(helper.RequestIDKey), channelTestRequestID) + middleware.SetRequestID(newc, channelTestRequestID) meta := meta.NewMeta( channel, diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index a4bcd22a007..79d922e41c6 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -2,13 +2,14 @@ package controller import ( "bytes" + "errors" "io" "net/http" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/helper" + "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/middleware" dbmodel "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/controller" @@ -40,43 +41,56 @@ func relayHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { func Relay(c *gin.Context) { log := middleware.GetLogger(c) - if config.DebugEnabled { - requestBody, _ := common.GetRequestBody(c.Request) - log.Debugf("request body: %s", requestBody) + + requestModel := c.MustGet(string(ctxkey.OriginalModel)).(string) + channel, err := dbmodel.CacheGetRandomSatisfiedChannel(requestModel) + if err != nil { + c.JSON(http.StatusServiceUnavailable, gin.H{ + "error": &model.Error{ + Message: "The upstream load of the current group is saturated, please try again later", + Code: "upstream_load_saturated", + Type: middleware.ErrorTypeAIPROXY, + }, + }) + return } - meta := middleware.NewMetaByContext(c) + + meta := middleware.NewMetaByContext(c, channel) bizErr := 
relayHelper(meta, c) if bizErr == nil { return } - lastFailedChannelID := meta.Channel.ID - requestID := c.GetString(string(helper.RequestIDKey)) - retryTimes := config.GetRetryTimes() - if !shouldRetry(c, bizErr.StatusCode) { - retryTimes = 0 + failedChannelIDs := []int{channel.ID} + requestID := c.GetString(ctxkey.RequestID) + var retryTimes int64 + if shouldRetry(c, bizErr.StatusCode) { + retryTimes = config.GetRetryTimes() } for i := retryTimes; i > 0; i-- { - channel, err := dbmodel.CacheGetRandomSatisfiedChannel(meta.OriginModelName) + newChannel, err := dbmodel.CacheGetRandomSatisfiedChannel(meta.OriginModelName, failedChannelIDs...) if err != nil { - log.Errorf("get random satisfied channel failed: %+v", err) - break - } - log.Infof("using channel #%d to retry (remain times %d)", channel.ID, i) - if channel.ID == lastFailedChannelID { - continue + if !errors.Is(err, dbmodel.ErrChannelsExhausted) { + log.Errorf("get random satisfied channel failed: %+v", err) + break + } + if len(failedChannelIDs) != 1 { + break + } + newChannel = channel } + log.Warnf("using channel %s(%d) to retry (remain times %d)", newChannel.Name, newChannel.ID, i) requestBody, err := common.GetRequestBody(c.Request) if err != nil { log.Errorf("GetRequestBody failed: %+v", err) break } c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) - meta.Reset(channel) + meta.Reset(newChannel) bizErr = relayHelper(meta, c) if bizErr == nil { return } - lastFailedChannelID = channel.ID + failedChannelIDs = append(failedChannelIDs, newChannel.ID) } if bizErr != nil { message := bizErr.Message @@ -85,7 +99,7 @@ func Relay(c *gin.Context) { } c.JSON(bizErr.StatusCode, gin.H{ "error": &model.Error{ - Message: helper.MessageWithRequestID(message, requestID), + Message: middleware.MessageWithRequestID(message, requestID), Code: bizErr.Code, Param: bizErr.Param, Type: bizErr.Type, @@ -96,7 +110,8 @@ func Relay(c *gin.Context) { func shouldRetry(_ *gin.Context, statusCode int) bool { if statusCode 
== http.StatusTooManyRequests || - statusCode == http.StatusGatewayTimeout { + statusCode == http.StatusGatewayTimeout || + statusCode == http.StatusForbidden { return true } return false diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 491355c9ed9..00392c9a7c5 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -99,7 +99,7 @@ func initializeDatabases() error { } func initializeCaches() error { - if err := model.InitOptionMap(); err != nil { + if err := model.InitOption2DB(); err != nil { return err } if err := model.InitModelConfigCache(); err != nil { diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 5be742c9119..15b617dc959 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -1,7 +1,6 @@ package middleware import ( - "context" "fmt" "net/http" "slices" @@ -10,7 +9,6 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/ctxkey" - "github.com/labring/sealos/service/aiproxy/common/helper" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/relaymode" @@ -56,11 +54,6 @@ func Distribute(c *gin.Context) { ) return } - channel, err := model.CacheGetRandomSatisfiedChannel(requestModel) - if err != nil { - abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") - return - } mc, ok := model.CacheGetModelConfig(requestModel) if !ok { @@ -68,40 +61,34 @@ func Distribute(c *gin.Context) { return } modelRPM := mc.RPM - if modelRPM <= 0 { - abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" rpm is not available, please contact the administrator") - return - } - groupRPMRatio := group.RPMRatio - if groupRPMRatio <= 0 { - groupRPMRatio = 1 - } - modelRPM = int64(float64(modelRPM) * groupRPMRatio) - 
ok = ForceRateLimit( - c.Request.Context(), - fmt.Sprintf(groupModelRPMKey, group.ID, requestModel), - modelRPM, - time.Minute, - ) - if !ok { - abortWithMessage(c, http.StatusTooManyRequests, - group.ID+" is requesting too frequently", + if modelRPM > 0 { + groupRPMRatio := group.RPMRatio + if groupRPMRatio <= 0 { + groupRPMRatio = 1 + } + modelRPM = int64(float64(modelRPM) * groupRPMRatio) + ok = ForceRateLimit( + c.Request.Context(), + fmt.Sprintf(groupModelRPMKey, group.ID, requestModel), + modelRPM, + time.Minute, ) - return + if !ok { + abortWithMessage(c, http.StatusTooManyRequests, + group.ID+" is requesting too frequently", + ) + return + } } - c.Set(string(ctxkey.OriginalModel), requestModel) - ctx := context.WithValue(c.Request.Context(), ctxkey.OriginalModel, requestModel) - c.Request = c.Request.WithContext(ctx) - c.Set(ctxkey.Channel, channel) + c.Set(ctxkey.OriginalModel, requestModel) c.Next() } -func NewMetaByContext(c *gin.Context) *meta.Meta { - channel := c.MustGet(ctxkey.Channel).(*model.Channel) - originalModel := c.MustGet(string(ctxkey.OriginalModel)).(string) - requestID := c.GetString(string(helper.RequestIDKey)) +func NewMetaByContext(c *gin.Context, channel *model.Channel) *meta.Meta { + originalModel := c.MustGet(ctxkey.OriginalModel).(string) + requestID := c.GetString(ctxkey.RequestID) group := c.MustGet(ctxkey.Group).(*model.GroupCache) token := c.MustGet(ctxkey.Token).(*model.TokenCache) return meta.NewMeta( diff --git a/service/aiproxy/middleware/request-id.go b/service/aiproxy/middleware/request-id.go index 159ebb220c8..4296b3490ec 100644 --- a/service/aiproxy/middleware/request-id.go +++ b/service/aiproxy/middleware/request-id.go @@ -1,15 +1,26 @@ package middleware import ( + "strconv" + "time" + "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common/helper" + "github.com/labring/sealos/service/aiproxy/common/ctxkey" + "github.com/labring/sealos/service/aiproxy/common/random" ) -func RequestID(c 
*gin.Context) { - id := helper.GenRequestID() - c.Set(string(helper.RequestIDKey), id) - c.Header(string(helper.RequestIDKey), id) +func GenRequestID() string { + return strconv.FormatInt(time.Now().UnixMilli(), 10) + random.GetRandomNumberString(4) +} + +func SetRequestID(c *gin.Context, id string) { + c.Set(ctxkey.RequestID, id) + c.Header(ctxkey.RequestID, id) log := GetLogger(c) SetLogRequestIDField(log.Data, id) - c.Next() +} + +func RequestID(c *gin.Context) { + id := GenRequestID() + SetRequestID(c, id) } diff --git a/service/aiproxy/middleware/utils.go b/service/aiproxy/middleware/utils.go index e80a6177867..4fc44655207 100644 --- a/service/aiproxy/middleware/utils.go +++ b/service/aiproxy/middleware/utils.go @@ -6,7 +6,7 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" + "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -14,11 +14,15 @@ const ( ErrorTypeAIPROXY = "aiproxy_error" ) +func MessageWithRequestID(message string, id string) string { + return fmt.Sprintf("%s (request id: %s)", message, id) +} + func abortWithMessage(c *gin.Context, statusCode int, message string) { GetLogger(c).Error(message) c.JSON(statusCode, gin.H{ "error": &model.Error{ - Message: helper.MessageWithRequestID(message, c.GetString(string(helper.RequestIDKey))), + Message: MessageWithRequestID(message, c.GetString(ctxkey.RequestID)), Type: ErrorTypeAIPROXY, }, }) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 3186090207b..41bc423540d 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -444,7 +444,7 @@ func initializeChannelModels(channel *Channel) { if len(missingModels) > 0 { slices.Sort(missingModels) - log.Errorf("model config not found or rpm less than 0: %v", missingModels) + log.Errorf("model config not found: %v", missingModels) } 
slices.Sort(findedModels) channel.Models = findedModels @@ -593,11 +593,32 @@ func SyncChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Du } } +func filterChannels(channels []*Channel, ignoreChannel ...int) []*Channel { + filtered := make([]*Channel, 0) + for _, channel := range channels { + if slices.Contains(ignoreChannel, channel.ID) { + continue + } + filtered = append(filtered, channel) + } + return filtered +} + +var ( + ErrChannelsNotFound = errors.New("channels not found") + ErrChannelsExhausted = errors.New("channels exhausted") +) + //nolint:gosec -func CacheGetRandomSatisfiedChannel(model string) (*Channel, error) { - channels := GetEnabledModel2Channels()[model] +func CacheGetRandomSatisfiedChannel(model string, ignoreChannel ...int) (*Channel, error) { + _channels := GetEnabledModel2Channels()[model] + if len(_channels) == 0 { + return nil, ErrChannelsNotFound + } + + channels := filterChannels(_channels, ignoreChannel...) if len(channels) == 0 { - return nil, errors.New("model not found") + return nil, ErrChannelsExhausted } if len(channels) == 1 { diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 5656d50cfc3..4a04d7258d1 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -8,7 +8,6 @@ import ( json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -59,7 +58,7 @@ func (c *Channel) BeforeSave(tx *gorm.DB) (err error) { return err } if len(missingModels) > 0 { - return fmt.Errorf("model config not found or rpm less than 0: %v", missingModels) + return fmt.Errorf("model config not found: %v", missingModels) } return nil } @@ -74,7 +73,7 @@ func checkModelConfig(tx *gorm.DB, models []string) ([]string, []string, error) return models, nil, nil } - where := tx.Model(&ModelConfig{}).Where("model IN ? 
AND rpm > 0", models) + where := tx.Model(&ModelConfig{}).Where("model IN ?", models) var count int64 if err := where.Count(&count).Error; err != nil { return nil, nil, err @@ -229,7 +228,7 @@ func SearchChannels(keyword string, startIdx int, num int, onlyDisabled bool, om if id == 0 { conditions = append(conditions, "id = ?") - values = append(values, helper.String2Int(keyword)) + values = append(values, String2Int(keyword)) } if name == "" { if common.UsingPostgreSQL { @@ -249,7 +248,7 @@ func SearchChannels(keyword string, startIdx int, num int, onlyDisabled bool, om } if channelType == 0 { conditions = append(conditions, "type = ?") - values = append(values, helper.String2Int(keyword)) + values = append(values, String2Int(keyword)) } if baseURL == "" { if common.UsingPostgreSQL { diff --git a/service/aiproxy/model/consumeerr.go b/service/aiproxy/model/consumeerr.go index 36e5f2b46f8..9843b739381 100644 --- a/service/aiproxy/model/consumeerr.go +++ b/service/aiproxy/model/consumeerr.go @@ -7,7 +7,6 @@ import ( json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" ) type ConsumeError struct { @@ -82,7 +81,7 @@ func SearchConsumeError(keyword string, requestID string, group string, tokenNam if tokenID == 0 { conditions = append(conditions, "token_id = ?") - values = append(values, helper.String2Int(keyword)) + values = append(values, String2Int(keyword)) } if requestID == "" { if common.UsingPostgreSQL { diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 1e0156f5439..680bbc4072d 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -11,7 +11,6 @@ import ( "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/helper" ) type RequestDetail struct { @@ -288,7 +287,7 @@ func SearchLogs(keyword string, page int, perPage 
int, endpoint string, groupID var conditions []string var values []interface{} - if num := helper.String2Int(keyword); num != 0 { + if num := String2Int(keyword); num != 0 { if channelID == 0 { conditions = append(conditions, "channel_id = ?") values = append(values, num) @@ -418,7 +417,7 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, endpoi var conditions []string var values []interface{} - if num := helper.String2Int(keyword); num != 0 { + if num := String2Int(keyword); num != 0 { if channelID == 0 { conditions = append(conditions, "channel_id = ?") values = append(values, num) diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index ece76af65f0..1fb5c0aa43e 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -26,13 +26,6 @@ type ModelConfig struct { RPM int64 `json:"rpm"` } -func (c *ModelConfig) BeforeSave(_ *gorm.DB) error { - if c.RPM <= 0 { - return fmt.Errorf("%s rpm must be greater than 0", c.Model) - } - return nil -} - func (c *ModelConfig) MarshalJSON() ([]byte, error) { type Alias ModelConfig return json.Marshal(&struct { diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index e377efebd7a..cac4695f185 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -28,25 +28,26 @@ func GetAllOption() ([]*Option, error) { return options, err } -func InitOptionMap() error { - config.OptionMapRWMutex.Lock() - config.OptionMap = make(map[string]string) - config.OptionMap["LogDetailStorageHours"] = strconv.FormatInt(config.GetLogDetailStorageHours(), 10) - config.OptionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) - config.OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(config.GetAutomaticDisableChannelEnabled()) - config.OptionMap["AutomaticEnableChannelWhenTestSucceedEnabled"] = strconv.FormatBool(config.GetAutomaticEnableChannelWhenTestSucceedEnabled()) - 
config.OptionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.GetApproximateTokenEnabled()) - config.OptionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) - config.OptionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) - config.OptionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) +var OptionMap = make(map[string]string) + +func InitOption2DB() error { + OptionMap["LogDetailStorageHours"] = strconv.FormatInt(config.GetLogDetailStorageHours(), 10) + OptionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) + OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(config.GetAutomaticDisableChannelEnabled()) + OptionMap["AutomaticEnableChannelWhenTestSucceedEnabled"] = strconv.FormatBool(config.GetAutomaticEnableChannelWhenTestSucceedEnabled()) + OptionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.GetApproximateTokenEnabled()) + OptionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) + OptionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) + timeoutWithModelTypeJSON, _ := json.Marshal(config.GetTimeoutWithModelType()) + OptionMap["TimeoutWithModelType"] = conv.BytesToString(timeoutWithModelTypeJSON) + OptionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) defaultChannelModelsJSON, _ := json.Marshal(config.GetDefaultChannelModels()) - config.OptionMap["DefaultChannelModels"] = conv.BytesToString(defaultChannelModelsJSON) + OptionMap["DefaultChannelModels"] = conv.BytesToString(defaultChannelModelsJSON) defaultChannelModelMappingJSON, _ := json.Marshal(config.GetDefaultChannelModelMapping()) - config.OptionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) - config.OptionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() - config.OptionMap["GeminiVersion"] = config.GetGeminiVersion() - config.OptionMap["GroupMaxTokenNum"] = 
strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) - config.OptionMapRWMutex.Unlock() + OptionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) + OptionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() + OptionMap["GeminiVersion"] = config.GetGeminiVersion() + OptionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) err := loadOptionsFromDatabase(true) if err != nil { return err @@ -55,9 +56,7 @@ func InitOptionMap() error { } func storeOptionMap() error { - config.OptionMapRWMutex.Lock() - defer config.OptionMapRWMutex.Unlock() - for key, value := range config.OptionMap { + for key, value := range OptionMap { err := saveOption(key, value) if err != nil { return err @@ -72,9 +71,18 @@ func loadOptionsFromDatabase(isInit bool) error { return err } for _, option := range options { - err := updateOptionMap(option.Key, option.Value, isInit) - if err != nil && !errors.Is(err, ErrUnknownOptionKey) { - log.Errorf("failed to update option: %s, value: %s, error: %s", option.Key, option.Value, err.Error()) + err := updateOption(option.Key, option.Value, isInit) + if err != nil { + if !errors.Is(err, ErrUnknownOptionKey) { + return fmt.Errorf("failed to update option: %s, value: %s, error: %w", option.Key, option.Value, err) + } + if isInit { + log.Warnf("unknown option: %s, value: %s", option.Key, option.Value) + } + continue + } + if isInit { + delete(OptionMap, option.Key) } } return nil @@ -90,7 +98,7 @@ func SyncOptions(ctx context.Context, wg *sync.WaitGroup, frequency time.Duratio case <-ctx.Done(): return case <-ticker.C: - if err := loadOptionsFromDatabase(true); err != nil { + if err := loadOptionsFromDatabase(false); err != nil { log.Error("failed to sync options from database: " + err.Error()) } } @@ -107,7 +115,7 @@ func saveOption(key string, value string) error { } func UpdateOption(key string, value string) error { - err := updateOptionMap(key, value, false) + err := 
updateOption(key, value, false) if err != nil { return err } @@ -135,10 +143,8 @@ func isTrue(value string) bool { return result } -func updateOptionMap(key string, value string, isInit bool) (err error) { - config.OptionMapRWMutex.Lock() - defer config.OptionMapRWMutex.Unlock() - config.OptionMap[key] = value +//nolint:gocyclo +func updateOption(key string, value string, isInit bool) (err error) { switch key { case "LogDetailStorageHours": logDetailStorageHours, err := strconv.ParseInt(value, 10, 64) @@ -195,11 +201,11 @@ func updateOptionMap(key string, value string, isInit bool) (err error) { } if !isInit && len(missingModels) > 0 { sort.Strings(missingModels) - return fmt.Errorf("model config not found or rpm less than 0: %v", missingModels) + return fmt.Errorf("model config not found: %v", missingModels) } if len(missingModels) > 0 { sort.Strings(missingModels) - log.Errorf("model config not found or rpm less than 0: %v", missingModels) + log.Errorf("model config not found: %v", missingModels) } allowedNewModels := make(map[int][]string) for t, ms := range newModels { @@ -223,6 +229,13 @@ func updateOptionMap(key string, value string, isInit bool) (err error) { return err } config.SetRetryTimes(retryTimes) + case "TimeoutWithModelType": + var newTimeoutWithModelType map[int]int64 + err := json.Unmarshal(conv.StringToBytes(value), &newTimeoutWithModelType) + if err != nil { + return err + } + config.SetTimeoutWithModelType(newTimeoutWithModelType) default: return ErrUnknownOptionKey } diff --git a/service/aiproxy/model/utils.go b/service/aiproxy/model/utils.go index a12d0cae9b3..f06c9531b3d 100644 --- a/service/aiproxy/model/utils.go +++ b/service/aiproxy/model/utils.go @@ -4,6 +4,7 @@ import ( "database/sql/driver" "errors" "fmt" + "strconv" "strings" "time" @@ -131,3 +132,14 @@ func (ns EmptyNullString) Value() (driver.Value, error) { } return string(ns), nil } + +func String2Int(keyword string) int { + if keyword == "" { + return 0 + } + i, err := 
strconv.Atoi(keyword) + if err != nil { + return 0 + } + return i +} diff --git a/service/aiproxy/relay/adaptor/ali/image.go b/service/aiproxy/relay/adaptor/ali/image.go index 92c7d048a87..55e86bad655 100644 --- a/service/aiproxy/relay/adaptor/ali/image.go +++ b/service/aiproxy/relay/adaptor/ali/image.go @@ -11,7 +11,6 @@ import ( "github.com/gin-gonic/gin" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/helper" "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" @@ -168,7 +167,7 @@ func asyncTaskWait(ctx context.Context, taskID string, key string) (*TaskRespons func responseAli2OpenAIImage(ctx context.Context, response *TaskResponse, responseFormat string) *openai.ImageResponse { imageResponse := openai.ImageResponse{ - Created: helper.GetTimestamp(), + Created: time.Now().Unix(), } for _, data := range response.Output.Results { diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 00a018455c4..b19cc12eb30 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -4,6 +4,7 @@ import ( "bufio" "net/http" "slices" + "time" json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/conv" @@ -12,7 +13,6 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -256,7 +256,7 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse { ID: "chatcmpl-" + claudeResponse.ID, Model: claudeResponse.Model, Object: "chat.completion", - Created: helper.GetTimestamp(), + Created: 
time.Now().Unix(), Choices: []*openai.TextResponseChoice{&choice}, } return &fullTextResponse @@ -267,7 +267,7 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er log := middleware.GetLogger(c) - createdTime := helper.GetTimestamp() + createdTime := time.Now().Unix() scanner := bufio.NewScanner(resp.Body) scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { diff --git a/service/aiproxy/relay/adaptor/aws/claude/main.go b/service/aiproxy/relay/adaptor/aws/claude/main.go index 45f974fc908..a483a95a8e6 100644 --- a/service/aiproxy/relay/adaptor/aws/claude/main.go +++ b/service/aiproxy/relay/adaptor/aws/claude/main.go @@ -4,6 +4,7 @@ package aws import ( "io" "net/http" + "time" json "github.com/json-iterator/go" @@ -12,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types" "github.com/gin-gonic/gin" "github.com/jinzhu/copier" - "github.com/labring/sealos/service/aiproxy/common/helper" "github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -147,7 +147,7 @@ func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { log := middleware.GetLogger(c) - createdTime := helper.GetTimestamp() + createdTime := time.Now().Unix() originModelName := meta.OriginModelName awsModelID, err := awsModelID(meta.ActualModelName) if err != nil { diff --git a/service/aiproxy/relay/adaptor/aws/llama3/main.go b/service/aiproxy/relay/adaptor/aws/llama3/main.go index f60e43a3528..8648788943f 100644 --- a/service/aiproxy/relay/adaptor/aws/llama3/main.go +++ b/service/aiproxy/relay/adaptor/aws/llama3/main.go @@ -6,20 +6,19 @@ import ( "io" "net/http" "text/template" + "time" json "github.com/json-iterator/go" - 
"github.com/labring/sealos/service/aiproxy/common/random" - "github.com/labring/sealos/service/aiproxy/common/render" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/model" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" + "github.com/labring/sealos/service/aiproxy/common/random" + "github.com/labring/sealos/service/aiproxy/common/render" + "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/aws/utils" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -156,7 +155,7 @@ func ResponseLlama2OpenAI(llamaResponse *Response) *openai.TextResponse { fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + random.GetUUID(), Object: "chat.completion", - Created: helper.GetTimestamp(), + Created: time.Now().Unix(), Choices: []*openai.TextResponseChoice{&choice}, } return &fullTextResponse @@ -165,7 +164,7 @@ func ResponseLlama2OpenAI(llamaResponse *Response) *openai.TextResponse { func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { log := middleware.GetLogger(c) - createdTime := helper.GetTimestamp() + createdTime := time.Now().Unix() awsModelID, err := awsModelID(meta.ActualModelName) if err != nil { return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil diff --git a/service/aiproxy/relay/adaptor/cohere/main.go b/service/aiproxy/relay/adaptor/cohere/main.go index 61a65c51f42..d4b11148313 100644 --- a/service/aiproxy/relay/adaptor/cohere/main.go +++ b/service/aiproxy/relay/adaptor/cohere/main.go @@ -5,15 +5,14 @@ import ( "fmt" "net/http" 
"strings" + "time" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/middleware" - - "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/helper" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -125,7 +124,7 @@ func ResponseCohere2OpenAI(cohereResponse *Response) *openai.TextResponse { ID: "chatcmpl-" + cohereResponse.ResponseID, Model: "model", Object: "chat.completion", - Created: helper.GetTimestamp(), + Created: time.Now().Unix(), Choices: []*openai.TextResponseChoice{&choice}, } return &fullTextResponse @@ -136,7 +135,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC log := middleware.GetLogger(c) - createdTime := helper.GetTimestamp() + createdTime := time.Now().Unix() scanner := bufio.NewScanner(resp.Body) scanner.Split(bufio.ScanLines) diff --git a/service/aiproxy/relay/adaptor/coze/main.go b/service/aiproxy/relay/adaptor/coze/main.go index be7ce381e7c..296769d2d8a 100644 --- a/service/aiproxy/relay/adaptor/coze/main.go +++ b/service/aiproxy/relay/adaptor/coze/main.go @@ -4,15 +4,14 @@ import ( "bufio" "net/http" "strings" - - json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/render" - "github.com/labring/sealos/service/aiproxy/middleware" + "time" "github.com/gin-gonic/gin" + json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/conv" - "github.com/labring/sealos/service/aiproxy/common/helper" + "github.com/labring/sealos/service/aiproxy/common/render" + "github.com/labring/sealos/service/aiproxy/middleware" 
"github.com/labring/sealos/service/aiproxy/relay/adaptor/coze/constant/messagetype" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/model" @@ -100,7 +99,7 @@ func ResponseCoze2OpenAI(cozeResponse *Response) *openai.TextResponse { ID: "chatcmpl-" + cozeResponse.ConversationID, Model: "coze-bot", Object: "chat.completion", - Created: helper.GetTimestamp(), + Created: time.Now().Unix(), Choices: []*openai.TextResponseChoice{&choice}, } return &fullTextResponse @@ -112,7 +111,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC log := middleware.GetLogger(c) var responseText string - createdTime := helper.GetTimestamp() + createdTime := time.Now().Unix() scanner := bufio.NewScanner(resp.Body) scanner.Split(bufio.ScanLines) diff --git a/service/aiproxy/relay/adaptor/gemini/adaptor.go b/service/aiproxy/relay/adaptor/gemini/adaptor.go index 05686c38a39..f87003f4881 100644 --- a/service/aiproxy/relay/adaptor/gemini/adaptor.go +++ b/service/aiproxy/relay/adaptor/gemini/adaptor.go @@ -8,7 +8,6 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/helper" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -21,8 +20,15 @@ type Adaptor struct{} const baseURL = "https://generativelanguage.googleapis.com" +func AssignOrDefault(value string, defaultValue string) string { + if len(value) != 0 { + return value + } + return defaultValue +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - version := helper.AssignOrDefault(meta.Channel.Config.APIVersion, config.GetGeminiVersion()) + version := AssignOrDefault(meta.Channel.Config.APIVersion, config.GetGeminiVersion()) var action string switch meta.Mode { case relaymode.Embeddings: diff --git 
a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 9bb3dc6bd35..b4a28216b6e 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -8,25 +8,23 @@ import ( "io" "net/http" "strings" + "time" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/conv" - "github.com/labring/sealos/service/aiproxy/common/render" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/helper" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/common/random" + "github.com/labring/sealos/service/aiproxy/common/render" + "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/labring/sealos/service/aiproxy/relay/utils" log "github.com/sirupsen/logrus" - - "github.com/gin-gonic/gin" ) // https://ai.google.dev/docs/gemini_api_overview?hl=zh-cn @@ -207,7 +205,7 @@ func CountTokens(ctx context.Context, meta *meta.Meta, chat []ChatContent) (int, if err != nil { return 0, err } - version := helper.AssignOrDefault(meta.Channel.Config.APIVersion, config.GetGeminiVersion()) + version := AssignOrDefault(meta.Channel.Config.APIVersion, config.GetGeminiVersion()) u := meta.Channel.BaseURL if u == "" { u = baseURL @@ -295,7 +293,7 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse { fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + random.GetUUID(), Object: "chat.completion", - Created: 
helper.GetTimestamp(), + Created: time.Now().Unix(), Choices: make([]*openai.TextResponseChoice, 0, len(response.Candidates)), } for i, candidate := range response.Candidates { @@ -327,7 +325,7 @@ func streamResponseGeminiChat2OpenAI(meta *meta.Meta, geminiResponse *ChatRespon // choice.FinishReason = &constant.StopFinishReason var response openai.ChatCompletionsStreamResponse response.ID = "chatcmpl-" + random.GetUUID() - response.Created = helper.GetTimestamp() + response.Created = time.Now().Unix() response.Object = "chat.completion.chunk" response.Model = meta.OriginModelName response.Choices = []*openai.ChatCompletionsStreamResponseChoice{&choice} diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index dc7a6c77011..2452afa038a 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -6,18 +6,16 @@ import ( "io" "net/http" "strings" + "time" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/conv" + "github.com/labring/sealos/service/aiproxy/common/image" + "github.com/labring/sealos/service/aiproxy/common/random" "github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/middleware" - - "github.com/labring/sealos/service/aiproxy/common/helper" - "github.com/labring/sealos/service/aiproxy/common/random" - - "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -92,7 +90,7 @@ func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse { ID: "chatcmpl-" + random.GetUUID(), Model: response.Model, Object: "chat.completion", - 
Created: helper.GetTimestamp(), + Created: time.Now().Unix(), Choices: []*openai.TextResponseChoice{&choice}, Usage: relaymodel.Usage{ PromptTokens: response.PromptEvalCount, @@ -113,7 +111,7 @@ func streamResponseOllama2OpenAI(ollamaResponse *ChatResponse) *openai.ChatCompl response := openai.ChatCompletionsStreamResponse{ ID: "chatcmpl-" + random.GetUUID(), Object: "chat.completion.chunk", - Created: helper.GetTimestamp(), + Created: time.Now().Unix(), Model: ollamaResponse.Model, Choices: []*openai.ChatCompletionsStreamResponseChoice{&choice}, } diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 8c850323c2c..ffbc50c44ae 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -8,10 +8,12 @@ import ( "io" "net/http" "sync" + "time" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/balance" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -240,6 +242,16 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U if err != nil { return nil, &detail, openai.ErrorWrapperWithMessage("get request url failed: "+err.Error(), "get_request_url_failed", http.StatusBadRequest) } + + timeout := config.GetTimeoutWithModelType()[meta.Mode] + if timeout > 0 { + rawRequest := c.Request + ctx, cancel := context.WithTimeout(rawRequest.Context(), time.Duration(timeout)*time.Second) + defer cancel() + c.Request = rawRequest.WithContext(ctx) + defer func() { c.Request = rawRequest }() + } + req, err := http.NewRequestWithContext(c.Request.Context(), c.Request.Method, fullRequestURL, body) if err != nil { return nil, &detail, openai.ErrorWrapperWithMessage("new request failed: "+err.Error(), 
"new_request_failed", http.StatusBadRequest) @@ -261,10 +273,13 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U resp, err := a.DoRequest(meta, c, req) if err != nil { + if errors.Is(err, context.Canceled) { + return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: request canceled by client", "request_canceled", http.StatusBadRequest) + } if errors.Is(err, context.DeadlineExceeded) { - return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "do_request_failed", http.StatusGatewayTimeout) + return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: request timeout", "request_timeout", http.StatusGatewayTimeout) } - return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "do_request_failed", http.StatusBadRequest) + return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "request_failed", http.StatusBadRequest) } if isErrorHappened(resp) { From e63b0f9ac5e5f08c6508413078257aea2c83953f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 19 Dec 2024 15:35:31 +0800 Subject: [PATCH 015/167] fix: retry channel exhausted, use first channel --- service/aiproxy/controller/relay.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 79d922e41c6..ff7ba4b7423 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -73,9 +73,6 @@ func Relay(c *gin.Context) { log.Errorf("get random satisfied channel failed: %+v", err) break } - if len(failedChannelIDs) != 1 { - break - } newChannel = channel } log.Warnf("using channel %s(%d) to retry (remain times %d)", newChannel.Name, newChannel.ID, i) From 5fe5b91143fbef1f2a8c7bf547a5012d392e200c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 19 Dec 2024 23:25:20 +0800 Subject: [PATCH 016/167] feat: init monitor --- service/aiproxy/common/config/config.go | 34 +++-- 
service/aiproxy/controller/channel-billing.go | 4 +- service/aiproxy/controller/channel.go | 8 +- service/aiproxy/controller/relay.go | 30 +++- service/aiproxy/model/cache.go | 5 +- service/aiproxy/model/channel.go | 57 ++----- service/aiproxy/model/option.go | 3 - service/aiproxy/monitor/model.go | 139 ++++++++++++++++++ .../aiproxy/relay/adaptor/gemini/adaptor.go | 17 +-- .../aiproxy/relay/adaptor/gemini/constants.go | 8 +- service/aiproxy/relay/adaptor/gemini/main.go | 8 +- 11 files changed, 224 insertions(+), 89 deletions(-) create mode 100644 service/aiproxy/monitor/model.go diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index d2a59c0f9ad..f0ed206aa7c 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -31,6 +31,10 @@ var ( var ( // 重试次数 retryTimes atomic.Int64 + // 模型可重试的失败次数上限 + modelFailDisableTimes atomic.Int64 + // 模型禁用时间 + modelFailDisableTime atomic.Int64 // 模型类型超时时间,单位秒 timeoutWithModelType atomic.Value ) @@ -43,6 +47,22 @@ func SetRetryTimes(times int64) { retryTimes.Store(times) } +func GetModelFailDisableTimes() int64 { + return modelFailDisableTimes.Load() +} + +func SetModelFailDisableTimes(times int64) { + modelFailDisableTimes.Store(times) +} + +func GetModelFailDisableTime() int64 { + return modelFailDisableTime.Load() +} + +func SetModelFailDisableTime(time int64) { + modelFailDisableTime.Store(time) +} + func init() { timeoutWithModelType.Store(make(map[int]int64)) } @@ -153,14 +173,10 @@ func SetGroupMaxTokenNum(num int32) { groupMaxTokenNum.Store(num) } -var ( - geminiSafetySetting atomic.Value - geminiVersion atomic.Value -) +var geminiSafetySetting atomic.Value func init() { geminiSafetySetting.Store("BLOCK_NONE") - geminiVersion.Store("v1beta") } func GetGeminiSafetySetting() string { @@ -171,14 +187,6 @@ func SetGeminiSafetySetting(setting string) { geminiSafetySetting.Store(setting) } -func GetGeminiVersion() string { - return 
geminiVersion.Load().(string) -} - -func SetGeminiVersion(version string) { - geminiVersion.Store(version) -} - var billingEnabled atomic.Bool func init() { diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index bf6247be234..8ec189e05a9 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -48,7 +48,7 @@ func UpdateChannelBalance(c *gin.Context) { }) return } - channel, err := model.GetChannelByID(id, false) + channel, err := model.GetChannelByID(id) if err != nil { c.JSON(http.StatusOK, middleware.APIResponse{ Success: false, @@ -72,7 +72,7 @@ func UpdateChannelBalance(c *gin.Context) { } func updateAllChannelsBalance() error { - channels, err := model.GetAllChannels(false, false) + channels, err := model.GetAllChannels() if err != nil { return err } diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index 572a3a061d3..e1c706c3eca 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -35,7 +35,7 @@ func GetChannels(c *gin.Context) { channelType, _ := strconv.Atoi(c.Query("channel_type")) baseURL := c.Query("base_url") order := c.Query("order") - channels, total, err := model.GetChannels(p*perPage, perPage, false, false, id, name, key, channelType, baseURL, order) + channels, total, err := model.GetChannels(p*perPage, perPage, id, name, key, channelType, baseURL, order) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -47,7 +47,7 @@ func GetChannels(c *gin.Context) { } func GetAllChannels(c *gin.Context) { - channels, err := model.GetAllChannels(false, false) + channels, err := model.GetAllChannels() if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -93,7 +93,7 @@ func SearchChannels(c *gin.Context) { channelType, _ := strconv.Atoi(c.Query("channel_type")) baseURL := c.Query("base_url") order := 
c.Query("order") - channels, total, err := model.SearchChannels(keyword, p*perPage, perPage, false, false, id, name, key, channelType, baseURL, order) + channels, total, err := model.SearchChannels(keyword, p*perPage, perPage, id, name, key, channelType, baseURL, order) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -110,7 +110,7 @@ func GetChannel(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - channel, err := model.GetChannelByID(id, false) + channel, err := model.GetChannelByID(id) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index ff7ba4b7423..60c7ee8966c 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -5,6 +5,7 @@ import ( "errors" "io" "net/http" + "time" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" @@ -12,6 +13,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/middleware" dbmodel "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/monitor" "github.com/labring/sealos/service/aiproxy/relay/controller" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" @@ -43,7 +45,18 @@ func Relay(c *gin.Context) { log := middleware.GetLogger(c) requestModel := c.MustGet(string(ctxkey.OriginalModel)).(string) - channel, err := dbmodel.CacheGetRandomSatisfiedChannel(requestModel) + + ids, err := monitor.GetChannelsWithErrors(c.Request.Context(), requestModel, 10*time.Minute, 1) + if err != nil { + log.Errorf("get channels with errors failed: %+v", err) + } + + failedChannelIDs := []int{} + for _, id := range ids { + failedChannelIDs = append(failedChannelIDs, int(id)) + } + + channel, err := dbmodel.CacheGetRandomSatisfiedChannel(requestModel, 
failedChannelIDs...) if err != nil { c.JSON(http.StatusServiceUnavailable, gin.H{ "error": &model.Error{ @@ -58,17 +71,28 @@ func Relay(c *gin.Context) { meta := middleware.NewMetaByContext(c, channel) bizErr := relayHelper(meta, c) if bizErr == nil { + err = monitor.ClearChannelErrors(c.Request.Context(), requestModel, channel.ID) + if err != nil { + log.Errorf("clear channel errors failed: %+v", err) + } return } - failedChannelIDs := []int{channel.ID} + failedChannelIDs = append(failedChannelIDs, channel.ID) requestID := c.GetString(ctxkey.RequestID) var retryTimes int64 if shouldRetry(c, bizErr.StatusCode) { + err = monitor.AddError(c.Request.Context(), requestModel, int64(channel.ID), 10*time.Second) + if err != nil { + log.Errorf("add error failed: %+v", err) + } retryTimes = config.GetRetryTimes() } for i := retryTimes; i > 0; i-- { - newChannel, err := dbmodel.CacheGetRandomSatisfiedChannel(meta.OriginModelName, failedChannelIDs...) + newChannel, err := dbmodel.CacheGetRandomSatisfiedChannel(requestModel, failedChannelIDs...) if err != nil { + if errors.Is(err, dbmodel.ErrChannelsNotFound) { + break + } if !errors.Is(err, dbmodel.ErrChannelsExhausted) { log.Errorf("get random satisfied channel failed: %+v", err) break diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 41bc423540d..0a7b2e3bc90 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -390,7 +390,7 @@ func InitChannelCache() error { func LoadEnabledChannels() ([]*Channel, error) { var channels []*Channel - err := DB.Where("status = ?", ChannelStatusEnabled).Find(&channels).Error + err := DB.Where("status = ? 
or status = ?", ChannelStatusEnabled, ChannelStatusFail).Find(&channels).Error if err != nil { return nil, err } @@ -596,6 +596,9 @@ func SyncChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Du func filterChannels(channels []*Channel, ignoreChannel ...int) []*Channel { filtered := make([]*Channel, 0) for _, channel := range channels { + if channel.Status != ChannelStatusEnabled { + continue + } if slices.Contains(ignoreChannel, channel.ID) { continue } diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 4a04d7258d1..97a2563a58e 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -17,10 +17,10 @@ const ( ) const ( - ChannelStatusUnknown = 0 - ChannelStatusEnabled = 1 // don't use 0, 0 is the default value! - ChannelStatusManuallyDisabled = 2 // also don't use 0 - ChannelStatusAutoDisabled = 3 + ChannelStatusUnknown = 0 + ChannelStatusEnabled = 1 // don't use 0, 0 is the default value! + ChannelStatusDisabled = 2 // also don't use 0 + ChannelStatusFail = 3 ) type Channel struct { @@ -146,29 +146,19 @@ type ChannelConfig struct { SK string `json:"sk,omitempty"` AK string `json:"ak,omitempty"` UserID string `json:"user_id,omitempty"` - APIVersion string `json:"api_version,omitempty"` Plugin string `json:"plugin,omitempty"` VertexAIProjectID string `json:"vertex_ai_project_id,omitempty"` VertexAIADC string `json:"vertex_ai_adc,omitempty"` } -func GetAllChannels(onlyDisabled bool, omitKey bool) (channels []*Channel, err error) { +func GetAllChannels() (channels []*Channel, err error) { tx := DB.Model(&Channel{}) - if onlyDisabled { - tx = tx.Where("status = ? 
or status = ?", ChannelStatusAutoDisabled, ChannelStatusManuallyDisabled) - } - if omitKey { - tx = tx.Omit("key") - } err = tx.Order("id desc").Find(&channels).Error return channels, err } -func GetChannels(startIdx int, num int, onlyDisabled bool, omitKey bool, id int, name string, key string, channelType int, baseURL string, order string) (channels []*Channel, total int64, err error) { +func GetChannels(startIdx int, num int, id int, name string, key string, channelType int, baseURL string, order string) (channels []*Channel, total int64, err error) { tx := DB.Model(&Channel{}) - if onlyDisabled { - tx = tx.Where("status = ? or status = ?", ChannelStatusAutoDisabled, ChannelStatusManuallyDisabled) - } if id != 0 { tx = tx.Where("id = ?", id) } @@ -188,9 +178,6 @@ func GetChannels(startIdx int, num int, onlyDisabled bool, omitKey bool, id int, if err != nil { return nil, 0, err } - if omitKey { - tx = tx.Omit("key") - } if total <= 0 { return nil, 0, nil } @@ -198,11 +185,8 @@ func GetChannels(startIdx int, num int, onlyDisabled bool, omitKey bool, id int, return channels, total, err } -func SearchChannels(keyword string, startIdx int, num int, onlyDisabled bool, omitKey bool, id int, name string, key string, channelType int, baseURL string, order string) (channels []*Channel, total int64, err error) { +func SearchChannels(keyword string, startIdx int, num int, id int, name string, key string, channelType int, baseURL string, order string) (channels []*Channel, total int64, err error) { tx := DB.Model(&Channel{}) - if onlyDisabled { - tx = tx.Where("status = ? 
or status = ?", ChannelStatusAutoDisabled, ChannelStatusManuallyDisabled) - } // Handle exact match conditions for non-zero values if id != 0 { @@ -268,9 +252,6 @@ func SearchChannels(keyword string, startIdx int, num int, onlyDisabled bool, om if err != nil { return nil, 0, err } - if omitKey { - tx = tx.Omit("key") - } if total <= 0 { return nil, 0, nil } @@ -278,14 +259,9 @@ func SearchChannels(keyword string, startIdx int, num int, onlyDisabled bool, om return channels, total, err } -func GetChannelByID(id int, omitKey bool) (*Channel, error) { +func GetChannelByID(id int) (*Channel, error) { channel := Channel{ID: id} - var err error - if omitKey { - err = DB.Omit("key").First(&channel, "id = ?", id).Error - } else { - err = DB.First(&channel, "id = ?", id).Error - } + err := DB.First(&channel, "id = ?", id).Error return &channel, HandleNotFound(err, ErrChannelNotFound) } @@ -372,14 +348,6 @@ func UpdateChannelStatusByID(id int, status int) error { return HandleUpdateResult(result, ErrChannelNotFound) } -func DisableChannelByID(id int) error { - return UpdateChannelStatusByID(id, ChannelStatusAutoDisabled) -} - -func EnableChannelByID(id int) error { - return UpdateChannelStatusByID(id, ChannelStatusEnabled) -} - func UpdateChannelUsedAmount(id int, amount float64, requestCount int) error { result := DB.Model(&Channel{}).Where("id = ?", id).Updates(map[string]interface{}{ "used_amount": gorm.Expr("used_amount + ?", amount), @@ -390,6 +358,11 @@ func UpdateChannelUsedAmount(id int, amount float64, requestCount int) error { } func DeleteDisabledChannel() error { - result := DB.Where("status = ? 
or status = ?", ChannelStatusAutoDisabled, ChannelStatusManuallyDisabled).Delete(&Channel{}) + result := DB.Where("status = ?", ChannelStatusDisabled).Delete(&Channel{}) + return HandleUpdateResult(result, ErrChannelNotFound) +} + +func DeleteFailChannel() error { + result := DB.Where("status = ?", ChannelStatusFail).Delete(&Channel{}) return HandleUpdateResult(result, ErrChannelNotFound) } diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index cac4695f185..f4e47cc707a 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -46,7 +46,6 @@ func InitOption2DB() error { defaultChannelModelMappingJSON, _ := json.Marshal(config.GetDefaultChannelModelMapping()) OptionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) OptionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() - OptionMap["GeminiVersion"] = config.GetGeminiVersion() OptionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) err := loadOptionsFromDatabase(true) if err != nil { @@ -170,8 +169,6 @@ func updateOption(key string, value string, isInit bool) (err error) { config.SetGroupMaxTokenNum(int32(groupMaxTokenNum)) case "GeminiSafetySetting": config.SetGeminiSafetySetting(value) - case "GeminiVersion": - config.SetGeminiVersion(value) case "GlobalApiRateLimitNum": globalAPIRateLimitNum, err := strconv.ParseInt(value, 10, 64) if err != nil { diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go new file mode 100644 index 00000000000..267e74f9cb5 --- /dev/null +++ b/service/aiproxy/monitor/model.go @@ -0,0 +1,139 @@ +package monitor + +import ( + "context" + "time" + + "github.com/labring/sealos/service/aiproxy/common" + "github.com/redis/go-redis/v9" +) + +// 每个channelID使用单独的list存储错误时间戳 +// 使用hash存储每个channelID的错误计数 +var addErrorScript = redis.NewScript(` + local model = KEYS[1] + local channel_id = ARGV[1] + local error_time_to_live = 
tonumber(ARGV[2]) + local channel_errors_key = "model:" .. model .. ":channel:" .. channel_id .. ":errors" + local counts_key = "model:" .. model .. ":counts" + local now = redis.call("TIME") + local now_ms = tonumber(now[1]) * 1000 + math.floor(tonumber(now[2])/1000) + + -- 清理过期数据 + local expired_time = now_ms - error_time_to_live + local expired_count = 0 + local timestamps = redis.call("LRANGE", channel_errors_key, 0, -1) + for i = #timestamps, 1, -1 do + if tonumber(timestamps[i]) < expired_time then + redis.call("LREM", channel_errors_key, 1, timestamps[i]) + expired_count = expired_count + 1 + end + end + + if expired_count > 0 then + local count = redis.call("HGET", counts_key, channel_id) + if count then + count = tonumber(count) + if count > expired_count then + redis.call("HINCRBY", counts_key, channel_id, -expired_count) + else + redis.call("HDEL", counts_key, channel_id) + end + end + end + + -- 添加新的错误记录 + redis.call("LPUSH", channel_errors_key, now_ms) + redis.call("HINCRBY", counts_key, channel_id, 1) + + -- 设置过期时间 + redis.call("PEXPIRE", channel_errors_key, error_time_to_live) + redis.call("PEXPIRE", counts_key, error_time_to_live) + + return redis.status_reply("ok") +`) + +func AddError(ctx context.Context, model string, channelID int64, errorTimeToLive time.Duration) error { + if !common.RedisEnabled { + return nil + } + return addErrorScript.Run(ctx, common.RDB, []string{model}, channelID, errorTimeToLive.Milliseconds()).Err() +} + +var getChannelsWithErrorsScript = redis.NewScript(` + local model = KEYS[1] + local error_time_to_live = tonumber(ARGV[1]) + local max_errors = tonumber(ARGV[2]) + local counts_key = "model:" .. model .. 
":counts" + local now = redis.call("TIME") + local now_ms = tonumber(now[1]) * 1000 + math.floor(tonumber(now[2])/1000) + local expired_time = now_ms - error_time_to_live + + -- 获取所有channel + local counts = redis.call("HGETALL", counts_key) + local result = {} + + -- 遍历每个channel,清理过期数据并检查错误数 + for i = 1, #counts, 2 do + local channel_id = counts[i] + local channel_errors_key = "model:" .. model .. ":channel:" .. channel_id .. ":errors" + + -- 清理过期数据 + local expired_count = 0 + local timestamps = redis.call("LRANGE", channel_errors_key, 0, -1) + for j = #timestamps, 1, -1 do + if tonumber(timestamps[j]) < expired_time then + redis.call("LREM", channel_errors_key, 1, timestamps[j]) + expired_count = expired_count + 1 + end + end + + -- 更新错误计数 + local count = tonumber(counts[i + 1]) + if expired_count > 0 then + if count > expired_count then + count = count - expired_count + redis.call("HINCRBY", counts_key, channel_id, -expired_count) + else + count = 0 + redis.call("HDEL", counts_key, channel_id) + end + end + + if count >= max_errors then + table.insert(result, channel_id) + end + end + + return result +`) + +func GetChannelsWithErrors(ctx context.Context, model string, errorTimeToLive time.Duration, maxErrors int64) ([]int64, error) { + if !common.RedisEnabled { + return nil, nil + } + result, err := getChannelsWithErrorsScript.Run(ctx, common.RDB, []string{model}, errorTimeToLive.Milliseconds(), maxErrors).Int64Slice() + if err != nil { + return nil, err + } + return result, nil +} + +var clearChannelErrorsScript = redis.NewScript(` + local model = KEYS[1] + local channel_id = ARGV[1] + local channel_errors_key = "model:" .. model .. ":channel:" .. channel_id .. ":errors" + local counts_key = "model:" .. model .. 
":counts" + + redis.call("DEL", channel_errors_key) + redis.call("HDEL", counts_key, channel_id) + + return redis.status_reply("ok") +`) + +func ClearChannelErrors(ctx context.Context, model string, channelID int) error { + if !common.RedisEnabled { + return nil + } + return clearChannelErrorsScript.Run(ctx, common.RDB, []string{model}, channelID).Err() +} diff --git a/service/aiproxy/relay/adaptor/gemini/adaptor.go b/service/aiproxy/relay/adaptor/gemini/adaptor.go index f87003f4881..c2964c1809b 100644 --- a/service/aiproxy/relay/adaptor/gemini/adaptor.go +++ b/service/aiproxy/relay/adaptor/gemini/adaptor.go @@ -7,7 +7,6 @@ import ( "net/http" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -20,15 +19,15 @@ type Adaptor struct{} const baseURL = "https://generativelanguage.googleapis.com" -func AssignOrDefault(value string, defaultValue string) string { - if len(value) != 0 { - return value +func getRequestURL(meta *meta.Meta, action string) string { + u := meta.Channel.BaseURL + if u == "" { + u = baseURL } - return defaultValue + return fmt.Sprintf("%s/%s/models/%s:%s", u, "v1beta", meta.ActualModelName, action) } func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - version := AssignOrDefault(meta.Channel.Config.APIVersion, config.GetGeminiVersion()) var action string switch meta.Mode { case relaymode.Embeddings: @@ -40,11 +39,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.GetBool("stream") { action = "streamGenerateContent?alt=sse" } - u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } - return fmt.Sprintf("%s/%s/models/%s:%s", u, version, meta.ActualModelName, action), nil + return getRequestURL(meta, action), nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req 
*http.Request) error { diff --git a/service/aiproxy/relay/adaptor/gemini/constants.go b/service/aiproxy/relay/adaptor/gemini/constants.go index 2988ca8f719..fc6b1354a5b 100644 --- a/service/aiproxy/relay/adaptor/gemini/constants.go +++ b/service/aiproxy/relay/adaptor/gemini/constants.go @@ -14,25 +14,27 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, }, { - Model: "gemini-1.0-pro", + Model: "gemini-1.5-flash", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerGoogle, }, { - Model: "gemini-1.5-flash", + Model: "gemini-1.5-pro", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerGoogle, }, { - Model: "gemini-1.5-pro", + Model: "gemini-2.0-flash-exp", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerGoogle, }, + { Model: "text-embedding-004", Type: relaymode.Embeddings, Owner: model.ModelOwnerGoogle, }, + { Model: "aqa", Type: relaymode.ChatCompletions, diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index b4a28216b6e..47fcc86018b 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -205,13 +205,7 @@ func CountTokens(ctx context.Context, meta *meta.Meta, chat []ChatContent) (int, if err != nil { return 0, err } - version := AssignOrDefault(meta.Channel.Config.APIVersion, config.GetGeminiVersion()) - u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } - countURL := fmt.Sprintf("%s/%s/models/%s:countTokens", u, version, meta.ActualModelName) - req, err := http.NewRequestWithContext(ctx, http.MethodPost, countURL, bytes.NewReader(countData)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, getRequestURL(meta, "countTokens"), bytes.NewReader(countData)) if err != nil { return 0, err } From 62b53fb947e225c5326e8f014a5a91fc35f85e3d Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 20 Dec 2024 13:33:56 +0800 Subject: [PATCH 017/167] feat: auto ban error rate and auto test unban --- 
service/aiproxy/common/config/config.go | 27 +-- service/aiproxy/controller/channel-test.go | 42 ++++- service/aiproxy/controller/channel.go | 10 ++ service/aiproxy/controller/relay.go | 26 +-- service/aiproxy/main.go | 13 ++ service/aiproxy/model/option.go | 7 + service/aiproxy/monitor/model.go | 196 ++++++++++++--------- 7 files changed, 202 insertions(+), 119 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index f0ed206aa7c..adb87d7ba89 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -1,6 +1,7 @@ package config import ( + "math" "os" "slices" "strconv" @@ -31,10 +32,8 @@ var ( var ( // 重试次数 retryTimes atomic.Int64 - // 模型可重试的失败次数上限 - modelFailDisableTimes atomic.Int64 - // 模型禁用时间 - modelFailDisableTime atomic.Int64 + // 模型错误率自动封禁 + modelErrorAutoBanRate = math.Float64bits(0.5) // 模型类型超时时间,单位秒 timeoutWithModelType atomic.Value ) @@ -43,24 +42,16 @@ func GetRetryTimes() int64 { return retryTimes.Load() } -func SetRetryTimes(times int64) { - retryTimes.Store(times) -} - -func GetModelFailDisableTimes() int64 { - return modelFailDisableTimes.Load() +func GetModelErrorAutoBanRate() float64 { + return math.Float64frombits(atomic.LoadUint64(&modelErrorAutoBanRate)) } -func SetModelFailDisableTimes(times int64) { - modelFailDisableTimes.Store(times) +func SetModelErrorAutoBanRate(rate float64) { + atomic.StoreUint64(&modelErrorAutoBanRate, math.Float64bits(rate)) } -func GetModelFailDisableTime() int64 { - return modelFailDisableTime.Load() -} - -func SetModelFailDisableTime(time int64) { - modelFailDisableTime.Store(time) +func SetRetryTimes(times int64) { + retryTimes.Store(times) } func init() { diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 50ce196aae0..efea3dca13d 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -1,6 +1,7 @@ package 
controller import ( + "context" "errors" "fmt" "io" @@ -19,6 +20,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/monitor" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/utils" log "github.com/sirupsen/logrus" @@ -51,11 +53,17 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe meta.WithChannelTest(true), ) bizErr := relayHelper(meta, newc) + success := bizErr == nil var respStr string var code int - if bizErr == nil { + if success { respStr = w.Body.String() code = w.Code + log.Infof("model %s(%d) test success, unban it", modelName, channel.ID) + err := monitor.ClearChannelModelErrors(context.Background(), modelName, channel.ID) + if err != nil { + log.Errorf("clear channel errors failed: %+v", err) + } } else { respStr = bizErr.String() code = bizErr.StatusCode @@ -67,7 +75,7 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe meta.ActualModelName, meta.Mode, time.Since(meta.RequestAt).Seconds(), - bizErr == nil, + success, respStr, code, ) @@ -349,3 +357,33 @@ func TestAllChannels(c *gin.Context) { }) } } + +func AutoTestBannedModels() { + log := log.WithFields(log.Fields{ + "auto_test_banned_models": "true", + }) + channels, err := monitor.GetAllBannedChannels(context.Background()) + if err != nil { + log.Errorf("failed to get banned channels: %s", err.Error()) + } + if len(channels) == 0 { + return + } + + for modelName, ids := range channels { + for _, id := range ids { + channel, err := model.LoadChannelByID(int(id)) + if err != nil { + log.Errorf("failed to get channel by model %s: %s", modelName, err.Error()) + continue + } + result, err := testSingleModel(channel, modelName) + if err != nil { + log.Errorf("failed to test channel %s(%d) model %s: %s", channel.Name, 
channel.ID, modelName, err.Error()) + } + if !result.Success { + log.Infof("model %s(%d) test failed", modelName, channel.ID) + } + } + } +} diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index e1c706c3eca..a4c33004dbd 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -10,7 +10,9 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/monitor" "github.com/labring/sealos/service/aiproxy/relay/channeltype" + log "github.com/sirupsen/logrus" ) func ChannelTypeNames(c *gin.Context) { @@ -223,6 +225,10 @@ func UpdateChannel(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + err = monitor.ClearChannelAllModelErrors(c.Request.Context(), id) + if err != nil { + log.Errorf("failed to clear channel all model errors: %+v", err) + } middleware.SuccessResponse(c, ch) } @@ -243,5 +249,9 @@ func UpdateChannelStatus(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + err = monitor.ClearChannelAllModelErrors(c.Request.Context(), id) + if err != nil { + log.Errorf("failed to clear channel all model errors: %+v", err) + } middleware.SuccessResponse(c, nil) } diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 60c7ee8966c..5785ed369b5 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -5,7 +5,6 @@ import ( "errors" "io" "net/http" - "time" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" @@ -18,6 +17,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/labring/sealos/service/aiproxy/relay/relaymode" + log "github.com/sirupsen/logrus" ) // https://platform.openai.com/docs/api-reference/chat @@ -41,14 
+41,22 @@ func relayHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { } } +func RelayHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { + err := relayHelper(meta, c) + if err := monitor.AddRequest(c.Request.Context(), meta.OriginModelName, int64(meta.Channel.ID), err != nil); err != nil { + log.Errorf("add request failed: %+v", err) + } + return err +} + func Relay(c *gin.Context) { log := middleware.GetLogger(c) requestModel := c.MustGet(string(ctxkey.OriginalModel)).(string) - ids, err := monitor.GetChannelsWithErrors(c.Request.Context(), requestModel, 10*time.Minute, 1) + ids, err := monitor.GetBannedChannels(c.Request.Context(), requestModel) if err != nil { - log.Errorf("get channels with errors failed: %+v", err) + log.Errorf("get %s auto banned channels failed: %+v", requestModel, err) } failedChannelIDs := []int{} @@ -69,22 +77,14 @@ func Relay(c *gin.Context) { } meta := middleware.NewMetaByContext(c, channel) - bizErr := relayHelper(meta, c) + bizErr := RelayHelper(meta, c) if bizErr == nil { - err = monitor.ClearChannelErrors(c.Request.Context(), requestModel, channel.ID) - if err != nil { - log.Errorf("clear channel errors failed: %+v", err) - } return } failedChannelIDs = append(failedChannelIDs, channel.ID) requestID := c.GetString(ctxkey.RequestID) var retryTimes int64 if shouldRetry(c, bizErr.StatusCode) { - err = monitor.AddError(c.Request.Context(), requestModel, int64(channel.ID), 10*time.Second) - if err != nil { - log.Errorf("add error failed: %+v", err) - } retryTimes = config.GetRetryTimes() } for i := retryTimes; i > 0; i-- { @@ -107,7 +107,7 @@ func Relay(c *gin.Context) { } c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) meta.Reset(newChannel) - bizErr = relayHelper(meta, c) + bizErr = RelayHelper(meta, c) if bizErr == nil { return } diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 00392c9a7c5..c2a3ce8d401 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go 
@@ -18,6 +18,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/balance" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/controller" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" relaycontroller "github.com/labring/sealos/service/aiproxy/relay/controller" @@ -137,6 +138,16 @@ func setupHTTPServer() (*http.Server, *gin.Engine) { }, server } +func autoTestBannedModels() { + log.Info("auto test banned models start") + ticker := time.NewTicker(time.Second * 15) + defer ticker.Stop() + + for range ticker.C { + controller.AutoTestBannedModels() + } +} + func main() { if err := initializeServices(); err != nil { log.Fatal("failed to initialize services: " + err.Error()) @@ -163,6 +174,8 @@ func main() { } }() + go autoTestBannedModels() + <-ctx.Done() log.Info("shutting down server...") log.Info("max wait time: 120s") diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index f4e47cc707a..9b9b4070fb7 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -38,6 +38,7 @@ func InitOption2DB() error { OptionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.GetApproximateTokenEnabled()) OptionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) OptionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) + OptionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) timeoutWithModelTypeJSON, _ := json.Marshal(config.GetTimeoutWithModelType()) OptionMap["TimeoutWithModelType"] = conv.BytesToString(timeoutWithModelTypeJSON) OptionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) @@ -226,6 +227,12 @@ func updateOption(key string, value string, isInit bool) (err error) { return err } config.SetRetryTimes(retryTimes) + case 
"ModelErrorAutoBanRate": + modelErrorAutoBanRate, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + config.SetModelErrorAutoBanRate(modelErrorAutoBanRate) case "TimeoutWithModelType": var newTimeoutWithModelType map[int]int64 err := json.Unmarshal(conv.StringToBytes(value), &newTimeoutWithModelType) diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go index 267e74f9cb5..fc14b3f4fb9 100644 --- a/service/aiproxy/monitor/model.go +++ b/service/aiproxy/monitor/model.go @@ -2,138 +2,162 @@ package monitor import ( "context" + "strings" "time" "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/redis/go-redis/v9" ) -// 每个channelID使用单独的list存储错误时间戳 -// 使用hash存储每个channelID的错误计数 -var addErrorScript = redis.NewScript(` +// 使用set存储被永久禁用的channelID +var addRequestScript = redis.NewScript(` local model = KEYS[1] local channel_id = ARGV[1] local error_time_to_live = tonumber(ARGV[2]) - local channel_errors_key = "model:" .. model .. ":channel:" .. channel_id .. ":errors" - local counts_key = "model:" .. model .. ":counts" - local now = redis.call("TIME") - local now_ms = tonumber(now[1]) * 1000 + math.floor(tonumber(now[2])/1000) + local max_error_rate = tonumber(ARGV[3]) + local is_error = tonumber(ARGV[4]) + local ban_time = tonumber(ARGV[5]) + local banned_key = "model:" .. model .. ":banned" - -- 清理过期数据 + if redis.call("SISMEMBER", banned_key, channel_id) == 1 then + return redis.status_reply("ok") + end + + local now_ms = redis.call("TIME")[1] * 1000 + math.floor(redis.call("TIME")[2]/1000) local expired_time = now_ms - error_time_to_live - local expired_count = 0 - local timestamps = redis.call("LRANGE", channel_errors_key, 0, -1) - for i = #timestamps, 1, -1 do - if tonumber(timestamps[i]) < expired_time then - redis.call("LREM", channel_errors_key, 1, timestamps[i]) - expired_count = expired_count + 1 + local channel_requests_key = "model:" .. model .. 
":channel:" .. channel_id .. ":requests" + + redis.call("ZREMRANGEBYSCORE", channel_requests_key, 0, expired_time) + + local request_data = string.format("%d:%d", now_ms, is_error) + redis.call("ZADD", channel_requests_key, now_ms, request_data) + redis.call("PEXPIRE", channel_requests_key, error_time_to_live) + + local total_count = redis.call("ZCARD", channel_requests_key) + if total_count >= 5 then + local error_count = 0 + local requests = redis.call("ZRANGE", channel_requests_key, 0, -1) + for _, request in ipairs(requests) do + local _, status = string.match(request, "(%d+):(%d+)") + if tonumber(status) == 1 then + error_count = error_count + 1 + end end - end + local error_rate = error_count / total_count - if expired_count > 0 then - local count = redis.call("HGET", counts_key, channel_id) - if count then - count = tonumber(count) - if count > expired_count then - redis.call("HINCRBY", counts_key, channel_id, -expired_count) - else - redis.call("HDEL", counts_key, channel_id) + if error_rate >= max_error_rate then + redis.call("SADD", banned_key, channel_id) + if ban_time > 0 then + redis.call("PEXPIRE", banned_key, ban_time) end + redis.call("DEL", channel_requests_key) end end - -- 添加新的错误记录 - redis.call("LPUSH", channel_errors_key, now_ms) - redis.call("HINCRBY", counts_key, channel_id, 1) - - -- 设置过期时间 - redis.call("PEXPIRE", channel_errors_key, error_time_to_live) - redis.call("PEXPIRE", counts_key, error_time_to_live) - return redis.status_reply("ok") `) -func AddError(ctx context.Context, model string, channelID int64, errorTimeToLive time.Duration) error { +func AddRequest(ctx context.Context, model string, channelID int64, isError bool) error { if !common.RedisEnabled { return nil } - return addErrorScript.Run(ctx, common.RDB, []string{model}, channelID, errorTimeToLive.Milliseconds()).Err() + errorFlag := 0 + if isError { + errorFlag = 1 + } + live := 60 * time.Second + banTime := 4 * live + return addRequestScript.Run( + ctx, + common.RDB, + 
[]string{model}, + channelID, + live.Milliseconds(), + config.GetModelErrorAutoBanRate(), + errorFlag, + banTime.Milliseconds()).Err() } -var getChannelsWithErrorsScript = redis.NewScript(` +var getBannedChannelsScript = redis.NewScript(` local model = KEYS[1] - local error_time_to_live = tonumber(ARGV[1]) - local max_errors = tonumber(ARGV[2]) - local counts_key = "model:" .. model .. ":counts" - local now = redis.call("TIME") - local now_ms = tonumber(now[1]) * 1000 + math.floor(tonumber(now[2])/1000) - local expired_time = now_ms - error_time_to_live - - -- 获取所有channel - local counts = redis.call("HGETALL", counts_key) - local result = {} + local banned_key = "model:" .. model .. ":banned" - -- 遍历每个channel,清理过期数据并检查错误数 - for i = 1, #counts, 2 do - local channel_id = counts[i] - local channel_errors_key = "model:" .. model .. ":channel:" .. channel_id .. ":errors" - - -- 清理过期数据 - local expired_count = 0 - local timestamps = redis.call("LRANGE", channel_errors_key, 0, -1) - for j = #timestamps, 1, -1 do - if tonumber(timestamps[j]) < expired_time then - redis.call("LREM", channel_errors_key, 1, timestamps[j]) - expired_count = expired_count + 1 - end - end - - -- 更新错误计数 - local count = tonumber(counts[i + 1]) - if expired_count > 0 then - if count > expired_count then - count = count - expired_count - redis.call("HINCRBY", counts_key, channel_id, -expired_count) - else - count = 0 - redis.call("HDEL", counts_key, channel_id) - end - end - - if count >= max_errors then - table.insert(result, channel_id) - end - end - - return result + return redis.call("SMEMBERS", banned_key) `) -func GetChannelsWithErrors(ctx context.Context, model string, errorTimeToLive time.Duration, maxErrors int64) ([]int64, error) { +func GetBannedChannels(ctx context.Context, model string) ([]int64, error) { if !common.RedisEnabled { return nil, nil } - result, err := getChannelsWithErrorsScript.Run(ctx, common.RDB, []string{model}, errorTimeToLive.Milliseconds(), maxErrors).Int64Slice() + 
result, err := getBannedChannelsScript.Run(ctx, common.RDB, []string{model}).Int64Slice() if err != nil { return nil, err } return result, nil } -var clearChannelErrorsScript = redis.NewScript(` +var clearChannelModelErrorsScript = redis.NewScript(` local model = KEYS[1] local channel_id = ARGV[1] - local channel_errors_key = "model:" .. model .. ":channel:" .. channel_id .. ":errors" - local counts_key = "model:" .. model .. ":counts" + local channel_requests_key = "model:" .. model .. ":channel:" .. channel_id .. ":requests" + local banned_key = "model:" .. model .. ":banned" + + redis.call("DEL", channel_requests_key) + redis.call("SREM", banned_key, channel_id) + + return redis.status_reply("ok") +`) + +func ClearChannelModelErrors(ctx context.Context, model string, channelID int) error { + if !common.RedisEnabled { + return nil + } + return clearChannelModelErrorsScript.Run(ctx, common.RDB, []string{model}, channelID).Err() +} + +var clearChannelAllModelErrorsScript = redis.NewScript(` + local channel_id = ARGV[1] + local banned_key = "model:*:banned" + local channel_requests_pattern = "model:*:channel:" .. channel_id .. 
":requests" - redis.call("DEL", channel_errors_key) - redis.call("HDEL", counts_key, channel_id) + local keys = redis.call("KEYS", channel_requests_pattern) + for _, key in ipairs(keys) do + redis.call("DEL", key) + end + redis.call("DEL", banned_key) return redis.status_reply("ok") `) -func ClearChannelErrors(ctx context.Context, model string, channelID int) error { +func ClearChannelAllModelErrors(ctx context.Context, channelID int) error { if !common.RedisEnabled { return nil } - return clearChannelErrorsScript.Run(ctx, common.RDB, []string{model}, channelID).Err() + return clearChannelAllModelErrorsScript.Run(ctx, common.RDB, []string{}, channelID).Err() +} + +func GetAllBannedChannels(ctx context.Context) (map[string][]int64, error) { + if !common.RedisEnabled { + return nil, nil + } + + result := make(map[string][]int64) + iter := common.RDB.Scan(ctx, 0, "model:*:banned", 0).Iterator() + for iter.Next(ctx) { + key := iter.Val() + model := strings.TrimPrefix(strings.TrimSuffix(key, ":banned"), "model:") + + channels, err := getBannedChannelsScript.Run(ctx, common.RDB, []string{model}).Int64Slice() + if err != nil { + return nil, err + } + result[model] = channels + } + + if err := iter.Err(); err != nil { + return nil, err + } + + return result, nil } From 27ce755b6f309db816acfb3185ffcefc3056ba1f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 20 Dec 2024 23:43:39 +0800 Subject: [PATCH 018/167] fix: getChannelWithFallback --- service/aiproxy/controller/relay.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 5785ed369b5..41e5a5199db 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -49,6 +49,17 @@ func RelayHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { return err } +func getChannelWithFallback(model string, failedChannelIDs ...int) (*dbmodel.Channel, error) { + channel, err := 
dbmodel.CacheGetRandomSatisfiedChannel(model, failedChannelIDs...) + if err == nil { + return channel, nil + } + if !errors.Is(err, dbmodel.ErrChannelsExhausted) { + return nil, err + } + return dbmodel.CacheGetRandomSatisfiedChannel(model) +} + func Relay(c *gin.Context) { log := middleware.GetLogger(c) @@ -64,7 +75,7 @@ func Relay(c *gin.Context) { failedChannelIDs = append(failedChannelIDs, int(id)) } - channel, err := dbmodel.CacheGetRandomSatisfiedChannel(requestModel, failedChannelIDs...) + channel, err := getChannelWithFallback(requestModel, failedChannelIDs...) if err != nil { c.JSON(http.StatusServiceUnavailable, gin.H{ "error": &model.Error{ @@ -94,7 +105,6 @@ func Relay(c *gin.Context) { break } if !errors.Is(err, dbmodel.ErrChannelsExhausted) { - log.Errorf("get random satisfied channel failed: %+v", err) break } newChannel = channel From de9cff4eb4ee5e78b381c4e3c487de4d2d1d5a2f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 23 Dec 2024 00:08:54 +0800 Subject: [PATCH 019/167] feat: support google thinking --- service/aiproxy/common/conv/any.go | 9 +-- .../aiproxy/relay/adaptor/anthropic/main.go | 7 +- .../aiproxy/relay/adaptor/aws/llama3/main.go | 3 +- service/aiproxy/relay/adaptor/baidu/main.go | 2 +- service/aiproxy/relay/adaptor/cohere/main.go | 3 +- service/aiproxy/relay/adaptor/coze/main.go | 3 +- .../aiproxy/relay/adaptor/gemini/adaptor.go | 8 ++- .../aiproxy/relay/adaptor/gemini/constants.go | 5 ++ service/aiproxy/relay/adaptor/gemini/main.go | 65 ++++++++++++++----- service/aiproxy/relay/adaptor/ollama/main.go | 2 +- service/aiproxy/relay/adaptor/openai/model.go | 4 +- 11 files changed, 77 insertions(+), 34 deletions(-) diff --git a/service/aiproxy/common/conv/any.go b/service/aiproxy/common/conv/any.go index ed6de0d1c12..d5e3bc037fd 100644 --- a/service/aiproxy/common/conv/any.go +++ b/service/aiproxy/common/conv/any.go @@ -9,15 +9,10 @@ func AsString(v any) string { // The change of bytes will cause the change of string synchronously 
func BytesToString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) + return unsafe.String(unsafe.SliceData(b), len(b)) } // If string is readonly, modifying bytes will cause panic func StringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) + return unsafe.Slice(unsafe.StringData(s), len(s)) } diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index b19cc12eb30..169a6e0df0d 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -15,6 +15,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -26,10 +27,8 @@ func stopReasonClaude2OpenAI(reason *string) string { return "" } switch *reason { - case "end_turn": - return "stop" - case "stop_sequence": - return "stop" + case "end_turn", "stop_sequence": + return constant.StopFinishReason case "max_tokens": return "length" case toolUseType: diff --git a/service/aiproxy/relay/adaptor/aws/llama3/main.go b/service/aiproxy/relay/adaptor/aws/llama3/main.go index 8648788943f..7ec0a55001f 100644 --- a/service/aiproxy/relay/adaptor/aws/llama3/main.go +++ b/service/aiproxy/relay/adaptor/aws/llama3/main.go @@ -21,6 +21,7 @@ import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/aws/utils" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" 
"github.com/labring/sealos/service/aiproxy/relay/relaymode" @@ -214,7 +215,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatus if llamaResp.PromptTokenCount > 0 { usage.PromptTokens = llamaResp.PromptTokenCount } - if llamaResp.StopReason == "stop" { + if llamaResp.StopReason == constant.StopFinishReason { usage.CompletionTokens = llamaResp.GenerationTokenCount usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens } diff --git a/service/aiproxy/relay/adaptor/baidu/main.go b/service/aiproxy/relay/adaptor/baidu/main.go index f68ecbc5019..c608803dfa6 100644 --- a/service/aiproxy/relay/adaptor/baidu/main.go +++ b/service/aiproxy/relay/adaptor/baidu/main.go @@ -93,7 +93,7 @@ func responseBaidu2OpenAI(response *ChatResponse) *openai.TextResponse { Role: "assistant", Content: response.Result, }, - FinishReason: "stop", + FinishReason: constant.StopFinishReason, } fullTextResponse := openai.TextResponse{ ID: response.ID, diff --git a/service/aiproxy/relay/adaptor/cohere/main.go b/service/aiproxy/relay/adaptor/cohere/main.go index d4b11148313..455e86b263e 100644 --- a/service/aiproxy/relay/adaptor/cohere/main.go +++ b/service/aiproxy/relay/adaptor/cohere/main.go @@ -14,6 +14,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -25,7 +26,7 @@ func stopReasonCohere2OpenAI(reason *string) string { } switch *reason { case "COMPLETE": - return "stop" + return constant.StopFinishReason default: return *reason } diff --git a/service/aiproxy/relay/adaptor/coze/main.go b/service/aiproxy/relay/adaptor/coze/main.go index 296769d2d8a..c0108e57b62 100644 --- a/service/aiproxy/relay/adaptor/coze/main.go +++ b/service/aiproxy/relay/adaptor/coze/main.go @@ -14,6 +14,7 @@ import ( 
"github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/coze/constant/messagetype" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -93,7 +94,7 @@ func ResponseCoze2OpenAI(cozeResponse *Response) *openai.TextResponse { Content: responseText, Name: nil, }, - FinishReason: "stop", + FinishReason: constant.StopFinishReason, } fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + cozeResponse.ConversationID, diff --git a/service/aiproxy/relay/adaptor/gemini/adaptor.go b/service/aiproxy/relay/adaptor/gemini/adaptor.go index c2964c1809b..969be992d82 100644 --- a/service/aiproxy/relay/adaptor/gemini/adaptor.go +++ b/service/aiproxy/relay/adaptor/gemini/adaptor.go @@ -19,12 +19,18 @@ type Adaptor struct{} const baseURL = "https://generativelanguage.googleapis.com" +var v1ModelMap = map[string]struct{}{} + func getRequestURL(meta *meta.Meta, action string) string { u := meta.Channel.BaseURL if u == "" { u = baseURL } - return fmt.Sprintf("%s/%s/models/%s:%s", u, "v1beta", meta.ActualModelName, action) + version := "v1beta" + if _, ok := v1ModelMap[meta.ActualModelName]; ok { + version = "v1" + } + return fmt.Sprintf("%s/%s/models/%s:%s", u, version, meta.ActualModelName, action) } func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { diff --git a/service/aiproxy/relay/adaptor/gemini/constants.go b/service/aiproxy/relay/adaptor/gemini/constants.go index fc6b1354a5b..5e89798b914 100644 --- a/service/aiproxy/relay/adaptor/gemini/constants.go +++ b/service/aiproxy/relay/adaptor/gemini/constants.go @@ -28,6 +28,11 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.ChatCompletions, Owner: model.ModelOwnerGoogle, }, + { + Model: "gemini-2.0-flash-thinking-exp", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + }, { Model: "text-embedding-004", 
diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 47fcc86018b..e4732a3430b 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -50,6 +50,7 @@ func buildSafetySettings() []ChatSafetySettings { {Category: "HARM_CATEGORY_HATE_SPEECH", Threshold: safetySetting}, {Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", Threshold: safetySetting}, {Category: "HARM_CATEGORY_DANGEROUS_CONTENT", Threshold: safetySetting}, + {Category: "HARM_CATEGORY_CIVIC_INTEGRITY", Threshold: safetySetting}, } } @@ -237,10 +238,13 @@ func (g *ChatResponse) GetResponseText() string { if g == nil { return "" } - if len(g.Candidates) > 0 && len(g.Candidates[0].Content.Parts) > 0 { - return g.Candidates[0].Content.Parts[0].Text + builder := strings.Builder{} + for _, candidate := range g.Candidates { + for _, part := range candidate.Content.Parts { + builder.WriteString(part.Text) + } } - return "" + return builder.String() } type ChatCandidate struct { @@ -283,9 +287,10 @@ func getToolCalls(candidate *ChatCandidate) []*model.Tool { return toolCalls } -func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse { +func responseGeminiChat2OpenAI(meta *meta.Meta, response *ChatResponse) *openai.TextResponse { fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + random.GetUUID(), + Model: meta.OriginModelName, Object: "chat.completion", Created: time.Now().Unix(), Choices: make([]*openai.TextResponseChoice, 0, len(response.Candidates)), @@ -302,7 +307,14 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse { if candidate.Content.Parts[0].FunctionCall != nil { choice.Message.ToolCalls = getToolCalls(candidate) } else { - choice.Message.Content = candidate.Content.Parts[0].Text + builder := strings.Builder{} + for i, part := range candidate.Content.Parts { + if i > 0 { + builder.WriteString("\n") + } + builder.WriteString(part.Text) + } + 
choice.Message.Content = builder.String() } } else { choice.Message.Content = "" @@ -314,16 +326,37 @@ func responseGeminiChat2OpenAI(response *ChatResponse) *openai.TextResponse { } func streamResponseGeminiChat2OpenAI(meta *meta.Meta, geminiResponse *ChatResponse) *openai.ChatCompletionsStreamResponse { - var choice openai.ChatCompletionsStreamResponseChoice - choice.Delta.Content = geminiResponse.GetResponseText() - // choice.FinishReason = &constant.StopFinishReason - var response openai.ChatCompletionsStreamResponse - response.ID = "chatcmpl-" + random.GetUUID() - response.Created = time.Now().Unix() - response.Object = "chat.completion.chunk" - response.Model = meta.OriginModelName - response.Choices = []*openai.ChatCompletionsStreamResponseChoice{&choice} - return &response + response := &openai.ChatCompletionsStreamResponse{ + ID: "chatcmpl-" + random.GetUUID(), + Created: time.Now().Unix(), + Model: meta.OriginModelName, + Object: "chat.completion.chunk", + Choices: make([]*openai.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates)), + } + for i, candidate := range geminiResponse.Candidates { + choice := openai.ChatCompletionsStreamResponseChoice{ + Index: i, + } + if len(candidate.Content.Parts) > 0 { + if candidate.Content.Parts[0].FunctionCall != nil { + choice.Delta.ToolCalls = getToolCalls(candidate) + } else { + builder := strings.Builder{} + for i, part := range candidate.Content.Parts { + if i > 0 { + builder.WriteString("\n") + } + builder.WriteString(part.Text) + } + choice.Delta.Content = builder.String() + } + } else { + choice.Delta.Content = "" + choice.FinishReason = &candidate.FinishReason + } + response.Choices = append(response.Choices, &choice) + } + return response } func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { @@ -405,7 +438,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage if len(geminiResponse.Candidates) == 
0 { return nil, openai.ErrorWrapperWithMessage("No candidates returned", "gemini_error", resp.StatusCode) } - fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse) + fullTextResponse := responseGeminiChat2OpenAI(meta, &geminiResponse) fullTextResponse.Model = meta.OriginModelName respContent := []ChatContent{} for _, candidate := range geminiResponse.Candidates { diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index 2452afa038a..008a7fb32c1 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -84,7 +84,7 @@ func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse { }, } if response.Done { - choice.FinishReason = "stop" + choice.FinishReason = constant.StopFinishReason } fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + random.GetUUID(), diff --git a/service/aiproxy/relay/adaptor/openai/model.go b/service/aiproxy/relay/adaptor/openai/model.go index 6c101b93398..b83898c2daa 100644 --- a/service/aiproxy/relay/adaptor/openai/model.go +++ b/service/aiproxy/relay/adaptor/openai/model.go @@ -1,6 +1,8 @@ package openai -import "github.com/labring/sealos/service/aiproxy/relay/model" +import ( + "github.com/labring/sealos/service/aiproxy/relay/model" +) type TextContent struct { Type string `json:"type,omitempty"` From 241c48f9cc5099cf4dabb8df6ff99e09e6e019e6 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 23 Dec 2024 00:14:30 +0800 Subject: [PATCH 020/167] fix: monitor --- service/aiproxy/controller/relay.go | 37 +++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 41e5a5199db..bcab8b90df1 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -41,12 +41,31 @@ func relayHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { } } -func RelayHelper(meta 
*meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { +func RelayHelper(meta *meta.Meta, c *gin.Context) (*model.ErrorWithStatusCode, bool) { err := relayHelper(meta, c) - if err := monitor.AddRequest(c.Request.Context(), meta.OriginModelName, int64(meta.Channel.ID), err != nil); err != nil { - log.Errorf("add request failed: %+v", err) + if err == nil { + if err := monitor.AddRequest( + c.Request.Context(), + meta.OriginModelName, + int64(meta.Channel.ID), + false, + ); err != nil { + log.Errorf("add request failed: %+v", err) + } + return nil, false + } + if shouldRetry(c, err.StatusCode) { + if err := monitor.AddRequest( + c.Request.Context(), + meta.OriginModelName, + int64(meta.Channel.ID), + true, + ); err != nil { + log.Errorf("add request failed: %+v", err) + } + return err, true } - return err + return nil, false } func getChannelWithFallback(model string, failedChannelIDs ...int) (*dbmodel.Channel, error) { @@ -88,14 +107,14 @@ func Relay(c *gin.Context) { } meta := middleware.NewMetaByContext(c, channel) - bizErr := RelayHelper(meta, c) + bizErr, retry := RelayHelper(meta, c) if bizErr == nil { return } failedChannelIDs = append(failedChannelIDs, channel.ID) requestID := c.GetString(ctxkey.RequestID) var retryTimes int64 - if shouldRetry(c, bizErr.StatusCode) { + if retry { retryTimes = config.GetRetryTimes() } for i := retryTimes; i > 0; i-- { @@ -117,10 +136,13 @@ func Relay(c *gin.Context) { } c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) meta.Reset(newChannel) - bizErr = RelayHelper(meta, c) + bizErr, retry = RelayHelper(meta, c) if bizErr == nil { return } + if !retry { + break + } failedChannelIDs = append(failedChannelIDs, newChannel.ID) } if bizErr != nil { @@ -139,6 +161,7 @@ func Relay(c *gin.Context) { } } +// 仅当是channel错误时,才需要重试,用户请求参数错误时,不需要重试 func shouldRetry(_ *gin.Context, statusCode int) bool { if statusCode == http.StatusTooManyRequests || statusCode == http.StatusGatewayTimeout || From 
72cdcff8ce5a7a67f094ca9a493d9ee5450631dc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 11:18:08 +0800 Subject: [PATCH 021/167] feat: get log detail --- service/aiproxy/controller/log.go | 61 +++++--- service/aiproxy/model/log.go | 252 +++++++++++++++++++++++++----- service/aiproxy/router/api.go | 2 + 3 files changed, 256 insertions(+), 59 deletions(-) diff --git a/service/aiproxy/controller/log.go b/service/aiproxy/controller/log.go index 184a93c02a2..9b9c1522d39 100644 --- a/service/aiproxy/controller/log.go +++ b/service/aiproxy/controller/log.go @@ -42,7 +42,8 @@ func GetLogs(c *gin.Context) { requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") - logs, total, err := model.GetLogs( + withBody, _ := strconv.ParseBool(c.Query("with_body")) + result, err := model.GetLogs( startTimestampTime, endTimestampTime, modelName, @@ -57,15 +58,13 @@ func GetLogs(c *gin.Context) { order, mode, model.CodeType(codeType), + withBody, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, gin.H{ - "logs": logs, - "total": total, - }) + middleware.SuccessResponse(c, result) } func GetGroupLogs(c *gin.Context) { @@ -100,7 +99,8 @@ func GetGroupLogs(c *gin.Context) { requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") - logs, total, err := model.GetGroupLogs( + withBody, _ := strconv.ParseBool(c.Query("with_body")) + result, err := model.GetGroupLogs( group, startTimestampTime, endTimestampTime, @@ -115,15 +115,13 @@ func GetGroupLogs(c *gin.Context) { order, mode, model.CodeType(codeType), + withBody, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, gin.H{ - "logs": logs, - "total": total, - }) + middleware.SuccessResponse(c, result) } func SearchLogs(c *gin.Context) { @@ -155,7 +153,8 @@ func SearchLogs(c *gin.Context) { 
requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") - logs, total, err := model.SearchLogs( + withBody, _ := strconv.ParseBool(c.Query("with_body")) + result, err := model.SearchLogs( keyword, p, perPage, @@ -171,15 +170,13 @@ func SearchLogs(c *gin.Context) { order, mode, model.CodeType(codeType), + withBody, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, gin.H{ - "logs": logs, - "total": total, - }) + middleware.SuccessResponse(c, result) } func SearchGroupLogs(c *gin.Context) { @@ -211,7 +208,8 @@ func SearchGroupLogs(c *gin.Context) { requestID := c.Query("request_id") mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") - logs, total, err := model.SearchGroupLogs( + withBody, _ := strconv.ParseBool(c.Query("with_body")) + result, err := model.SearchGroupLogs( group, keyword, p, @@ -227,15 +225,38 @@ func SearchGroupLogs(c *gin.Context) { order, mode, model.CodeType(codeType), + withBody, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, gin.H{ - "logs": logs, - "total": total, - }) + middleware.SuccessResponse(c, result) +} + +func GetLogDetail(c *gin.Context) { + logID, _ := strconv.Atoi(c.Param("log_id")) + log, err := model.GetLogDetail(logID) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + middleware.SuccessResponse(c, log) +} + +func GetGroupLogDetail(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "group is required") + return + } + logID, _ := strconv.Atoi(c.Param("log_id")) + log, err := model.GetGroupLogDetail(group, logID) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + middleware.SuccessResponse(c, log) } func DeleteHistoryLogs(c *gin.Context) { diff --git a/service/aiproxy/model/log.go 
b/service/aiproxy/model/log.go index 680bbc4072d..c5b14fa339b 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -8,6 +8,7 @@ import ( json "github.com/json-iterator/go" log "github.com/sirupsen/logrus" + "gorm.io/gorm" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" @@ -15,8 +16,8 @@ import ( type RequestDetail struct { CreatedAt time.Time `gorm:"autoCreateTime" json:"-"` - RequestBody string `gorm:"type:text" json:"request_body"` - ResponseBody string `gorm:"type:text" json:"response_body"` + RequestBody string `gorm:"type:text" json:"request_body,omitempty"` + ResponseBody string `gorm:"type:text" json:"response_body,omitempty"` ID int `json:"id"` LogID int `json:"log_id"` } @@ -27,7 +28,7 @@ type Log struct { CreatedAt time.Time `gorm:"index" json:"created_at"` TokenName string `gorm:"index;index:idx_group_token,priority:2;index:idx_group_reqat_token,priority:3" json:"token_name"` Endpoint string `gorm:"index" json:"endpoint"` - Content string `gorm:"type:text" json:"content"` + Content string `gorm:"type:text" json:"content,omitempty"` GroupID string `gorm:"index;index:idx_group_token,priority:1;index:idx_request_at_group_id,priority:1;index:idx_group_reqat_token,priority:1" json:"group"` Model string `gorm:"index" json:"model"` RequestID string `gorm:"index" json:"request_id"` @@ -56,6 +57,29 @@ func (l *Log) MarshalJSON() ([]byte, error) { }) } +func GetGroupLogDetail(group string, logID int) (*RequestDetail, error) { + var detail RequestDetail + err := LogDB.Model(&RequestDetail{}). + Joins("JOIN logs ON logs.id = request_details.log_id"). + Where("logs.group_id = ? AND logs.id = ?", group, logID). + First(&detail).Error + if err != nil { + return nil, err + } + return &detail, nil +} + +func GetLogDetail(logID int) (*RequestDetail, error) { + var detail RequestDetail + err := LogDB.Model(&RequestDetail{}). + Where("log_id = ?", logID). 
+ First(&detail).Error + if err != nil { + return nil, err + } + return &detail, nil +} + func RecordConsumeLog( requestID string, requestAt time.Time, @@ -134,7 +158,28 @@ const ( CodeTypeError CodeType = "error" ) -func GetLogs(startTimestamp time.Time, endTimestamp time.Time, modelName string, group string, requestID string, tokenID int, tokenName string, startIdx int, num int, channelID int, endpoint string, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { +type GetLogsResult struct { + Logs []*Log `json:"logs"` + Total int64 `json:"total"` +} + +func GetLogs( + startTimestamp time.Time, + endTimestamp time.Time, + modelName string, + group string, + requestID string, + tokenID int, + tokenName string, + startIdx int, + num int, + channelID int, + endpoint string, + order string, + mode int, + codeType CodeType, + withBody bool, +) (*GetLogsResult, error) { tx := LogDB.Model(&Log{}) if group != "" { tx = tx.Where("group_id = ?", group) @@ -172,24 +217,55 @@ func GetLogs(startTimestamp time.Time, endTimestamp time.Time, modelName string, case CodeTypeError: tx = tx.Where("code != 200") } - err = tx.Count(&total).Error + + result := &GetLogsResult{} + err := tx.Count(&result.Total).Error if err != nil { - return nil, 0, err + return nil, err } - if total <= 0 { - return nil, 0, nil + if result.Total <= 0 { + return result, nil + } + + if withBody { + tx = tx.Preload("RequestDetail") + } else { + tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { + return db.Select("id", "log_id") + }) } err = tx. - Preload("RequestDetail"). Order(getLogOrder(order)). Limit(num). Offset(startIdx). 
- Find(&logs).Error - return logs, total, err + Find(&result.Logs).Error + return result, err +} + +type GetGroupLogsResult struct { + GetLogsResult + TokenNames []string `json:"token_names"` + Models []string `json:"models"` } -func GetGroupLogs(group string, startTimestamp time.Time, endTimestamp time.Time, modelName string, requestID string, tokenID int, tokenName string, startIdx int, num int, channelID int, endpoint string, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { +func GetGroupLogs( + group string, + startTimestamp time.Time, + endTimestamp time.Time, + modelName string, + requestID string, + tokenID int, + tokenName string, + startIdx int, + num int, + channelID int, + endpoint string, + order string, + mode int, + codeType CodeType, + withBody bool, +) (*GetGroupLogsResult, error) { tx := LogDB.Model(&Log{}).Where("group_id = ?", group) if !startTimestamp.IsZero() { tx = tx.Where("request_at >= ?", startTimestamp) @@ -224,24 +300,65 @@ func GetGroupLogs(group string, startTimestamp time.Time, endTimestamp time.Time case CodeTypeError: tx = tx.Where("code != 200") } - err = tx.Count(&total).Error + + result := &GetGroupLogsResult{} + err := tx.Count(&result.Total).Error if err != nil { - return nil, 0, err + return nil, err + } + if result.Total <= 0 { + return result, nil } - if total <= 0 { - return nil, 0, nil + + if withBody { + tx = tx.Preload("RequestDetail") + } else { + tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { + return db.Select("id", "log_id") + }) } err = tx. - Preload("RequestDetail"). Order(getLogOrder(order)). Limit(num). Offset(startIdx). 
- Find(&logs).Error - return logs, total, err + Find(&result.Logs).Error + if err != nil { + return nil, err + } + + // Get distinct token names and models for the time period + result.TokenNames, err = getGroupLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) + if err != nil { + return nil, err + } + + result.Models, err = getGroupLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + if err != nil { + return nil, err + } + + return result, nil } -func SearchLogs(keyword string, page int, perPage int, endpoint string, groupID string, requestID string, tokenID int, tokenName string, modelName string, startTimestamp time.Time, endTimestamp time.Time, channelID int, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { +func SearchLogs( + keyword string, + page int, + perPage int, + endpoint string, + groupID string, + requestID string, + tokenID int, + tokenName string, + modelName string, + startTimestamp time.Time, + endTimestamp time.Time, + channelID int, + order string, + mode int, + codeType CodeType, + withBody bool, +) (*GetLogsResult, error) { tx := LogDB.Model(&Log{}) // Handle exact match conditions for non-zero values @@ -350,30 +467,56 @@ func SearchLogs(keyword string, page int, perPage int, endpoint string, groupID } } - err = tx.Count(&total).Error + result := &GetLogsResult{} + err := tx.Count(&result.Total).Error if err != nil { - return nil, 0, err + return nil, err } - if total <= 0 { - return nil, 0, nil + if result.Total <= 0 { + return result, nil } page-- if page < 0 { page = 0 } + + if withBody { + tx = tx.Preload("RequestDetail") + } else { + tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { + return db.Select("id", "log_id") + }) + } + err = tx. - Preload("RequestDetail"). Order(getLogOrder(order)). Limit(perPage). Offset(page * perPage). 
- Find(&logs).Error - return logs, total, err + Find(&result.Logs).Error + return result, err } -func SearchGroupLogs(group string, keyword string, page int, perPage int, endpoint string, requestID string, tokenID int, tokenName string, modelName string, startTimestamp time.Time, endTimestamp time.Time, channelID int, order string, mode int, codeType CodeType) (logs []*Log, total int64, err error) { +func SearchGroupLogs( + group string, + keyword string, + page int, + perPage int, + endpoint string, + requestID string, + tokenID int, + tokenName string, + modelName string, + startTimestamp time.Time, + endTimestamp time.Time, + channelID int, + order string, + mode int, + codeType CodeType, + withBody bool, +) (*GetGroupLogsResult, error) { if group == "" { - return nil, 0, errors.New("group is empty") + return nil, errors.New("group is empty") } tx := LogDB.Model(&Log{}).Where("group_id = ?", group) @@ -471,12 +614,13 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, endpoi } } - err = tx.Count(&total).Error + result := &GetGroupLogsResult{} + err := tx.Count(&result.Total).Error if err != nil { - return nil, 0, err + return nil, err } - if total <= 0 { - return nil, 0, nil + if result.Total <= 0 { + return result, nil } page-- @@ -484,13 +628,35 @@ func SearchGroupLogs(group string, keyword string, page int, perPage int, endpoi page = 0 } + if withBody { + tx = tx.Preload("RequestDetail") + } else { + tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { + return db.Select("id", "log_id") + }) + } + err = tx. - Preload("RequestDetail"). Order(getLogOrder(order)). Limit(perPage). Offset(page * perPage). 
- Find(&logs).Error - return logs, total, err + Find(&result.Logs).Error + if err != nil { + return nil, err + } + + // Get distinct token names and models for the time period + result.TokenNames, err = getGroupLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) + if err != nil { + return nil, err + } + + result.Models, err = getGroupLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + if err != nil { + return nil, err + } + + return result, nil } func DeleteOldLog(timestamp time.Time) (int64, error) { @@ -558,11 +724,19 @@ func getChartData(group string, start, end time.Time, tokenName, modelName strin func getGroupLogDistinctValues[T any](field string, group string, start, end time.Time) ([]T, error) { var values []T - err := LogDB. + query := LogDB. Model(&Log{}). Distinct(field). - Where("group_id = ? AND request_at BETWEEN ? AND ?", group, start, end). - Pluck(field, &values).Error + Where("group_id = ?", group) + + if !start.IsZero() { + query = query.Where("request_at >= ?", start) + } + if !end.IsZero() { + query = query.Where("request_at <= ?", end) + } + + err := query.Pluck(field, &values).Error return values, err } diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 0da7fd2de63..92c2a610b1c 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -113,11 +113,13 @@ func SetAPIRouter(router *gin.Engine) { logsRoute.DELETE("/", controller.DeleteHistoryLogs) logsRoute.GET("/search", controller.SearchLogs) logsRoute.GET("/consume_error", controller.SearchConsumeError) + logsRoute.GET("/detail/:log_id", controller.GetLogDetail) } logRoute := apiRouter.Group("/log") { logRoute.GET("/:group/search", controller.SearchGroupLogs) logRoute.GET("/:group", controller.GetGroupLogs) + logRoute.GET("/:group/detail/:log_id", controller.GetGroupLogDetail) } modelConfigsRoute := apiRouter.Group("/model_configs") From 3b812f9c5c0d4a790066d9f94d5e1a388a410ff4 Mon Sep 17 
00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 11:42:58 +0800 Subject: [PATCH 022/167] feat: no need channel config --- service/aiproxy/controller/channel.go | 1 - service/aiproxy/model/channel.go | 1 - service/aiproxy/relay/adaptor/ali/adaptor.go | 4 +- .../aiproxy/relay/adaptor/aws/claude/main.go | 14 ++++- .../aiproxy/relay/adaptor/aws/llama3/main.go | 14 ++++- .../relay/adaptor/aws/utils/adaptor.go | 53 +++++++++++++++++++ .../relay/adaptor/cloudflare/adaptor.go | 2 +- service/aiproxy/relay/adaptor/coze/adaptor.go | 39 +++++++++++--- service/aiproxy/relay/adaptor/coze/main.go | 21 -------- .../aiproxy/relay/adaptor/vertexai/adaptor.go | 44 ++++++++++++--- service/aiproxy/relay/meta/meta.go | 17 ------ 11 files changed, 150 insertions(+), 60 deletions(-) diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index a4c33004dbd..9a4a004447c 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -141,7 +141,6 @@ func (r *AddChannelRequest) ToChannel() *model.Channel { BaseURL: r.BaseURL, Models: slices.Clone(r.Models), ModelMapping: maps.Clone(r.ModelMapping), - Config: r.Config, Priority: r.Priority, Status: r.Status, } diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 97a2563a58e..bc6b98890db 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -30,7 +30,6 @@ type Channel struct { ChannelTests []*ChannelTest `gorm:"foreignKey:ChannelID;references:ID" json:"channel_tests"` BalanceUpdatedAt time.Time `json:"balance_updated_at"` ModelMapping map[string]string `gorm:"serializer:fastjson;type:text" json:"model_mapping"` - Config ChannelConfig `gorm:"serializer:fastjson;type:text" json:"config"` Key string `gorm:"type:text;index" json:"key"` Name string `gorm:"index" json:"name"` BaseURL string `gorm:"index" json:"base_url"` diff --git a/service/aiproxy/relay/adaptor/ali/adaptor.go 
b/service/aiproxy/relay/adaptor/ali/adaptor.go index 86b23fb67f6..8eb1f723119 100644 --- a/service/aiproxy/relay/adaptor/ali/adaptor.go +++ b/service/aiproxy/relay/adaptor/ali/adaptor.go @@ -46,9 +46,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { req.Header.Set("Authorization", "Bearer "+meta.Channel.Key) - if meta.Channel.Config.Plugin != "" { - req.Header.Set("X-Dashscope-Plugin", meta.Channel.Config.Plugin) - } + // req.Header.Set("X-Dashscope-Plugin", meta.Channel.Config.Plugin) return nil } diff --git a/service/aiproxy/relay/adaptor/aws/claude/main.go b/service/aiproxy/relay/adaptor/aws/claude/main.go index a483a95a8e6..7c2106981df 100644 --- a/service/aiproxy/relay/adaptor/aws/claude/main.go +++ b/service/aiproxy/relay/adaptor/aws/claude/main.go @@ -121,7 +121,12 @@ func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, return utils.WrapErr(errors.Wrap(err, "marshal request")), nil } - awsResp, err := meta.AwsClient().InvokeModel(c.Request.Context(), awsReq) + awsClient, err := utils.AwsClientFromMeta(meta) + if err != nil { + return utils.WrapErr(errors.Wrap(err, "get aws client")), nil + } + + awsResp, err := awsClient.InvokeModel(c.Request.Context(), awsReq) if err != nil { return utils.WrapErr(errors.Wrap(err, "InvokeModel")), nil } @@ -177,7 +182,12 @@ func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatus return utils.WrapErr(errors.Wrap(err, "marshal request")), nil } - awsResp, err := meta.AwsClient().InvokeModelWithResponseStream(c.Request.Context(), awsReq) + awsClient, err := utils.AwsClientFromMeta(meta) + if err != nil { + return utils.WrapErr(errors.Wrap(err, "get aws client")), nil + } + + awsResp, err := awsClient.InvokeModelWithResponseStream(c.Request.Context(), awsReq) if err != nil { return utils.WrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil } diff 
--git a/service/aiproxy/relay/adaptor/aws/llama3/main.go b/service/aiproxy/relay/adaptor/aws/llama3/main.go index 7ec0a55001f..436aac4fdfb 100644 --- a/service/aiproxy/relay/adaptor/aws/llama3/main.go +++ b/service/aiproxy/relay/adaptor/aws/llama3/main.go @@ -115,7 +115,12 @@ func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, return utils.WrapErr(errors.Wrap(err, "marshal request")), nil } - awsResp, err := meta.AwsClient().InvokeModel(c.Request.Context(), awsReq) + awsClient, err := utils.AwsClientFromMeta(meta) + if err != nil { + return utils.WrapErr(errors.Wrap(err, "get aws client")), nil + } + + awsResp, err := awsClient.InvokeModel(c.Request.Context(), awsReq) if err != nil { return utils.WrapErr(errors.Wrap(err, "InvokeModel")), nil } @@ -187,7 +192,12 @@ func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatus return utils.WrapErr(errors.Wrap(err, "marshal request")), nil } - awsResp, err := meta.AwsClient().InvokeModelWithResponseStream(c.Request.Context(), awsReq) + awsClient, err := utils.AwsClientFromMeta(meta) + if err != nil { + return utils.WrapErr(errors.Wrap(err, "get aws client")), nil + } + + awsResp, err := awsClient.InvokeModelWithResponseStream(c.Request.Context(), awsReq) if err != nil { return utils.WrapErr(errors.Wrap(err, "InvokeModelWithResponseStream")), nil } diff --git a/service/aiproxy/relay/adaptor/aws/utils/adaptor.go b/service/aiproxy/relay/adaptor/aws/utils/adaptor.go index d034d86f1ba..bb69fa29aa8 100644 --- a/service/aiproxy/relay/adaptor/aws/utils/adaptor.go +++ b/service/aiproxy/relay/adaptor/aws/utils/adaptor.go @@ -1,9 +1,14 @@ package utils import ( + "fmt" "io" "net/http" + "strings" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" @@ -13,3 +18,51 
@@ type AwsAdapter interface { ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) DoResponse(meta *meta.Meta, c *gin.Context) (usage *model.Usage, err *model.ErrorWithStatusCode) } + +type AwsConfig struct { + Region string + AK string + SK string +} + +func GetAwsConfigFromKey(key string) (*AwsConfig, error) { + split := strings.Split(key, "|") + if len(split) != 3 { + return nil, fmt.Errorf("invalid key format") + } + return &AwsConfig{ + Region: split[0], + AK: split[1], + SK: split[2], + }, nil +} + +func AwsClient(config *AwsConfig) *bedrockruntime.Client { + return bedrockruntime.New(bedrockruntime.Options{ + Region: config.Region, + Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(config.AK, config.SK, "")), + }) +} + +func awsClientFromKey(key string) (*bedrockruntime.Client, error) { + config, err := GetAwsConfigFromKey(key) + if err != nil { + return nil, err + } + return AwsClient(config), nil +} + +const AwsClientKey = "aws_client" + +func AwsClientFromMeta(meta *meta.Meta) (*bedrockruntime.Client, error) { + awsClientI, ok := meta.Get(AwsClientKey) + if ok { + return awsClientI.(*bedrockruntime.Client), nil + } + awsClient, err := awsClientFromKey(meta.Channel.Key) + if err != nil { + return nil, err + } + meta.Set(AwsClientKey, awsClient) + return awsClient, nil +} diff --git a/service/aiproxy/relay/adaptor/cloudflare/adaptor.go b/service/aiproxy/relay/adaptor/cloudflare/adaptor.go index 42c66142837..cf7e84b0704 100644 --- a/service/aiproxy/relay/adaptor/cloudflare/adaptor.go +++ b/service/aiproxy/relay/adaptor/cloudflare/adaptor.go @@ -33,7 +33,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if isAIGateWay { urlPrefix = u } else { - urlPrefix = fmt.Sprintf("%s/client/v4/accounts/%s/ai", u, meta.Channel.Config.UserID) + urlPrefix = fmt.Sprintf("%s/client/v4/accounts/%s/ai", u, meta.Channel.Key) } switch meta.Mode { diff --git 
a/service/aiproxy/relay/adaptor/coze/adaptor.go b/service/aiproxy/relay/adaptor/coze/adaptor.go index c322d302638..0cd4d8cfb18 100644 --- a/service/aiproxy/relay/adaptor/coze/adaptor.go +++ b/service/aiproxy/relay/adaptor/coze/adaptor.go @@ -5,6 +5,7 @@ import ( "errors" "io" "net/http" + "strings" "github.com/gin-gonic/gin" json "github.com/json-iterator/go" @@ -12,6 +13,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" + "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/labring/sealos/service/aiproxy/relay/utils" ) @@ -27,23 +29,48 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return u + "/open_api/v2/chat", nil } +func getTokenAndUserID(key string) (string, string) { + split := strings.Split(key, "|") + if len(split) != 2 { + return "", "" + } + return split[0], split[1] +} + func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { - req.Header.Set("Authorization", "Bearer "+meta.Channel.Key) + token, _ := getTokenAndUserID(meta.Channel.Key) + req.Header.Set("Authorization", "Bearer "+token) return nil } func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { + if meta.Mode != relaymode.ChatCompletions { + return nil, nil, errors.New("coze only support chat completions") + } request, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { return nil, nil, err } - request.User = meta.Channel.Config.UserID + _, userID := getTokenAndUserID(meta.Channel.Key) + request.User = userID request.Model = meta.ActualModelName - requestBody := ConvertRequest(request) - if requestBody == nil { - return nil, nil, errors.New("request body is nil") + cozeRequest := Request{ + Stream: request.Stream, + User: request.User, + BotID: strings.TrimPrefix(meta.ActualModelName, "bot-"), + } + for i, 
message := range request.Messages { + if i == len(request.Messages)-1 { + cozeRequest.Query = message.StringContent() + continue + } + cozeMessage := Message{ + Role: message.Role, + Content: message.StringContent(), + } + cozeRequest.ChatHistory = append(cozeRequest.ChatHistory, cozeMessage) } - data, err := json.Marshal(requestBody) + data, err := json.Marshal(cozeRequest) if err != nil { return nil, nil, err } diff --git a/service/aiproxy/relay/adaptor/coze/main.go b/service/aiproxy/relay/adaptor/coze/main.go index c0108e57b62..d178f7ec0e8 100644 --- a/service/aiproxy/relay/adaptor/coze/main.go +++ b/service/aiproxy/relay/adaptor/coze/main.go @@ -3,7 +3,6 @@ package coze import ( "bufio" "net/http" - "strings" "time" "github.com/gin-gonic/gin" @@ -36,26 +35,6 @@ func stopReasonCoze2OpenAI(reason *string) string { } } -func ConvertRequest(textRequest *model.GeneralOpenAIRequest) *Request { - cozeRequest := Request{ - Stream: textRequest.Stream, - User: textRequest.User, - BotID: strings.TrimPrefix(textRequest.Model, "bot-"), - } - for i, message := range textRequest.Messages { - if i == len(textRequest.Messages)-1 { - cozeRequest.Query = message.StringContent() - continue - } - cozeMessage := Message{ - Role: message.Role, - Content: message.StringContent(), - } - cozeRequest.ChatHistory = append(cozeRequest.ChatHistory, cozeMessage) - } - return &cozeRequest -} - func StreamResponseCoze2OpenAI(cozeResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse, *Response) { var response *Response var stopReason string diff --git a/service/aiproxy/relay/adaptor/vertexai/adaptor.go b/service/aiproxy/relay/adaptor/vertexai/adaptor.go index fa8ab21ce1f..2abb205d8f6 100644 --- a/service/aiproxy/relay/adaptor/vertexai/adaptor.go +++ b/service/aiproxy/relay/adaptor/vertexai/adaptor.go @@ -23,6 +23,29 @@ const channelName = "vertexai" type Adaptor struct{} +type VertexAIConfig struct { + Region string + ProjectID string + ADCJSON string +} + +// 
region|projectID|adcJSON +func getConfigFromKey(key string) (VertexAIConfig, error) { + region, after, ok := strings.Cut(key, "|") + if !ok { + return VertexAIConfig{}, fmt.Errorf("invalid key format") + } + projectID, adcJSON, ok := strings.Cut(after, "|") + if !ok { + return VertexAIConfig{}, fmt.Errorf("invalid key format") + } + return VertexAIConfig{ + Region: region, + ProjectID: projectID, + ADCJSON: adcJSON, + }, nil +} + func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { adaptor := GetAdaptor(meta.ActualModelName) if adaptor == nil { @@ -64,28 +87,37 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { } } + config, err := getConfigFromKey(meta.Channel.Key) + if err != nil { + return "", err + } + if meta.Channel.BaseURL != "" { return fmt.Sprintf( "%s/v1/projects/%s/locations/%s/publishers/google/models/%s:%s", meta.Channel.BaseURL, - meta.Channel.Config.VertexAIProjectID, - meta.Channel.Config.Region, + config.ProjectID, + config.Region, meta.ActualModelName, suffix, ), nil } return fmt.Sprintf( "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:%s", - meta.Channel.Config.Region, - meta.Channel.Config.VertexAIProjectID, - meta.Channel.Config.Region, + config.Region, + config.ProjectID, + config.Region, meta.ActualModelName, suffix, ), nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { - token, err := getToken(context.Background(), meta.Channel.ID, meta.Channel.Config.VertexAIADC) + config, err := getConfigFromKey(meta.Channel.Key) + if err != nil { + return err + } + token, err := getToken(context.Background(), meta.Channel.ID, config.ADCJSON) if err != nil { return err } diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index f5159df7474..17e893bca4c 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -4,14 +4,10 
@@ import ( "fmt" "time" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" "github.com/labring/sealos/service/aiproxy/model" ) type ChannelMeta struct { - Config model.ChannelConfig Name string BaseURL string Key string @@ -85,7 +81,6 @@ func NewMeta(channel *model.Channel, mode int, modelName string, opts ...Option) func (m *Meta) Reset(channel *model.Channel) { m.Channel = &ChannelMeta{ - Config: channel.Config, Name: channel.Name, BaseURL: channel.BaseURL, Key: channel.Key, @@ -137,18 +132,6 @@ func (m *Meta) GetBool(key string) bool { return false } -func (m *Meta) AwsClient() *bedrockruntime.Client { - if v, ok := m.Get("awsClient"); ok { - return v.(*bedrockruntime.Client) - } - awsClient := bedrockruntime.New(bedrockruntime.Options{ - Region: m.Channel.Config.Region, - Credentials: aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(m.Channel.Config.AK, m.Channel.Config.SK, "")), - }) - m.Set("awsClient", awsClient) - return awsClient -} - //nolint:unparam func GetMappedModelName(modelName string, mapping map[string]string) (string, bool) { if len(modelName) == 0 { From ff6480f4b5b2fe5fe5c856f91a8af97215fadbbc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 13:24:27 +0800 Subject: [PATCH 023/167] feat: key validate --- service/aiproxy/controller/channel-billing.go | 2 +- service/aiproxy/controller/channel.go | 43 ++++++++++++--- service/aiproxy/relay/adaptor/aws/key.go | 16 ++++++ .../relay/adaptor/aws/utils/adaptor.go | 4 +- .../aiproxy/relay/adaptor/azure/constants.go | 55 ++++++++++--------- service/aiproxy/relay/adaptor/azure/key.go | 29 ++++++++++ service/aiproxy/relay/adaptor/coze/adaptor.go | 25 +++------ service/aiproxy/relay/adaptor/coze/key.go | 26 +++++++++ .../aiproxy/relay/adaptor/deepseek/balance.go | 2 +- .../aiproxy/relay/adaptor/doubaoaudio/key.go | 24 ++++++++ .../aiproxy/relay/adaptor/doubaoaudio/main.go | 11 ---- 
service/aiproxy/relay/adaptor/interface.go | 6 +- .../aiproxy/relay/adaptor/minimax/adaptor.go | 31 ++++------- service/aiproxy/relay/adaptor/minimax/key.go | 26 +++++++++ .../aiproxy/relay/adaptor/moonshot/balance.go | 2 +- .../aiproxy/relay/adaptor/openai/balance.go | 2 +- .../relay/adaptor/siliconflow/balance.go | 2 +- .../aiproxy/relay/adaptor/vertexai/adaptor.go | 19 +------ service/aiproxy/relay/adaptor/vertexai/key.go | 35 ++++++++++++ service/aiproxy/relay/adaptor/xunfei/key.go | 17 ++++++ service/aiproxy/relay/channeltype/define.go | 23 +++++++- 21 files changed, 291 insertions(+), 109 deletions(-) create mode 100644 service/aiproxy/relay/adaptor/aws/key.go create mode 100644 service/aiproxy/relay/adaptor/azure/key.go create mode 100644 service/aiproxy/relay/adaptor/coze/key.go create mode 100644 service/aiproxy/relay/adaptor/doubaoaudio/key.go create mode 100644 service/aiproxy/relay/adaptor/minimax/key.go create mode 100644 service/aiproxy/relay/adaptor/vertexai/key.go create mode 100644 service/aiproxy/relay/adaptor/xunfei/key.go diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index 8ec189e05a9..4124642655b 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -25,7 +25,7 @@ func updateChannelBalance(channel *model.Channel) (float64, error) { if !ok { return 0, fmt.Errorf("invalid channel type: %d", channel.Type) } - if getBalance, ok := adaptorI.(adaptor.GetBalance); ok { + if getBalance, ok := adaptorI.(adaptor.Balancer); ok { balance, err := getBalance.GetBalance(channel) if err != nil { return 0, err diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index 9a4a004447c..e62339e4ea2 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -1,6 +1,8 @@ package controller import ( + "errors" + "fmt" "maps" "net/http" "slices" @@ -66,7 +68,12 @@ func 
AddChannels(c *gin.Context) { } _channels := make([]*model.Channel, 0, len(channels)) for _, channel := range channels { - _channels = append(_channels, channel.ToChannels()...) + channels, err := channel.ToChannels() + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + _channels = append(_channels, channels...) } err = model.BatchInsertChannels(_channels) if err != nil { @@ -133,7 +140,15 @@ type AddChannelRequest struct { Status int `json:"status"` } -func (r *AddChannelRequest) ToChannel() *model.Channel { +func (r *AddChannelRequest) ToChannel() (*model.Channel, error) { + channelType, ok := channeltype.GetAdaptorKeyValidator(r.Type) + if !ok { + return nil, errors.New("invalid channel type") + } + err := channelType.ValidateKey(r.Key) + if err != nil { + return nil, fmt.Errorf("%s [%s(%d)] invalid key: %w", r.Name, channeltype.ChannelNames[r.Type], r.Type, err) + } return &model.Channel{ Type: r.Type, Name: r.Name, @@ -143,21 +158,24 @@ func (r *AddChannelRequest) ToChannel() *model.Channel { ModelMapping: maps.Clone(r.ModelMapping), Priority: r.Priority, Status: r.Status, - } + }, nil } -func (r *AddChannelRequest) ToChannels() []*model.Channel { +func (r *AddChannelRequest) ToChannels() ([]*model.Channel, error) { keys := strings.Split(r.Key, "\n") channels := make([]*model.Channel, 0, len(keys)) for _, key := range keys { if key == "" { continue } - c := r.ToChannel() + c, err := r.ToChannel() + if err != nil { + return nil, err + } c.Key = key channels = append(channels, c) } - return channels + return channels, nil } func AddChannel(c *gin.Context) { @@ -167,7 +185,12 @@ func AddChannel(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - err = model.BatchInsertChannels(channel.ToChannels()) + channels, err := channel.ToChannels() + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + err = model.BatchInsertChannels(channels) if err != nil { 
middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -217,7 +240,11 @@ func UpdateChannel(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - ch := channel.ToChannel() + ch, err := channel.ToChannel() + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } ch.ID = id err = model.UpdateChannel(ch) if err != nil { diff --git a/service/aiproxy/relay/adaptor/aws/key.go b/service/aiproxy/relay/adaptor/aws/key.go new file mode 100644 index 00000000000..e60517beaa0 --- /dev/null +++ b/service/aiproxy/relay/adaptor/aws/key.go @@ -0,0 +1,16 @@ +package aws + +import ( + "github.com/labring/sealos/service/aiproxy/relay/adaptor" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/aws/utils" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a *Adaptor) ValidateKey(key string) error { + _, err := utils.GetAwsConfigFromKey(key) + if err != nil { + return err + } + return nil +} diff --git a/service/aiproxy/relay/adaptor/aws/utils/adaptor.go b/service/aiproxy/relay/adaptor/aws/utils/adaptor.go index bb69fa29aa8..585d0acdc36 100644 --- a/service/aiproxy/relay/adaptor/aws/utils/adaptor.go +++ b/service/aiproxy/relay/adaptor/aws/utils/adaptor.go @@ -1,7 +1,7 @@ package utils import ( - "fmt" + "errors" "io" "net/http" "strings" @@ -28,7 +28,7 @@ type AwsConfig struct { func GetAwsConfigFromKey(key string) (*AwsConfig, error) { split := strings.Split(key, "|") if len(split) != 3 { - return nil, fmt.Errorf("invalid key format") + return nil, errors.New("invalid key format") } return &AwsConfig{ Region: split[0], diff --git a/service/aiproxy/relay/adaptor/azure/constants.go b/service/aiproxy/relay/adaptor/azure/constants.go index f0cc9694420..19bc3de87bf 100644 --- a/service/aiproxy/relay/adaptor/azure/constants.go +++ b/service/aiproxy/relay/adaptor/azure/constants.go @@ -8,36 +8,37 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" 
"github.com/labring/sealos/service/aiproxy/relay/meta" + "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) type Adaptor struct { openai.Adaptor } -// func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { -// switch meta.Mode { -// case relaymode.ImagesGenerations: -// // https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api -// // https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2024-03-01-preview -// return fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", meta.Channel.BaseURL, meta.ActualModelName, meta.Channel.Config.APIVersion), nil -// case relaymode.AudioTranscription: -// // https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api -// return fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s", meta.Channel.BaseURL, meta.ActualModelName, meta.Channel.Config.APIVersion), nil -// case relaymode.AudioSpeech: -// // https://learn.microsoft.com/en-us/azure/ai-services/openai/text-to-speech-quickstart?tabs=command-line#rest-api -// return fmt.Sprintf("%s/openai/deployments/%s/audio/speech?api-version=%s", meta.Channel.BaseURL, meta.ActualModelName, meta.Channel.Config.APIVersion), nil -// } - -// // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api -// requestURL := strings.Split(meta.RequestURLPath, "?")[0] -// requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, meta.Channel.Config.APIVersion) -// task := strings.TrimPrefix(requestURL, "/v1/") -// model := strings.ReplaceAll(meta.ActualModelName, ".", "") -// // https://github.com/labring/sealos/service/aiproxy/issues/1191 -// // {your endpoint}/openai/deployments/{your azure_model}/chat/completions?api-version={api_version} -// requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model, task) -// 
return GetFullRequestURL(meta.Channel.BaseURL, requestURL), nil -// } +func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { + _, apiVersion, err := getTokenAndAPIVersion(meta.Channel.Key) + if err != nil { + return "", err + } + model := strings.ReplaceAll(meta.ActualModelName, ".", "") + switch meta.Mode { + case relaymode.ImagesGenerations: + // https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api + // https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2024-03-01-preview + return fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", meta.Channel.BaseURL, model, apiVersion), nil + case relaymode.AudioTranscription: + // https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api + return fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s", meta.Channel.BaseURL, model, apiVersion), nil + case relaymode.AudioSpeech: + // https://learn.microsoft.com/en-us/azure/ai-services/openai/text-to-speech-quickstart?tabs=command-line#rest-api + return fmt.Sprintf("%s/openai/deployments/%s/audio/speech?api-version=%s", meta.Channel.BaseURL, model, apiVersion), nil + case relaymode.ChatCompletions: + // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api + return fmt.Sprintf("%s/openai/deployments/%s/chat/completions?api-version=%s", meta.Channel.BaseURL, model, apiVersion), nil + default: + return "", fmt.Errorf("unsupported mode: %d", meta.Mode) + } +} func GetFullRequestURL(baseURL string, requestURL string) string { fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL) @@ -49,7 +50,11 @@ func GetFullRequestURL(baseURL string, requestURL string) string { } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { - req.Header.Set("Api-Key", 
meta.Channel.Key) + token, _, err := getTokenAndAPIVersion(meta.Channel.Key) + if err != nil { + return err + } + req.Header.Set("Api-Key", token) return nil } diff --git a/service/aiproxy/relay/adaptor/azure/key.go b/service/aiproxy/relay/adaptor/azure/key.go new file mode 100644 index 00000000000..8de63b6fa07 --- /dev/null +++ b/service/aiproxy/relay/adaptor/azure/key.go @@ -0,0 +1,29 @@ +package azure + +import ( + "errors" + "strings" + + "github.com/labring/sealos/service/aiproxy/relay/adaptor" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a *Adaptor) ValidateKey(key string) error { + _, _, err := getTokenAndAPIVersion(key) + if err != nil { + return err + } + return nil +} + +func getTokenAndAPIVersion(key string) (string, string, error) { + split := strings.Split(key, "|") + if len(split) == 1 { + return key, "", nil + } + if len(split) != 2 { + return "", "", errors.New("invalid key format") + } + return split[0], split[1], nil +} diff --git a/service/aiproxy/relay/adaptor/coze/adaptor.go b/service/aiproxy/relay/adaptor/coze/adaptor.go index 0cd4d8cfb18..5e988b0eaeb 100644 --- a/service/aiproxy/relay/adaptor/coze/adaptor.go +++ b/service/aiproxy/relay/adaptor/coze/adaptor.go @@ -29,16 +29,11 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return u + "/open_api/v2/chat", nil } -func getTokenAndUserID(key string) (string, string) { - split := strings.Split(key, "|") - if len(split) != 2 { - return "", "" - } - return split[0], split[1] -} - func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { - token, _ := getTokenAndUserID(meta.Channel.Key) + token, _, err := getTokenAndUserID(meta.Channel.Key) + if err != nil { + return err + } req.Header.Set("Authorization", "Bearer "+token) return nil } @@ -51,7 +46,10 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade if err != nil { return nil, nil, err } - _, userID := getTokenAndUserID(meta.Channel.Key) + 
_, userID, err := getTokenAndUserID(meta.Channel.Key) + if err != nil { + return nil, nil, err + } request.User = userID request.Model = meta.ActualModelName cozeRequest := Request{ @@ -77,13 +75,6 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade return nil, bytes.NewReader(data), nil } -func (a *Adaptor) ConvertImageRequest(request *relaymodel.ImageRequest) (any, error) { - if request == nil { - return nil, errors.New("request is nil") - } - return request, nil -} - func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error) { return utils.DoRequest(req) } diff --git a/service/aiproxy/relay/adaptor/coze/key.go b/service/aiproxy/relay/adaptor/coze/key.go new file mode 100644 index 00000000000..1e7aef59a8d --- /dev/null +++ b/service/aiproxy/relay/adaptor/coze/key.go @@ -0,0 +1,26 @@ +package coze + +import ( + "errors" + "strings" + + "github.com/labring/sealos/service/aiproxy/relay/adaptor" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a *Adaptor) ValidateKey(key string) error { + _, _, err := getTokenAndUserID(key) + if err != nil { + return err + } + return nil +} + +func getTokenAndUserID(key string) (string, string, error) { + split := strings.Split(key, "|") + if len(split) != 2 { + return "", "", errors.New("invalid key format") + } + return split[0], split[1], nil +} diff --git a/service/aiproxy/relay/adaptor/deepseek/balance.go b/service/aiproxy/relay/adaptor/deepseek/balance.go index 34ca472ac98..9ba1a95413e 100644 --- a/service/aiproxy/relay/adaptor/deepseek/balance.go +++ b/service/aiproxy/relay/adaptor/deepseek/balance.go @@ -12,7 +12,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) -var _ adaptor.GetBalance = (*Adaptor)(nil) +var _ adaptor.Balancer = (*Adaptor)(nil) func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error) { u := channel.BaseURL diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/key.go 
b/service/aiproxy/relay/adaptor/doubaoaudio/key.go new file mode 100644 index 00000000000..e48cda17aff --- /dev/null +++ b/service/aiproxy/relay/adaptor/doubaoaudio/key.go @@ -0,0 +1,24 @@ +package doubaoaudio + +import ( + "errors" + "strings" + + "github.com/labring/sealos/service/aiproxy/relay/adaptor" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a *Adaptor) ValidateKey(key string) error { + _, _, err := getAppIDAndToken(key) + return err +} + +// key格式: app_id|app_token +func getAppIDAndToken(key string) (string, string, error) { + parts := strings.Split(key, "|") + if len(parts) != 2 { + return "", "", errors.New("invalid key format") + } + return parts[0], parts[1], nil +} diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/main.go b/service/aiproxy/relay/adaptor/doubaoaudio/main.go index e3855af9d26..42aac95d93d 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/main.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/main.go @@ -1,11 +1,9 @@ package doubaoaudio import ( - "errors" "fmt" "io" "net/http" - "strings" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/model" @@ -49,15 +47,6 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade } } -// key格式: app_id|app_token -func getAppIDAndToken(key string) (string, string, error) { - parts := strings.Split(key, "|") - if len(parts) != 2 { - return "", "", errors.New("invalid key format") - } - return parts[0], parts[1], nil -} - func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { switch meta.Mode { case relaymode.AudioSpeech: diff --git a/service/aiproxy/relay/adaptor/interface.go b/service/aiproxy/relay/adaptor/interface.go index f9e515ec59d..373a527767d 100644 --- a/service/aiproxy/relay/adaptor/interface.go +++ b/service/aiproxy/relay/adaptor/interface.go @@ -20,6 +20,10 @@ type Adaptor interface { GetModelList() []*model.ModelConfig } -type GetBalance interface { +type Balancer interface { 
GetBalance(channel *model.Channel) (float64, error) } + +type KeyValidator interface { + ValidateKey(key string) error +} diff --git a/service/aiproxy/relay/adaptor/minimax/adaptor.go b/service/aiproxy/relay/adaptor/minimax/adaptor.go index 954e5139dc9..86bb91562f1 100644 --- a/service/aiproxy/relay/adaptor/minimax/adaptor.go +++ b/service/aiproxy/relay/adaptor/minimax/adaptor.go @@ -4,7 +4,6 @@ import ( "fmt" "io" "net/http" - "strings" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/model" @@ -20,28 +19,16 @@ type Adaptor struct { const baseURL = "https://api.minimax.chat" -func GetAPIKey(key string) string { - keys := strings.Split(key, "|") - if len(keys) > 0 { - return keys[0] - } - return "" -} - -func GetGroupID(key string) string { - keys := strings.Split(key, "|") - if len(keys) > 1 { - return keys[1] - } - return "" -} - func (a *Adaptor) GetModelList() []*model.ModelConfig { return ModelList } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { - req.Header.Set("Authorization", "Bearer "+GetAPIKey(meta.Channel.Key)) + apiKey, _, err := GetAPIKeyAndGroupID(meta.Channel.Key) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+apiKey) return nil } @@ -49,13 +36,17 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { meta.Channel.BaseURL = baseURL } + _, groupID, err := GetAPIKeyAndGroupID(meta.Channel.Key) + if err != nil { + return "", err + } switch meta.Mode { case relaymode.ChatCompletions: return meta.Channel.BaseURL + "/v1/text/chatcompletion_v2", nil case relaymode.Embeddings: - return fmt.Sprintf("%s/v1/embeddings?GroupId=%s", meta.Channel.BaseURL, GetGroupID(meta.Channel.Key)), nil + return fmt.Sprintf("%s/v1/embeddings?GroupId=%s", meta.Channel.BaseURL, groupID), nil case relaymode.AudioSpeech: - return fmt.Sprintf("%s/v1/t2a_v2?GroupId=%s", meta.Channel.BaseURL, GetGroupID(meta.Channel.Key)), nil + return 
fmt.Sprintf("%s/v1/t2a_v2?GroupId=%s", meta.Channel.BaseURL, groupID), nil default: return a.Adaptor.GetRequestURL(meta) } diff --git a/service/aiproxy/relay/adaptor/minimax/key.go b/service/aiproxy/relay/adaptor/minimax/key.go new file mode 100644 index 00000000000..59e77483acb --- /dev/null +++ b/service/aiproxy/relay/adaptor/minimax/key.go @@ -0,0 +1,26 @@ +package minimax + +import ( + "errors" + "strings" + + "github.com/labring/sealos/service/aiproxy/relay/adaptor" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a *Adaptor) ValidateKey(key string) error { + _, _, err := GetAPIKeyAndGroupID(key) + if err != nil { + return err + } + return nil +} + +func GetAPIKeyAndGroupID(key string) (string, string, error) { + keys := strings.Split(key, "|") + if len(keys) != 2 { + return "", "", errors.New("invalid key format") + } + return keys[0], keys[1], nil +} diff --git a/service/aiproxy/relay/adaptor/moonshot/balance.go b/service/aiproxy/relay/adaptor/moonshot/balance.go index 8e901a52976..896625ff918 100644 --- a/service/aiproxy/relay/adaptor/moonshot/balance.go +++ b/service/aiproxy/relay/adaptor/moonshot/balance.go @@ -10,7 +10,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) -var _ adaptor.GetBalance = (*Adaptor)(nil) +var _ adaptor.Balancer = (*Adaptor)(nil) func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error) { u := channel.BaseURL diff --git a/service/aiproxy/relay/adaptor/openai/balance.go b/service/aiproxy/relay/adaptor/openai/balance.go index f5bb886fa97..b38daf190e9 100644 --- a/service/aiproxy/relay/adaptor/openai/balance.go +++ b/service/aiproxy/relay/adaptor/openai/balance.go @@ -10,7 +10,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) -var _ adaptor.GetBalance = (*Adaptor)(nil) +var _ adaptor.Balancer = (*Adaptor)(nil) func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error) { return GetBalance(channel) diff --git 
a/service/aiproxy/relay/adaptor/siliconflow/balance.go b/service/aiproxy/relay/adaptor/siliconflow/balance.go index 0fd310964c4..2fcbeb4c78d 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/balance.go +++ b/service/aiproxy/relay/adaptor/siliconflow/balance.go @@ -11,7 +11,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) -var _ adaptor.GetBalance = (*Adaptor)(nil) +var _ adaptor.Balancer = (*Adaptor)(nil) func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error) { u := channel.BaseURL diff --git a/service/aiproxy/relay/adaptor/vertexai/adaptor.go b/service/aiproxy/relay/adaptor/vertexai/adaptor.go index 2abb205d8f6..f3516ac79ae 100644 --- a/service/aiproxy/relay/adaptor/vertexai/adaptor.go +++ b/service/aiproxy/relay/adaptor/vertexai/adaptor.go @@ -23,29 +23,12 @@ const channelName = "vertexai" type Adaptor struct{} -type VertexAIConfig struct { +type Config struct { Region string ProjectID string ADCJSON string } -// region|projectID|adcJSON -func getConfigFromKey(key string) (VertexAIConfig, error) { - region, after, ok := strings.Cut(key, "|") - if !ok { - return VertexAIConfig{}, fmt.Errorf("invalid key format") - } - projectID, adcJSON, ok := strings.Cut(after, "|") - if !ok { - return VertexAIConfig{}, fmt.Errorf("invalid key format") - } - return VertexAIConfig{ - Region: region, - ProjectID: projectID, - ADCJSON: adcJSON, - }, nil -} - func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { adaptor := GetAdaptor(meta.ActualModelName) if adaptor == nil { diff --git a/service/aiproxy/relay/adaptor/vertexai/key.go b/service/aiproxy/relay/adaptor/vertexai/key.go new file mode 100644 index 00000000000..17f2324b042 --- /dev/null +++ b/service/aiproxy/relay/adaptor/vertexai/key.go @@ -0,0 +1,35 @@ +package vertexai + +import ( + "errors" + "strings" + + "github.com/labring/sealos/service/aiproxy/relay/adaptor" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a 
*Adaptor) ValidateKey(key string) error { + _, err := getConfigFromKey(key) + if err != nil { + return err + } + return nil +} + +// region|projectID|adcJSON +func getConfigFromKey(key string) (Config, error) { + region, after, ok := strings.Cut(key, "|") + if !ok { + return Config{}, errors.New("invalid key format") + } + projectID, adcJSON, ok := strings.Cut(after, "|") + if !ok { + return Config{}, errors.New("invalid key format") + } + return Config{ + Region: region, + ProjectID: projectID, + ADCJSON: adcJSON, + }, nil +} diff --git a/service/aiproxy/relay/adaptor/xunfei/key.go b/service/aiproxy/relay/adaptor/xunfei/key.go new file mode 100644 index 00000000000..fba354f99c9 --- /dev/null +++ b/service/aiproxy/relay/adaptor/xunfei/key.go @@ -0,0 +1,17 @@ +package xunfei + +import ( + "errors" + "strings" + + "github.com/labring/sealos/service/aiproxy/relay/adaptor" +) + +var _ adaptor.KeyValidator = (*Adaptor)(nil) + +func (a *Adaptor) ValidateKey(key string) error { + if strings.Contains(key, ":") { + return nil + } + return errors.New("invalid key format") +} diff --git a/service/aiproxy/relay/channeltype/define.go b/service/aiproxy/relay/channeltype/define.go index b1b761bf1f5..7c4c221ba34 100644 --- a/service/aiproxy/relay/channeltype/define.go +++ b/service/aiproxy/relay/channeltype/define.go @@ -6,6 +6,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/ali" "github.com/labring/sealos/service/aiproxy/relay/adaptor/anthropic" "github.com/labring/sealos/service/aiproxy/relay/adaptor/aws" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/azure" "github.com/labring/sealos/service/aiproxy/relay/adaptor/baichuan" "github.com/labring/sealos/service/aiproxy/relay/adaptor/baidu" "github.com/labring/sealos/service/aiproxy/relay/adaptor/baiduv2" @@ -33,8 +34,8 @@ import ( ) var ChannelAdaptor = map[int]adaptor.Adaptor{ - 1: &openai.Adaptor{}, - // 3: &azure.Adaptor{}, + 1: &openai.Adaptor{}, + 3: &azure.Adaptor{}, 13: &baiduv2.Adaptor{}, 
14: &anthropic.Adaptor{}, 15: &baidu.Adaptor{}, @@ -69,6 +70,24 @@ func GetAdaptor(channel int) (adaptor.Adaptor, bool) { return a, ok } +func GetAdaptorBalancer(channel int) (adaptor.Balancer, bool) { + a, ok := GetAdaptor(channel) + if !ok { + return nil, false + } + balancer, ok := a.(adaptor.Balancer) + return balancer, ok +} + +func GetAdaptorKeyValidator(channel int) (adaptor.KeyValidator, bool) { + a, ok := GetAdaptor(channel) + if !ok { + return nil, false + } + validator, ok := a.(adaptor.KeyValidator) + return validator, ok +} + var ChannelNames = map[int]string{} func init() { From 8c6e2bd80eadeb63dac2095290f16eaaba0d876f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 14:21:45 +0800 Subject: [PATCH 024/167] feat: add model error auto ban option --- service/aiproxy/common/config/config.go | 10 ++++++++++ service/aiproxy/model/option.go | 3 +++ service/aiproxy/monitor/model.go | 10 +++++----- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index adb87d7ba89..cb32359c4e7 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -32,6 +32,8 @@ var ( // 重试次数 retryTimes atomic.Int64 + // 是否开启模型错误率自动封禁 + enableModelErrorAutoBan atomic.Bool // 模型错误率自动封禁 modelErrorAutoBanRate = math.Float64bits(0.5) // 模型类型超时时间,单位秒 @@ -42,6 +44,14 @@ func GetRetryTimes() int64 { return retryTimes.Load() } +func GetEnableModelErrorAutoBan() bool { + return enableModelErrorAutoBan.Load() +} + +func SetEnableModelErrorAutoBan(enabled bool) { + enableModelErrorAutoBan.Store(enabled) +} + func GetModelErrorAutoBanRate() float64 { return math.Float64frombits(atomic.LoadUint64(&modelErrorAutoBanRate)) } diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 9b9b4070fb7..6b0f81285ce 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -39,6 +39,7 @@ func 
InitOption2DB() error { OptionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) OptionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) OptionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) + OptionMap["EnableModelErrorAutoBan"] = strconv.FormatBool(config.GetEnableModelErrorAutoBan()) timeoutWithModelTypeJSON, _ := json.Marshal(config.GetTimeoutWithModelType()) OptionMap["TimeoutWithModelType"] = conv.BytesToString(timeoutWithModelTypeJSON) OptionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) @@ -227,6 +228,8 @@ func updateOption(key string, value string, isInit bool) (err error) { return err } config.SetRetryTimes(retryTimes) + case "EnableModelErrorAutoBan": + config.SetEnableModelErrorAutoBan(isTrue(value)) case "ModelErrorAutoBanRate": modelErrorAutoBanRate, err := strconv.ParseFloat(value, 64) if err != nil { diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go index fc14b3f4fb9..9e5a06b71bb 100644 --- a/service/aiproxy/monitor/model.go +++ b/service/aiproxy/monitor/model.go @@ -59,7 +59,7 @@ var addRequestScript = redis.NewScript(` `) func AddRequest(ctx context.Context, model string, channelID int64, isError bool) error { - if !common.RedisEnabled { + if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { return nil } errorFlag := 0 @@ -87,7 +87,7 @@ var getBannedChannelsScript = redis.NewScript(` `) func GetBannedChannels(ctx context.Context, model string) ([]int64, error) { - if !common.RedisEnabled { + if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { return nil, nil } result, err := getBannedChannelsScript.Run(ctx, common.RDB, []string{model}).Int64Slice() @@ -110,7 +110,7 @@ var clearChannelModelErrorsScript = redis.NewScript(` `) func ClearChannelModelErrors(ctx context.Context, model string, channelID int) error { - if !common.RedisEnabled { + if !common.RedisEnabled || 
!config.GetEnableModelErrorAutoBan() { return nil } return clearChannelModelErrorsScript.Run(ctx, common.RDB, []string{model}, channelID).Err() @@ -131,14 +131,14 @@ var clearChannelAllModelErrorsScript = redis.NewScript(` `) func ClearChannelAllModelErrors(ctx context.Context, channelID int) error { - if !common.RedisEnabled { + if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { return nil } return clearChannelAllModelErrorsScript.Run(ctx, common.RDB, []string{}, channelID).Err() } func GetAllBannedChannels(ctx context.Context) (map[string][]int64, error) { - if !common.RedisEnabled { + if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { return nil, nil } From f173b128a85ff409de94d46d0347dff04b368427 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 14:56:49 +0800 Subject: [PATCH 025/167] feat: gemini tool --- service/aiproxy/relay/adaptor/gemini/main.go | 97 ++++++++++++------- service/aiproxy/relay/adaptor/gemini/model.go | 19 +++- 2 files changed, 77 insertions(+), 39 deletions(-) diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index e4732a3430b..43ee9234e17 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -33,6 +33,12 @@ const ( VisionMaxImageNum = 16 ) +var toolChoiceTypeMap = map[string]string{ + "none": "NONE", + "auto": "AUTO", + "required": "ANY", +} + var mimeTypeMap = map[string]string{ "json_object": "application/json", "text": "text/plain", @@ -88,6 +94,31 @@ func buildTools(textRequest *model.GeneralOpenAIRequest) []ChatTools { return nil } +func buildToolConfig(textRequest *model.GeneralOpenAIRequest) *ToolConfig { + if textRequest.ToolChoice == nil { + return nil + } + toolConfig := ToolConfig{ + FunctionCallingConfig: FunctionCallingConfig{ + Mode: "auto", + }, + } + switch mode := textRequest.ToolChoice.(type) { + case string: + if toolChoiceType, ok := toolChoiceTypeMap[mode]; ok { + 
toolConfig.FunctionCallingConfig.Mode = toolChoiceType + } + case map[string]interface{}: + toolConfig.FunctionCallingConfig.Mode = "ANY" + if fn, ok := mode["function"].(map[string]interface{}); ok { + if fnName, ok := fn["name"].(string); ok { + toolConfig.FunctionCallingConfig.AllowedFunctionNames = []string{fnName} + } + } + } + return &toolConfig +} + func buildMessageParts(ctx context.Context, part model.MessageContent) ([]Part, error) { if part.Type == model.ContentTypeText { return []Part{{Text: part.Text}}, nil @@ -109,26 +140,18 @@ func buildMessageParts(ctx context.Context, part model.MessageContent) ([]Part, return nil, nil } -func buildContents(textRequest *model.GeneralOpenAIRequest, req *http.Request) ([]ChatContent, error) { - contents := make([]ChatContent, 0, len(textRequest.Messages)) - shouldAddDummyModelMessage := false +func buildContents(ctx context.Context, textRequest *model.GeneralOpenAIRequest) (*ChatContent, []*ChatContent, error) { + contents := make([]*ChatContent, 0, len(textRequest.Messages)) imageNum := 0 + var systemContent *ChatContent + for _, message := range textRequest.Messages { content := ChatContent{ Role: message.Role, Parts: make([]Part, 0), } - // Convert role names - switch content.Role { - case "assistant": - content.Role = "model" - case "system": - content.Role = "user" - shouldAddDummyModelMessage = true - } - // Process message content openaiContent := message.ParseContent() for _, part := range openaiContent { @@ -139,26 +162,25 @@ func buildContents(textRequest *model.GeneralOpenAIRequest, req *http.Request) ( } } - parts, err := buildMessageParts(req.Context(), part) + parts, err := buildMessageParts(ctx, part) if err != nil { - return nil, err + return nil, nil, err } content.Parts = append(content.Parts, parts...) 
} - contents = append(contents, content) - - // Add dummy model message after system message - if shouldAddDummyModelMessage { - contents = append(contents, ChatContent{ - Role: "model", - Parts: []Part{{Text: "Okay"}}, - }) - shouldAddDummyModelMessage = false + // Convert role names + switch content.Role { + case "assistant": + content.Role = "model" + case "system": + systemContent = &content + continue } + contents = append(contents, &content) } - return contents, nil + return systemContent, contents, nil } // Setting safety to the lowest possible values since Gemini is already powerless enough @@ -171,7 +193,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, textRequest.Model = meta.ActualModelName meta.Set("stream", textRequest.Stream) - contents, err := buildContents(textRequest, req) + systemContent, contents, err := buildContents(req.Context(), textRequest) if err != nil { return nil, nil, err } @@ -184,10 +206,12 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, // Build actual request geminiRequest := ChatRequest{ - Contents: contents, - SafetySettings: buildSafetySettings(), - GenerationConfig: buildGenerationConfig(textRequest), - Tools: buildTools(textRequest), + Contents: contents, + SystemInstruction: systemContent, + SafetySettings: buildSafetySettings(), + GenerationConfig: buildGenerationConfig(textRequest), + Tools: buildTools(textRequest), + ToolConfig: buildToolConfig(textRequest), } data, err := json.Marshal(geminiRequest) @@ -198,7 +222,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, return nil, bytes.NewReader(data), nil } -func CountTokens(ctx context.Context, meta *meta.Meta, chat []ChatContent) (int, error) { +func CountTokens(ctx context.Context, meta *meta.Meta, chat []*ChatContent) (int, error) { countReq := ChatRequest{ Contents: chat, } @@ -240,7 +264,10 @@ func (g *ChatResponse) GetResponseText() string { } builder := 
strings.Builder{} for _, candidate := range g.Candidates { - for _, part := range candidate.Content.Parts { + for i, part := range candidate.Content.Parts { + if i > 0 { + builder.WriteString("\n") + } builder.WriteString(part.Text) } } @@ -365,7 +392,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model log := middleware.GetLogger(c) responseText := strings.Builder{} - respContent := []ChatContent{} + respContent := []*ChatContent{} scanner := bufio.NewScanner(resp.Body) scanner.Split(bufio.ScanLines) @@ -389,7 +416,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model continue } for _, candidate := range geminiResponse.Candidates { - respContent = append(respContent, candidate.Content) + respContent = append(respContent, &candidate.Content) } response := streamResponseGeminiChat2OpenAI(meta, &geminiResponse) if response == nil { @@ -440,9 +467,9 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage } fullTextResponse := responseGeminiChat2OpenAI(meta, &geminiResponse) fullTextResponse.Model = meta.OriginModelName - respContent := []ChatContent{} + respContent := []*ChatContent{} for _, candidate := range geminiResponse.Candidates { - respContent = append(respContent, candidate.Content) + respContent = append(respContent, &candidate.Content) } usage := model.Usage{ diff --git a/service/aiproxy/relay/adaptor/gemini/model.go b/service/aiproxy/relay/adaptor/gemini/model.go index 6b60976fa33..9dec4502b75 100644 --- a/service/aiproxy/relay/adaptor/gemini/model.go +++ b/service/aiproxy/relay/adaptor/gemini/model.go @@ -1,10 +1,12 @@ package gemini type ChatRequest struct { - GenerationConfig *ChatGenerationConfig `json:"generation_config,omitempty"` - Contents []ChatContent `json:"contents"` - SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"` - Tools []ChatTools `json:"tools,omitempty"` + Contents []*ChatContent `json:"contents"` + SystemInstruction 
*ChatContent `json:"system_instruction,omitempty"` + SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"` + GenerationConfig *ChatGenerationConfig `json:"generation_config,omitempty"` + Tools []ChatTools `json:"tools,omitempty"` + ToolConfig *ToolConfig `json:"tool_config,omitempty"` } type EmbeddingRequest struct { @@ -74,3 +76,12 @@ type ChatGenerationConfig struct { MaxOutputTokens int `json:"maxOutputTokens,omitempty"` CandidateCount int `json:"candidateCount,omitempty"` } + +type FunctionCallingConfig struct { + Mode string `json:"mode,omitempty"` + AllowedFunctionNames []string `json:"allowed_function_names,omitempty"` +} + +type ToolConfig struct { + FunctionCallingConfig FunctionCallingConfig `json:"function_calling_config"` +} From 28e2fb4b5512355049673e7eefe190b771189945 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 15:13:57 +0800 Subject: [PATCH 026/167] feat: gemini openai sdk --- service/aiproxy/controller/channel.go | 14 +++++---- .../relay/adaptor/geminiopenai/adaptor.go | 30 +++++++++++++++++++ service/aiproxy/relay/channeltype/define.go | 20 ++----------- 3 files changed, 40 insertions(+), 24 deletions(-) create mode 100644 service/aiproxy/relay/adaptor/geminiopenai/adaptor.go diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index e62339e4ea2..644e54551bf 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -1,7 +1,6 @@ package controller import ( - "errors" "fmt" "maps" "net/http" @@ -13,6 +12,7 @@ import ( "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/monitor" + "github.com/labring/sealos/service/aiproxy/relay/adaptor" "github.com/labring/sealos/service/aiproxy/relay/channeltype" log "github.com/sirupsen/logrus" ) @@ -141,13 +141,15 @@ type AddChannelRequest struct { } func (r *AddChannelRequest) ToChannel() (*model.Channel, 
error) { - channelType, ok := channeltype.GetAdaptorKeyValidator(r.Type) + channelType, ok := channeltype.GetAdaptor(r.Type) if !ok { - return nil, errors.New("invalid channel type") + return nil, fmt.Errorf("invalid channel type: %d", r.Type) } - err := channelType.ValidateKey(r.Key) - if err != nil { - return nil, fmt.Errorf("%s [%s(%d)] invalid key: %w", r.Name, channeltype.ChannelNames[r.Type], r.Type, err) + if validator, ok := channelType.(adaptor.KeyValidator); ok { + err := validator.ValidateKey(r.Key) + if err != nil { + return nil, fmt.Errorf("%s [%s(%d)] invalid key: %w", r.Name, channeltype.ChannelNames[r.Type], r.Type, err) + } } return &model.Channel{ Type: r.Type, diff --git a/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go b/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go new file mode 100644 index 00000000000..749e5d50727 --- /dev/null +++ b/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go @@ -0,0 +1,30 @@ +package geminiopenai + +import ( + "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/gemini" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/meta" +) + +type Adaptor struct { + openai.Adaptor +} + +const baseURL = "https://generativelanguage.googleapis.com/v1beta/openai" + +func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { + if meta.Channel.BaseURL == "" { + meta.Channel.BaseURL = baseURL + } + meta.Set(openai.MetaBaseURLNoV1, true) + return a.Adaptor.GetRequestURL(meta) +} + +func (a *Adaptor) GetModelList() []*model.ModelConfig { + return gemini.ModelList +} + +func (a *Adaptor) GetChannelName() string { + return "google gemini (openai)" +} diff --git a/service/aiproxy/relay/channeltype/define.go b/service/aiproxy/relay/channeltype/define.go index 7c4c221ba34..4a41be3e855 100644 --- a/service/aiproxy/relay/channeltype/define.go +++ b/service/aiproxy/relay/channeltype/define.go @@ 
-17,6 +17,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/doubao" "github.com/labring/sealos/service/aiproxy/relay/adaptor/doubaoaudio" "github.com/labring/sealos/service/aiproxy/relay/adaptor/gemini" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/geminiopenai" "github.com/labring/sealos/service/aiproxy/relay/adaptor/groq" "github.com/labring/sealos/service/aiproxy/relay/adaptor/lingyiwanwu" "github.com/labring/sealos/service/aiproxy/relay/adaptor/minimax" @@ -36,6 +37,7 @@ import ( var ChannelAdaptor = map[int]adaptor.Adaptor{ 1: &openai.Adaptor{}, 3: &azure.Adaptor{}, + 12: &geminiopenai.Adaptor{}, 13: &baiduv2.Adaptor{}, 14: &anthropic.Adaptor{}, 15: &baidu.Adaptor{}, @@ -70,24 +72,6 @@ func GetAdaptor(channel int) (adaptor.Adaptor, bool) { return a, ok } -func GetAdaptorBalancer(channel int) (adaptor.Balancer, bool) { - a, ok := GetAdaptor(channel) - if !ok { - return nil, false - } - balancer, ok := a.(adaptor.Balancer) - return balancer, ok -} - -func GetAdaptorKeyValidator(channel int) (adaptor.KeyValidator, bool) { - a, ok := GetAdaptor(channel) - if !ok { - return nil, false - } - validator, ok := a.(adaptor.KeyValidator) - return validator, ok -} - var ChannelNames = map[int]string{} func init() { From 1bfd5f48f80c29a830a9c13ccf22e976de83b3f8 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 16:32:25 +0800 Subject: [PATCH 027/167] fix: option keys --- service/aiproxy/model/option.go | 72 ++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 6b0f81285ce..0bd05fb43ab 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -24,40 +24,66 @@ type Option struct { func GetAllOption() ([]*Option, error) { var options []*Option - err := DB.Find(&options).Error + err := DB.Where("key IN (?)", optionKeys).Find(&options).Error return options, err } -var OptionMap = 
make(map[string]string) +var ( + optionMap = make(map[string]string) + optionKeys []string +) func InitOption2DB() error { - OptionMap["LogDetailStorageHours"] = strconv.FormatInt(config.GetLogDetailStorageHours(), 10) - OptionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) - OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(config.GetAutomaticDisableChannelEnabled()) - OptionMap["AutomaticEnableChannelWhenTestSucceedEnabled"] = strconv.FormatBool(config.GetAutomaticEnableChannelWhenTestSucceedEnabled()) - OptionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.GetApproximateTokenEnabled()) - OptionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) - OptionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) - OptionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) - OptionMap["EnableModelErrorAutoBan"] = strconv.FormatBool(config.GetEnableModelErrorAutoBan()) - timeoutWithModelTypeJSON, _ := json.Marshal(config.GetTimeoutWithModelType()) - OptionMap["TimeoutWithModelType"] = conv.BytesToString(timeoutWithModelTypeJSON) - OptionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) - defaultChannelModelsJSON, _ := json.Marshal(config.GetDefaultChannelModels()) - OptionMap["DefaultChannelModels"] = conv.BytesToString(defaultChannelModelsJSON) - defaultChannelModelMappingJSON, _ := json.Marshal(config.GetDefaultChannelModelMapping()) - OptionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) - OptionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() - OptionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) - err := loadOptionsFromDatabase(true) + err := initOptionMap() + if err != nil { + return err + } + + err = loadOptionsFromDatabase(true) if err != nil { return err } return storeOptionMap() } +func initOptionMap() error { + 
optionMap["LogDetailStorageHours"] = strconv.FormatInt(config.GetLogDetailStorageHours(), 10) + optionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) + optionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(config.GetAutomaticDisableChannelEnabled()) + optionMap["AutomaticEnableChannelWhenTestSucceedEnabled"] = strconv.FormatBool(config.GetAutomaticEnableChannelWhenTestSucceedEnabled()) + optionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.GetApproximateTokenEnabled()) + optionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) + optionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) + optionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) + optionMap["EnableModelErrorAutoBan"] = strconv.FormatBool(config.GetEnableModelErrorAutoBan()) + timeoutWithModelTypeJSON, err := json.Marshal(config.GetTimeoutWithModelType()) + if err != nil { + return err + } + optionMap["TimeoutWithModelType"] = conv.BytesToString(timeoutWithModelTypeJSON) + optionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) + defaultChannelModelsJSON, err := json.Marshal(config.GetDefaultChannelModels()) + if err != nil { + return err + } + optionMap["DefaultChannelModels"] = conv.BytesToString(defaultChannelModelsJSON) + defaultChannelModelMappingJSON, err := json.Marshal(config.GetDefaultChannelModelMapping()) + if err != nil { + return err + } + optionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) + optionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() + optionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) + + optionKeys = make([]string, 0, len(optionMap)) + for key := range optionMap { + optionKeys = append(optionKeys, key) + } + return nil +} + func storeOptionMap() error { - for key, value := range OptionMap { + for key, value := range 
optionMap { err := saveOption(key, value) if err != nil { return err @@ -83,7 +109,7 @@ func loadOptionsFromDatabase(isInit bool) error { continue } if isInit { - delete(OptionMap, option.Key) + delete(optionMap, option.Key) } } return nil From b3d4c407595fb3fcf55f1f85d3808b121c9d848b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 17:06:33 +0800 Subject: [PATCH 028/167] feat: do not save access at --- service/aiproxy/controller/group.go | 50 +++++++++++++++-- service/aiproxy/controller/token.go | 87 ++++++++++++++++++++++++++--- service/aiproxy/model/channel.go | 8 +-- service/aiproxy/model/group.go | 21 +------ service/aiproxy/model/log.go | 18 ++++++ service/aiproxy/model/token.go | 76 +------------------------ 6 files changed, 146 insertions(+), 114 deletions(-) diff --git a/service/aiproxy/controller/group.go b/service/aiproxy/controller/group.go index b134688ddd1..e5b2c710873 100644 --- a/service/aiproxy/controller/group.go +++ b/service/aiproxy/controller/group.go @@ -3,15 +3,32 @@ package controller import ( "net/http" "strconv" + "time" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" - - "github.com/gin-gonic/gin" ) +type GroupResponse struct { + *model.Group + AccessedAt time.Time `json:"accessed_at,omitempty"` +} + +func (g *GroupResponse) MarshalJSON() ([]byte, error) { + type Alias model.Group + return json.Marshal(&struct { + *Alias + CreatedAt int64 `json:"created_at,omitempty"` + AccessedAt int64 `json:"accessed_at,omitempty"` + }{ + Alias: (*Alias)(g.Group), + CreatedAt: g.CreatedAt.UnixMilli(), + AccessedAt: g.AccessedAt.UnixMilli(), + }) +} + func GetGroups(c *gin.Context) { p, _ := strconv.Atoi(c.Query("p")) p-- @@ -31,8 +48,16 @@ func GetGroups(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + groupResponses := make([]*GroupResponse, len(groups)) + for i, group := range 
groups { + lastRequestAt, _ := model.GetGroupLastRequestTime(group.ID) + groupResponses[i] = &GroupResponse{ + Group: group, + AccessedAt: lastRequestAt, + } + } middleware.SuccessResponse(c, gin.H{ - "groups": groups, + "groups": groupResponses, "total": total, }) } @@ -57,8 +82,16 @@ func SearchGroups(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + groupResponses := make([]*GroupResponse, len(groups)) + for i, group := range groups { + lastRequestAt, _ := model.GetGroupLastRequestTime(group.ID) + groupResponses[i] = &GroupResponse{ + Group: group, + AccessedAt: lastRequestAt, + } + } middleware.SuccessResponse(c, gin.H{ - "groups": groups, + "groups": groupResponses, "total": total, }) } @@ -74,7 +107,12 @@ func GetGroup(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, _group) + lastRequestAt, _ := model.GetGroupLastRequestTime(group) + groupResponse := &GroupResponse{ + Group: _group, + AccessedAt: lastRequestAt, + } + middleware.SuccessResponse(c, groupResponse) } type UpdateGroupRPMRequest struct { diff --git a/service/aiproxy/controller/token.go b/service/aiproxy/controller/token.go index 090800004ce..84ceae6fb39 100644 --- a/service/aiproxy/controller/token.go +++ b/service/aiproxy/controller/token.go @@ -8,12 +8,33 @@ import ( "time" "github.com/gin-gonic/gin" + json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/network" "github.com/labring/sealos/service/aiproxy/common/random" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" ) +type TokenResponse struct { + *model.Token + AccessedAt time.Time `json:"accessed_at"` +} + +func (t *TokenResponse) MarshalJSON() ([]byte, error) { + type Alias TokenResponse + return json.Marshal(&struct { + *Alias + CreatedAt int64 `json:"created_at"` + ExpiredAt int64 `json:"expired_at"` + AccessedAt int64 `json:"accessed_at"` + }{ + 
Alias: (*Alias)(t), + CreatedAt: t.CreatedAt.UnixMilli(), + ExpiredAt: t.ExpiredAt.UnixMilli(), + AccessedAt: t.AccessedAt.UnixMilli(), + }) +} + func GetTokens(c *gin.Context) { p, _ := strconv.Atoi(c.Query("p")) p-- @@ -34,8 +55,16 @@ func GetTokens(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + tokenResponses := make([]*TokenResponse, len(tokens)) + for i, token := range tokens { + lastRequestAt, _ := model.GetTokenLastRequestTime(token.ID) + tokenResponses[i] = &TokenResponse{ + Token: token, + AccessedAt: lastRequestAt, + } + } middleware.SuccessResponse(c, gin.H{ - "tokens": tokens, + "tokens": tokenResponses, "total": total, }) } @@ -60,8 +89,16 @@ func GetGroupTokens(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + tokenResponses := make([]*TokenResponse, len(tokens)) + for i, token := range tokens { + lastRequestAt, _ := model.GetGroupTokenLastRequestTime(group, token.ID) + tokenResponses[i] = &TokenResponse{ + Token: token, + AccessedAt: lastRequestAt, + } + } middleware.SuccessResponse(c, gin.H{ - "tokens": tokens, + "tokens": tokenResponses, "total": total, }) } @@ -89,8 +126,16 @@ func SearchTokens(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + tokenResponses := make([]*TokenResponse, len(tokens)) + for i, token := range tokens { + lastRequestAt, _ := model.GetTokenLastRequestTime(token.ID) + tokenResponses[i] = &TokenResponse{ + Token: token, + AccessedAt: lastRequestAt, + } + } middleware.SuccessResponse(c, gin.H{ - "tokens": tokens, + "tokens": tokenResponses, "total": total, }) } @@ -118,8 +163,16 @@ func SearchGroupTokens(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + tokenResponses := make([]*TokenResponse, len(tokens)) + for i, token := range tokens { + lastRequestAt, _ := model.GetGroupTokenLastRequestTime(group, token.ID) + tokenResponses[i] = &TokenResponse{ + Token: token, + AccessedAt: 
lastRequestAt, + } + } middleware.SuccessResponse(c, gin.H{ - "tokens": tokens, + "tokens": tokenResponses, "total": total, }) } @@ -135,7 +188,12 @@ func GetToken(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, token) + lastRequestAt, _ := model.GetTokenLastRequestTime(id) + tokenResponse := &TokenResponse{ + Token: token, + AccessedAt: lastRequestAt, + } + middleware.SuccessResponse(c, tokenResponse) } func GetGroupToken(c *gin.Context) { @@ -150,7 +208,12 @@ func GetGroupToken(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, token) + lastRequestAt, _ := model.GetGroupTokenLastRequestTime(group, id) + tokenResponse := &TokenResponse{ + Token: token, + AccessedAt: lastRequestAt, + } + middleware.SuccessResponse(c, tokenResponse) } func validateToken(token AddTokenRequest) error { @@ -212,7 +275,9 @@ func AddToken(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, cleanToken) + middleware.SuccessResponse(c, &TokenResponse{ + Token: cleanToken, + }) } func DeleteToken(c *gin.Context) { @@ -311,7 +376,9 @@ func UpdateToken(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, cleanToken) + middleware.SuccessResponse(c, &TokenResponse{ + Token: cleanToken, + }) } func UpdateGroupToken(c *gin.Context) { @@ -351,7 +418,9 @@ func UpdateGroupToken(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - middleware.SuccessResponse(c, cleanToken) + middleware.SuccessResponse(c, &TokenResponse{ + Token: cleanToken, + }) } type UpdateTokenStatusRequest struct { diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index bc6b98890db..6750f0d2de1 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -25,7 +25,6 @@ const ( type Channel 
struct { CreatedAt time.Time `gorm:"index" json:"created_at"` - AccessedAt time.Time `json:"accessed_at"` LastTestErrorAt time.Time `json:"last_test_error_at"` ChannelTests []*ChannelTest `gorm:"foreignKey:ChannelID;references:ID" json:"channel_tests"` BalanceUpdatedAt time.Time `json:"balance_updated_at"` @@ -112,13 +111,11 @@ func (c *Channel) MarshalJSON() ([]byte, error) { return json.Marshal(&struct { *Alias CreatedAt int64 `json:"created_at"` - AccessedAt int64 `json:"accessed_at"` BalanceUpdatedAt int64 `json:"balance_updated_at"` LastTestErrorAt int64 `json:"last_test_error_at"` }{ Alias: (*Alias)(c), CreatedAt: c.CreatedAt.UnixMilli(), - AccessedAt: c.AccessedAt.UnixMilli(), BalanceUpdatedAt: c.BalanceUpdatedAt.UnixMilli(), LastTestErrorAt: c.LastTestErrorAt.UnixMilli(), }) @@ -128,7 +125,7 @@ func (c *Channel) MarshalJSON() ([]byte, error) { func getChannelOrder(order string) string { prefix, suffix, _ := strings.Cut(order, "-") switch prefix { - case "name", "type", "created_at", "accessed_at", "status", "test_at", "balance_updated_at", "used_amount", "request_count", "priority", "id": + case "name", "type", "created_at", "status", "test_at", "balance_updated_at", "used_amount", "request_count", "priority", "id": switch suffix { case "asc": return prefix + " asc" @@ -273,7 +270,7 @@ func BatchInsertChannels(channels []*Channel) error { func UpdateChannel(channel *Channel) error { result := DB. Model(channel). - Omit("accessed_at", "used_amount", "request_count", "created_at", "balance_updated_at", "balance"). + Omit("used_amount", "request_count", "created_at", "balance_updated_at", "balance"). Clauses(clause.Returning{}). 
Updates(channel) return HandleUpdateResult(result, ErrChannelNotFound) @@ -351,7 +348,6 @@ func UpdateChannelUsedAmount(id int, amount float64, requestCount int) error { result := DB.Model(&Channel{}).Where("id = ?", id).Updates(map[string]interface{}{ "used_amount": gorm.Expr("used_amount + ?", amount), "request_count": gorm.Expr("request_count + ?", requestCount), - "accessed_at": time.Now(), }) return HandleUpdateResult(result, ErrChannelNotFound) } diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index 766f9b6fd7a..2650a7bf09c 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -6,8 +6,6 @@ import ( "strings" "time" - json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common" log "github.com/sirupsen/logrus" "gorm.io/gorm" @@ -25,7 +23,6 @@ const ( type Group struct { CreatedAt time.Time `json:"created_at"` - AccessedAt time.Time `json:"accessed_at"` ID string `gorm:"primaryKey" json:"id"` Tokens []*Token `gorm:"foreignKey:GroupID" json:"-"` Status int `gorm:"default:1;index" json:"status"` @@ -38,24 +35,11 @@ func (g *Group) BeforeDelete(tx *gorm.DB) (err error) { return tx.Model(&Token{}).Where("group_id = ?", g.ID).Delete(&Token{}).Error } -func (g *Group) MarshalJSON() ([]byte, error) { - type Alias Group - return json.Marshal(&struct { - *Alias - CreatedAt int64 `json:"created_at"` - AccessedAt int64 `json:"accessed_at"` - }{ - Alias: (*Alias)(g), - CreatedAt: g.CreatedAt.UnixMilli(), - AccessedAt: g.AccessedAt.UnixMilli(), - }) -} - //nolint:goconst func getGroupOrder(order string) string { prefix, suffix, _ := strings.Cut(order, "-") switch prefix { - case "id", "request_count", "accessed_at", "status", "created_at", "used_amount": + case "id", "request_count", "status", "created_at", "used_amount": switch suffix { case "asc": return prefix + " asc" @@ -147,7 +131,6 @@ func UpdateGroupUsedAmountAndRequestCount(id string, amount float64, count int) result := 
DB.Model(&Group{}).Where("id = ?", id).Updates(map[string]interface{}{ "used_amount": gorm.Expr("used_amount + ?", amount), "request_count": gorm.Expr("request_count + ?", count), - "accessed_at": time.Now(), }) return HandleUpdateResult(result, ErrGroupNotFound) } @@ -155,7 +138,6 @@ func UpdateGroupUsedAmountAndRequestCount(id string, amount float64, count int) func UpdateGroupUsedAmount(id string, amount float64) error { result := DB.Model(&Group{}).Where("id = ?", id).Updates(map[string]interface{}{ "used_amount": gorm.Expr("used_amount + ?", amount), - "accessed_at": time.Now(), }) return HandleUpdateResult(result, ErrGroupNotFound) } @@ -163,7 +145,6 @@ func UpdateGroupUsedAmount(id string, amount float64) error { func UpdateGroupRequestCount(id string, count int) error { result := DB.Model(&Group{}).Where("id = ?", id).Updates(map[string]interface{}{ "request_count": gorm.Expr("request_count + ?", count), - "accessed_at": time.Now(), }) return HandleUpdateResult(result, ErrGroupNotFound) } diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index c5b14fa339b..23e779c1928 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -789,3 +789,21 @@ func GetDashboardData(group string, start, end time.Time, tokenName string, mode ExceptionCount: exceptionCount, }, nil } + +func GetGroupLastRequestTime(group string) (time.Time, error) { + var log Log + err := LogDB.Model(&Log{}).Where("group_id = ?", group).Order("request_at desc").First(&log).Error + return log.RequestAt, err +} + +func GetTokenLastRequestTime(id int) (time.Time, error) { + var log Log + err := LogDB.Model(&Log{}).Where("token_id = ?", id).Order("request_at desc").First(&log).Error + return log.RequestAt, err +} + +func GetGroupTokenLastRequestTime(group string, id int) (time.Time, error) { + var log Log + err := LogDB.Model(&Log{}).Where("group_id = ? 
and token_id = ?", group, id).Order("request_at desc").First(&log).Error + return log.RequestAt, err +} diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index aa8a915b8bf..cccdd8e1064 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -6,8 +6,6 @@ import ( "strings" "time" - json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" log "github.com/sirupsen/logrus" @@ -29,7 +27,6 @@ const ( type Token struct { CreatedAt time.Time `json:"created_at"` ExpiredAt time.Time `json:"expired_at"` - AccessedAt time.Time `gorm:"index" json:"accessed_at"` Group *Group `gorm:"foreignKey:GroupID" json:"-"` Key string `gorm:"type:char(48);uniqueIndex" json:"key"` Name EmptyNullString `gorm:"index;uniqueIndex:idx_group_name;not null" json:"name"` @@ -43,26 +40,11 @@ type Token struct { RequestCount int `gorm:"index" json:"request_count"` } -func (t *Token) MarshalJSON() ([]byte, error) { - type Alias Token - return json.Marshal(&struct { - *Alias - CreatedAt int64 `json:"created_at"` - AccessedAt int64 `json:"accessed_at"` - ExpiredAt int64 `json:"expired_at"` - }{ - Alias: (*Alias)(t), - CreatedAt: t.CreatedAt.UnixMilli(), - AccessedAt: t.AccessedAt.UnixMilli(), - ExpiredAt: t.ExpiredAt.UnixMilli(), - }) -} - //nolint:goconst func getTokenOrder(order string) string { prefix, suffix, _ := strings.Cut(order, "-") switch prefix { - case "name", "accessed_at", "expired_at", "group", "used_amount", "request_count", "id", "created_at": + case "name", "expired_at", "group", "used_amount", "request_count", "id", "created_at": switch suffix { case "asc": return prefix + " asc" @@ -307,7 +289,7 @@ func ValidateAndGetToken(key string) (token *TokenCache, err error) { return nil, fmt.Errorf("token (%s[%d]) is not available", token.Name, token.ID) } if !time.Time(token.ExpiredAt).IsZero() && time.Time(token.ExpiredAt).Before(time.Now()) { - err 
:= UpdateTokenStatusAndAccessedAt(token.ID, TokenStatusExpired) + err := UpdateTokenStatus(token.ID, TokenStatusExpired) if err != nil { log.Error("failed to update token status" + err.Error()) } @@ -315,7 +297,7 @@ func ValidateAndGetToken(key string) (token *TokenCache, err error) { } if token.Quota > 0 && token.UsedAmount >= token.Quota { // in this case, we can make sure the token is exhausted - err := UpdateTokenStatusAndAccessedAt(token.ID, TokenStatusExhausted) + err := UpdateTokenStatus(token.ID, TokenStatusExhausted) if err != nil { log.Error("failed to update token status" + err.Error()) } @@ -369,57 +351,6 @@ func UpdateTokenStatus(id int, status int) (err error) { return HandleUpdateResult(result, ErrTokenNotFound) } -func UpdateTokenStatusAndAccessedAt(id int, status int) (err error) { - token := Token{ID: id} - defer func() { - if err == nil { - if err := CacheDeleteToken(token.Key); err != nil { - log.Error("delete token from cache failed: " + err.Error()) - } - } - }() - result := DB. - Model(&token). - Clauses(clause.Returning{ - Columns: []clause.Column{ - {Name: "key"}, - }, - }). - Where("id = ?", id).Updates( - map[string]interface{}{ - "status": status, - "accessed_at": time.Now(), - }, - ) - return HandleUpdateResult(result, ErrTokenNotFound) -} - -func UpdateGroupTokenStatusAndAccessedAt(group string, id int, status int) (err error) { - token := Token{} - defer func() { - if err == nil { - if err := CacheDeleteToken(token.Key); err != nil { - log.Error("delete token from cache failed: " + err.Error()) - } - } - }() - result := DB. - Model(&token). - Clauses(clause.Returning{ - Columns: []clause.Column{ - {Name: "key"}, - }, - }). - Where("id = ? and group_id = ?", id, group). 
- Updates( - map[string]interface{}{ - "status": status, - "accessed_at": time.Now(), - }, - ) - return HandleUpdateResult(result, ErrTokenNotFound) -} - func UpdateGroupTokenStatus(group string, id int, status int) (err error) { token := Token{} defer func() { @@ -586,7 +517,6 @@ func UpdateTokenUsedAmount(id int, amount float64, requestCount int) (err error) map[string]interface{}{ "used_amount": gorm.Expr("used_amount + ?", amount), "request_count": gorm.Expr("request_count + ?", requestCount), - "accessed_at": time.Now(), }, ) return HandleUpdateResult(result, ErrTokenNotFound) From 3323c7d4373420bfbc12be14b658b5c481b4d6ec Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 17:18:00 +0800 Subject: [PATCH 029/167] fix: del no use options --- service/aiproxy/common/config/config.go | 30 ------------------- service/aiproxy/model/option.go | 9 ------ service/aiproxy/relay/adaptor/openai/token.go | 4 --- 3 files changed, 43 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index cb32359c4e7..89b8fb28fd0 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -17,12 +17,6 @@ var ( ) var ( - // 当测试或请求的时候发生错误是否自动禁用渠道 - automaticDisableChannelEnabled atomic.Bool - // 当测试成功是否自动启用渠道 - automaticEnableChannelWhenTestSucceedEnabled atomic.Bool - // 是否近似计算token - approximateTokenEnabled atomic.Bool // 暂停服务 disableServe atomic.Bool // log detail 存储时间(小时) @@ -92,30 +86,6 @@ func SetDisableServe(disabled bool) { disableServe.Store(disabled) } -func GetAutomaticDisableChannelEnabled() bool { - return automaticDisableChannelEnabled.Load() -} - -func SetAutomaticDisableChannelEnabled(enabled bool) { - automaticDisableChannelEnabled.Store(enabled) -} - -func GetAutomaticEnableChannelWhenTestSucceedEnabled() bool { - return automaticEnableChannelWhenTestSucceedEnabled.Load() -} - -func SetAutomaticEnableChannelWhenTestSucceedEnabled(enabled bool) { - 
automaticEnableChannelWhenTestSucceedEnabled.Store(enabled) -} - -func GetApproximateTokenEnabled() bool { - return approximateTokenEnabled.Load() -} - -func SetApproximateTokenEnabled(enabled bool) { - approximateTokenEnabled.Store(enabled) -} - var DisableAutoMigrateDB = os.Getenv("DISABLE_AUTO_MIGRATE_DB") == "true" var RateLimitKeyExpirationDuration = 20 * time.Minute diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 0bd05fb43ab..adca1bbf510 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -49,9 +49,6 @@ func InitOption2DB() error { func initOptionMap() error { optionMap["LogDetailStorageHours"] = strconv.FormatInt(config.GetLogDetailStorageHours(), 10) optionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) - optionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(config.GetAutomaticDisableChannelEnabled()) - optionMap["AutomaticEnableChannelWhenTestSucceedEnabled"] = strconv.FormatBool(config.GetAutomaticEnableChannelWhenTestSucceedEnabled()) - optionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.GetApproximateTokenEnabled()) optionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) optionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) optionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) @@ -181,12 +178,6 @@ func updateOption(key string, value string, isInit bool) (err error) { config.SetLogDetailStorageHours(logDetailStorageHours) case "DisableServe": config.SetDisableServe(isTrue(value)) - case "AutomaticDisableChannelEnabled": - config.SetAutomaticDisableChannelEnabled(isTrue(value)) - case "AutomaticEnableChannelWhenTestSucceedEnabled": - config.SetAutomaticEnableChannelWhenTestSucceedEnabled(isTrue(value)) - case "ApproximateTokenEnabled": - config.SetApproximateTokenEnabled(isTrue(value)) case "BillingEnabled": config.SetBillingEnabled(isTrue(value)) case 
"GroupMaxTokenNum": diff --git a/service/aiproxy/relay/adaptor/openai/token.go b/service/aiproxy/relay/adaptor/openai/token.go index 99b8a98e194..1a985e3fe5c 100644 --- a/service/aiproxy/relay/adaptor/openai/token.go +++ b/service/aiproxy/relay/adaptor/openai/token.go @@ -8,7 +8,6 @@ import ( "sync" "unicode/utf8" - "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/pkoukk/tiktoken-go" @@ -53,9 +52,6 @@ func getTokenEncoder(model string) *tiktoken.Tiktoken { } func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int { - if config.GetApproximateTokenEnabled() { - return int(float64(len(text)) * 0.38) - } return len(tokenEncoder.Encode(text, nil, nil)) } From 51bb99143449d62e7d85c8995ed35805eb767bf2 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 17:23:16 +0800 Subject: [PATCH 030/167] fix: del no use options --- service/aiproxy/common/config/config.go | 10 ---------- service/aiproxy/middleware/rate-limit.go | 17 ----------------- service/aiproxy/model/option.go | 7 ------- service/aiproxy/router/relay.go | 1 - 4 files changed, 35 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index 89b8fb28fd0..d2b9f0b2c43 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -95,7 +95,6 @@ var OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false) var AdminKey = env.String("ADMIN_KEY", "") var ( - globalAPIRateLimitNum atomic.Int64 defaultChannelModels atomic.Value defaultChannelModelMapping atomic.Value groupMaxTokenNum atomic.Int32 @@ -106,15 +105,6 @@ func init() { defaultChannelModelMapping.Store(make(map[int]map[string]string)) } -// 全局qpm,不是根据ip限制,而是所有请求共享一个qpm -func GetGlobalAPIRateLimitNum() int64 { - return globalAPIRateLimitNum.Load() -} - -func SetGlobalAPIRateLimitNum(num int64) { - 
globalAPIRateLimitNum.Store(num) -} - func GetDefaultChannelModels() map[int][]string { return defaultChannelModels.Load().(map[int][]string) } diff --git a/service/aiproxy/middleware/rate-limit.go b/service/aiproxy/middleware/rate-limit.go index f947ae04bba..103138e41c0 100644 --- a/service/aiproxy/middleware/rate-limit.go +++ b/service/aiproxy/middleware/rate-limit.go @@ -2,10 +2,8 @@ package middleware import ( "context" - "net/http" "time" - "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" log "github.com/sirupsen/logrus" @@ -84,18 +82,3 @@ func MemoryRateLimit(_ context.Context, key string, maxRequestNum int64, duratio inMemoryRateLimiter.Init(config.RateLimitKeyExpirationDuration) return inMemoryRateLimiter.Request(key, int(maxRequestNum), duration) } - -func GlobalAPIRateLimit(c *gin.Context) { - globalAPIRateLimitNum := config.GetGlobalAPIRateLimitNum() - if globalAPIRateLimitNum <= 0 { - c.Next() - return - } - ok := ForceRateLimit(c.Request.Context(), "global_qpm", globalAPIRateLimitNum, time.Minute) - if !ok { - c.Status(http.StatusTooManyRequests) - c.Abort() - return - } - c.Next() -} diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index adca1bbf510..81e5f1244c5 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -58,7 +58,6 @@ func initOptionMap() error { return err } optionMap["TimeoutWithModelType"] = conv.BytesToString(timeoutWithModelTypeJSON) - optionMap["GlobalApiRateLimitNum"] = strconv.FormatInt(config.GetGlobalAPIRateLimitNum(), 10) defaultChannelModelsJSON, err := json.Marshal(config.GetDefaultChannelModels()) if err != nil { return err @@ -188,12 +187,6 @@ func updateOption(key string, value string, isInit bool) (err error) { config.SetGroupMaxTokenNum(int32(groupMaxTokenNum)) case "GeminiSafetySetting": config.SetGeminiSafetySetting(value) - case "GlobalApiRateLimitNum": - 
globalAPIRateLimitNum, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return err - } - config.SetGlobalAPIRateLimitNum(globalAPIRateLimitNum) case "DefaultChannelModels": var newModels map[int][]string err := json.Unmarshal(conv.StringToBytes(value), &newModels) diff --git a/service/aiproxy/router/relay.go b/service/aiproxy/router/relay.go index a15ccaa91dc..bbeef8dd275 100644 --- a/service/aiproxy/router/relay.go +++ b/service/aiproxy/router/relay.go @@ -9,7 +9,6 @@ import ( func SetRelayRouter(router *gin.Engine) { router.Use(middleware.CORS()) - router.Use(middleware.GlobalAPIRateLimit) // https://platform.openai.com/docs/api-reference/introduction modelsRouter := router.Group("/v1/models") modelsRouter.Use(middleware.TokenAuth) From 5f94ee8fb02f51b8fa330fc7e2dc4cbb302e463b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 17:37:55 +0800 Subject: [PATCH 031/167] fix: auto test banned models need return when get from redis error happend --- service/aiproxy/controller/channel-test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index efea3dca13d..4f7ba500db3 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -365,6 +365,7 @@ func AutoTestBannedModels() { channels, err := monitor.GetAllBannedChannels(context.Background()) if err != nil { log.Errorf("failed to get banned channels: %s", err.Error()) + return } if len(channels) == 0 { return From f946e6527e343955d0404d67241e27988334c87b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 18:00:13 +0800 Subject: [PATCH 032/167] fix: remove channel db hook --- service/aiproxy/model/cache.go | 2 +- service/aiproxy/model/channel.go | 45 ++++++++++++++++---------------- service/aiproxy/model/option.go | 2 +- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go 
index 0a7b2e3bc90..f4a8618e51d 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -437,7 +437,7 @@ func initializeChannelModels(channel *Channel) { return } - findedModels, missingModels, err := CheckModelConfig(channel.Models) + findedModels, missingModels, err := GetModelConfigWithModels(channel.Models) if err != nil { return } diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 6750f0d2de1..7efe419454e 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -2,6 +2,7 @@ package model import ( "fmt" + "slices" "strings" "time" @@ -46,32 +47,12 @@ func (c *Channel) BeforeDelete(tx *gorm.DB) (err error) { return tx.Model(&ChannelTest{}).Where("channel_id = ?", c.ID).Delete(&ChannelTest{}).Error } -// check model config exist -func (c *Channel) BeforeSave(tx *gorm.DB) (err error) { - if len(c.Models) == 0 { - return nil - } - _, missingModels, err := checkModelConfig(tx, c.Models) - if err != nil { - return err - } - if len(missingModels) > 0 { - return fmt.Errorf("model config not found: %v", missingModels) - } - return nil -} - -// check model config exist and rpm greater than 0 -func CheckModelConfig(models []string) ([]string, []string, error) { - return checkModelConfig(DB, models) -} - -func checkModelConfig(tx *gorm.DB, models []string) ([]string, []string, error) { +func GetModelConfigWithModels(models []string) ([]string, []string, error) { if len(models) == 0 { return models, nil, nil } - where := tx.Model(&ModelConfig{}).Where("model IN ?", models) + where := DB.Model(&ModelConfig{}).Where("model IN ?", models) var count int64 if err := where.Count(&count).Error; err != nil { return nil, nil, err @@ -106,6 +87,18 @@ func checkModelConfig(tx *gorm.DB, models []string) ([]string, []string, error) return foundModels, nil, nil } +func CheckModelConfigExist(models []string) error { + _, missingModels, err := GetModelConfigWithModels(models) + if err != nil { + 
return err + } + if len(missingModels) > 0 { + slices.Sort(missingModels) + return fmt.Errorf("model config not found: %v", missingModels) + } + return nil +} + func (c *Channel) MarshalJSON() ([]byte, error) { type Alias Channel return json.Marshal(&struct { @@ -262,12 +255,20 @@ func GetChannelByID(id int) (*Channel, error) { } func BatchInsertChannels(channels []*Channel) error { + for _, channel := range channels { + if err := CheckModelConfigExist(channel.Models); err != nil { + return err + } + } return DB.Transaction(func(tx *gorm.DB) error { return tx.Create(&channels).Error }) } func UpdateChannel(channel *Channel) error { + if err := CheckModelConfigExist(channel.Models); err != nil { + return err + } result := DB. Model(channel). Omit("used_amount", "request_count", "created_at", "balance_updated_at", "balance"). diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 81e5f1244c5..d6d0989070a 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -204,7 +204,7 @@ func updateOption(key string, value string, isInit bool) (err error) { for model := range allModelsMap { allModels = append(allModels, model) } - foundModels, missingModels, err := CheckModelConfig(allModels) + foundModels, missingModels, err := GetModelConfigWithModels(allModels) if err != nil { return err } From 4005bdf69d741d2319f5ff50269533aef12f1323 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 20:00:24 +0800 Subject: [PATCH 033/167] chore: clean detail only after insert it --- service/aiproxy/model/log.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 23e779c1928..3d2f2db597b 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -80,6 +80,19 @@ func GetLogDetail(logID int) (*RequestDetail, error) { return &detail, nil } +func cleanRequestDetail() error { + detailStorageHours := 
config.GetLogDetailStorageHours() + if detailStorageHours <= 0 { + return nil + } + return LogDB. + Where( + "created_at < ?", + time.Now().Add(-time.Duration(detailStorageHours)*time.Hour), + ). + Delete(&RequestDetail{}).Error +} + func RecordConsumeLog( requestID string, requestAt time.Time, @@ -100,13 +113,10 @@ func RecordConsumeLog( requestDetail *RequestDetail, ) error { defer func() { - detailStorageHours := config.GetLogDetailStorageHours() - if detailStorageHours <= 0 { + if requestDetail == nil { return } - err := LogDB. - Where("created_at < ?", time.Now().Add(-time.Duration(detailStorageHours)*time.Hour)). - Delete(&RequestDetail{}).Error + err := cleanRequestDetail() if err != nil { log.Errorf("delete request detail failed: %s", err) } From 49e7bb80cfbd7dccdc90ea80af87442f40ec83a7 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 20:33:43 +0800 Subject: [PATCH 034/167] fix: err print on debug --- service/aiproxy/model/group.go | 14 -------------- service/aiproxy/model/token.go | 3 +-- service/aiproxy/relay/controller/image.go | 3 ++- service/aiproxy/relay/controller/rerank.go | 3 ++- service/aiproxy/relay/controller/stt.go | 3 ++- service/aiproxy/relay/controller/text.go | 3 ++- service/aiproxy/relay/controller/tts.go | 3 ++- 7 files changed, 11 insertions(+), 21 deletions(-) diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index 2650a7bf09c..a2c8df1206a 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -135,20 +135,6 @@ func UpdateGroupUsedAmountAndRequestCount(id string, amount float64, count int) return HandleUpdateResult(result, ErrGroupNotFound) } -func UpdateGroupUsedAmount(id string, amount float64) error { - result := DB.Model(&Group{}).Where("id = ?", id).Updates(map[string]interface{}{ - "used_amount": gorm.Expr("used_amount + ?", amount), - }) - return HandleUpdateResult(result, ErrGroupNotFound) -} - -func UpdateGroupRequestCount(id string, count int) error { - 
result := DB.Model(&Group{}).Where("id = ?", id).Updates(map[string]interface{}{ - "request_count": gorm.Expr("request_count + ?", count), - }) - return HandleUpdateResult(result, ErrGroupNotFound) -} - func UpdateGroupRPM(id string, rpmRatio float64) (err error) { defer func() { if err == nil { diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index cccdd8e1064..d77fe36428a 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -497,7 +497,7 @@ func UpdateToken(token *Token) (err error) { func UpdateTokenUsedAmount(id int, amount float64, requestCount int) (err error) { token := &Token{ID: id} defer func() { - if amount > 0 && err == nil && token.Quota > 0 { + if amount > 0 && err == nil { if err := CacheUpdateTokenUsedAmountOnlyIncrease(token.Key, token.UsedAmount); err != nil { log.Error("update token used amount in cache failed: " + err.Error()) } @@ -508,7 +508,6 @@ func UpdateTokenUsedAmount(id int, amount float64, requestCount int) (err error) Clauses(clause.Returning{ Columns: []clause.Column{ {Name: "key"}, - {Name: "quota"}, {Name: "used_amount"}, }, }). 
diff --git a/service/aiproxy/relay/controller/image.go b/service/aiproxy/relay/controller/image.go index d9b514464a8..a983c569556 100644 --- a/service/aiproxy/relay/controller/image.go +++ b/service/aiproxy/relay/controller/image.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -97,7 +98,7 @@ func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStat // do response usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { - if detail != nil { + if detail != nil && config.DebugEnabled { log.Errorf("do image failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) } else { log.Errorf("do image failed: %s", respErr) diff --git a/service/aiproxy/relay/controller/rerank.go b/service/aiproxy/relay/controller/rerank.go index 70001623ff1..fbffcc772a2 100644 --- a/service/aiproxy/relay/controller/rerank.go +++ b/service/aiproxy/relay/controller/rerank.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -57,7 +58,7 @@ func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCo usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { - if detail != nil { + if detail != nil && config.DebugEnabled { log.Errorf("do rerank failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) } else { log.Errorf("do rerank failed: %s", respErr) diff --git a/service/aiproxy/relay/controller/stt.go 
b/service/aiproxy/relay/controller/stt.go index 030de04add4..f351f8d6278 100644 --- a/service/aiproxy/relay/controller/stt.go +++ b/service/aiproxy/relay/controller/stt.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -47,7 +48,7 @@ func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { - if detail != nil { + if detail != nil && config.DebugEnabled { log.Errorf("do stt failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) } else { log.Errorf("do stt failed: %s", respErr) diff --git a/service/aiproxy/relay/controller/text.go b/service/aiproxy/relay/controller/text.go index 50ffafb485d..26426fc113b 100644 --- a/service/aiproxy/relay/controller/text.go +++ b/service/aiproxy/relay/controller/text.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -59,7 +60,7 @@ func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode // do response usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { - if detail != nil { + if detail != nil && config.DebugEnabled { log.Errorf("do text failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) } else { log.Errorf("do text failed: %s", respErr) diff --git a/service/aiproxy/relay/controller/tts.go b/service/aiproxy/relay/controller/tts.go index df669b3b768..41571b95fce 100644 --- 
a/service/aiproxy/relay/controller/tts.go +++ b/service/aiproxy/relay/controller/tts.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -54,7 +55,7 @@ func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { - if detail != nil { + if detail != nil && config.DebugEnabled { log.Errorf("do tts failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) } else { log.Errorf("do tts failed: %s", respErr) From efc71d56dda55ea27f15f511f08394eca9bfa009 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 24 Dec 2024 21:18:12 +0800 Subject: [PATCH 035/167] fix: cache update --- service/aiproxy/model/cache.go | 75 +++++++++++++++++++++------------- service/aiproxy/model/group.go | 26 +++++++++--- service/aiproxy/model/token.go | 21 ++++++---- 3 files changed, 80 insertions(+), 42 deletions(-) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index f4a8618e51d..e16c1384f08 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -132,13 +132,6 @@ func CacheGetTokenByKey(key string) (*TokenCache, error) { return token.ToTokenCache(), nil } -var updateTokenUsedAmountScript = redis.NewScript(` - if redis.call("HExists", KEYS[1], "ua") then - redis.call("HSet", KEYS[1], "ua", ARGV[1]) - end - return redis.status_reply("ok") -`) - var updateTokenUsedAmountOnlyIncreaseScript = redis.NewScript(` local used_amount = redis.call("HGet", KEYS[1], "ua") if used_amount == false then @@ -151,47 +144,54 @@ var updateTokenUsedAmountOnlyIncreaseScript = redis.NewScript(` return redis.status_reply("ok") `) -var increaseTokenUsedAmountScript = 
redis.NewScript(` - local used_amount = redis.call("HGet", KEYS[1], "ua") - if used_amount == false then - return redis.status_reply("ok") - end - redis.call("HSet", KEYS[1], "ua", used_amount + ARGV[1]) - return redis.status_reply("ok") -`) - -func CacheUpdateTokenUsedAmount(key string, amount float64) error { +func CacheUpdateTokenUsedAmountOnlyIncrease(key string, amount float64) error { if !common.RedisEnabled { return nil } - return updateTokenUsedAmountScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, amount).Err() + return updateTokenUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, amount).Err() } -func CacheUpdateTokenUsedAmountOnlyIncrease(key string, amount float64) error { +var updateTokenNameScript = redis.NewScript(` + if redis.call("HExists", KEYS[1], "n") then + redis.call("HSet", KEYS[1], "n", ARGV[1]) + end + return redis.status_reply("ok") +`) + +func CacheUpdateTokenName(key string, name string) error { if !common.RedisEnabled { return nil } - return updateTokenUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, amount).Err() + return updateTokenNameScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, name).Err() } -func CacheIncreaseTokenUsedAmount(key string, amount float64) error { +var updateTokenStatusScript = redis.NewScript(` + if redis.call("HExists", KEYS[1], "st") then + redis.call("HSet", KEYS[1], "st", ARGV[1]) + end + return redis.status_reply("ok") +`) + +func CacheUpdateTokenStatus(key string, status int) error { if !common.RedisEnabled { return nil } - return increaseTokenUsedAmountScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, amount).Err() + return updateTokenStatusScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, status).Err() } type GroupCache struct { - ID 
string `json:"-" redis:"-"` - Status int `json:"status" redis:"st"` - RPMRatio float64 `json:"rpm_ratio" redis:"rpm"` + ID string `json:"-" redis:"-"` + Status int `json:"status" redis:"st"` + UsedAmount float64 `json:"used_amount" redis:"ua"` + RPMRatio float64 `json:"rpm_ratio" redis:"rpm"` } func (g *Group) ToGroupCache() *GroupCache { return &GroupCache{ - ID: g.ID, - Status: g.Status, - RPMRatio: g.RPMRatio, + ID: g.ID, + Status: g.Status, + UsedAmount: g.UsedAmount, + RPMRatio: g.RPMRatio, } } @@ -275,6 +275,25 @@ func CacheGetGroup(id string) (*GroupCache, error) { return group.ToGroupCache(), nil } +var updateGroupUsedAmountOnlyIncreaseScript = redis.NewScript(` + local used_amount = redis.call("HGet", KEYS[1], "ua") + if used_amount == false then + return redis.status_reply("ok") + end + if ARGV[1] < used_amount then + return redis.status_reply("ok") + end + redis.call("HSet", KEYS[1], "ua", ARGV[1]) + return redis.status_reply("ok") +`) + +func CacheUpdateGroupUsedAmountOnlyIncrease(id string, amount float64) error { + if !common.RedisEnabled { + return nil + } + return updateGroupUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, amount).Err() +} + var ( enabledChannels []*Channel allChannels []*Channel diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index a2c8df1206a..febc7529cb9 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -127,11 +127,27 @@ func DeleteGroupsByIDs(ids []string) (err error) { }) } -func UpdateGroupUsedAmountAndRequestCount(id string, amount float64, count int) error { - result := DB.Model(&Group{}).Where("id = ?", id).Updates(map[string]interface{}{ - "used_amount": gorm.Expr("used_amount + ?", amount), - "request_count": gorm.Expr("request_count + ?", count), - }) +func UpdateGroupUsedAmountAndRequestCount(id string, amount float64, count int) (err error) { + group := &Group{ID: id} + defer func() { + if amount 
> 0 && err == nil { + if err := CacheUpdateGroupUsedAmountOnlyIncrease(group.ID, group.UsedAmount); err != nil { + log.Error("update group used amount in cache failed: " + err.Error()) + } + } + }() + result := DB. + Model(group). + Clauses(clause.Returning{ + Columns: []clause.Column{ + {Name: "used_amount"}, + }, + }). + Where("id = ?", id). + Updates(map[string]interface{}{ + "used_amount": gorm.Expr("used_amount + ?", amount), + "request_count": gorm.Expr("request_count + ?", count), + }) return HandleUpdateResult(result, ErrGroupNotFound) } diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index d77fe36428a..4e49b3d02af 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -284,6 +284,8 @@ func ValidateAndGetToken(key string) (token *TokenCache, err error) { return nil, fmt.Errorf("token (%s[%d]) quota is exhausted", token.Name, token.ID) case TokenStatusExpired: return nil, fmt.Errorf("token (%s[%d]) is expired", token.Name, token.ID) + case TokenStatusDisabled: + return nil, fmt.Errorf("token (%s[%d]) is disabled", token.Name, token.ID) } if token.Status != TokenStatusEnabled { return nil, fmt.Errorf("token (%s[%d]) is not available", token.Name, token.ID) @@ -330,8 +332,8 @@ func UpdateTokenStatus(id int, status int) (err error) { token := Token{ID: id} defer func() { if err == nil { - if err := CacheDeleteToken(token.Key); err != nil { - log.Error("delete token from cache failed: " + err.Error()) + if err := CacheUpdateTokenStatus(token.Key, status); err != nil { + log.Error("update token status in cache failed: " + err.Error()) } } }() @@ -355,8 +357,8 @@ func UpdateGroupTokenStatus(group string, id int, status int) (err error) { token := Token{} defer func() { if err == nil { - if err := CacheDeleteToken(token.Key); err != nil { - log.Error("delete token from cache failed: " + err.Error()) + if err := CacheUpdateTokenStatus(token.Key, status); err != nil { + log.Error("update token status in cache 
failed: " + err.Error()) } } }() @@ -497,7 +499,7 @@ func UpdateToken(token *Token) (err error) { func UpdateTokenUsedAmount(id int, amount float64, requestCount int) (err error) { token := &Token{ID: id} defer func() { - if amount > 0 && err == nil { + if amount > 0 && err == nil && token.Quota > 0 { if err := CacheUpdateTokenUsedAmountOnlyIncrease(token.Key, token.UsedAmount); err != nil { log.Error("update token used amount in cache failed: " + err.Error()) } @@ -508,6 +510,7 @@ func UpdateTokenUsedAmount(id int, amount float64, requestCount int) (err error) Clauses(clause.Returning{ Columns: []clause.Column{ {Name: "key"}, + {Name: "quota"}, {Name: "used_amount"}, }, }). @@ -525,8 +528,8 @@ func UpdateTokenName(id int, name string) (err error) { token := &Token{ID: id} defer func() { if err == nil { - if err := CacheDeleteToken(token.Key); err != nil { - log.Error("delete token from cache failed: " + err.Error()) + if err := CacheUpdateTokenName(token.Key, name); err != nil { + log.Error("update token name in cache failed: " + err.Error()) } } }() @@ -549,8 +552,8 @@ func UpdateGroupTokenName(group string, id int, name string) (err error) { token := &Token{ID: id, GroupID: group} defer func() { if err == nil { - if err := CacheDeleteToken(token.Key); err != nil { - log.Error("delete token from cache failed: " + err.Error()) + if err := CacheUpdateTokenName(token.Key, name); err != nil { + log.Error("update token name in cache failed: " + err.Error()) } } }() From aacff9a5d99c24a3a948228a248d130d4615ce5f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 11:38:26 +0800 Subject: [PATCH 036/167] feat: group consume level rpm ratio --- service/aiproxy/common/config/config.go | 11 ++++ service/aiproxy/controller/option.go | 18 +++++- service/aiproxy/middleware/distributor.go | 75 +++++++++++++++++------ service/aiproxy/model/option.go | 46 ++++++++++++++ service/aiproxy/router/api.go | 1 + 5 files changed, 129 insertions(+), 22 deletions(-) diff --git 
a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index d2b9f0b2c43..dc5b09036db 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -98,11 +98,14 @@ var ( defaultChannelModels atomic.Value defaultChannelModelMapping atomic.Value groupMaxTokenNum atomic.Int32 + // group消费金额对应的rpm乘数,使用map[float64]float64 + groupConsumeLevelRpmRatio atomic.Value ) func init() { defaultChannelModels.Store(make(map[int][]string)) defaultChannelModelMapping.Store(make(map[int]map[string]string)) + groupConsumeLevelRpmRatio.Store(make(map[float64]float64)) } func GetDefaultChannelModels() map[int][]string { @@ -125,6 +128,14 @@ func SetDefaultChannelModelMapping(mapping map[int]map[string]string) { defaultChannelModelMapping.Store(mapping) } +func GetGroupConsumeLevelRpmRatio() map[float64]float64 { + return groupConsumeLevelRpmRatio.Load().(map[float64]float64) +} + +func SetGroupConsumeLevelRpmRatio(ratio map[float64]float64) { + groupConsumeLevelRpmRatio.Store(ratio) +} + // 那个group最多可创建的token数量,0表示不限制 func GetGroupMaxTokenNum() int32 { return groupMaxTokenNum.Load() diff --git a/service/aiproxy/controller/option.go b/service/aiproxy/controller/option.go index 6197331bb49..348a37f08db 100644 --- a/service/aiproxy/controller/option.go +++ b/service/aiproxy/controller/option.go @@ -3,12 +3,10 @@ package controller import ( "net/http" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" - - "github.com/gin-gonic/gin" ) func GetOptions(c *gin.Context) { @@ -24,6 +22,20 @@ func GetOptions(c *gin.Context) { middleware.SuccessResponse(c, options) } +func GetOption(c *gin.Context) { + key := c.Param("key") + if key == "" { + middleware.ErrorResponse(c, http.StatusOK, "key is required") + return + } + option, err := model.GetOption(key) + if err != nil { + middleware.ErrorResponse(c, 
http.StatusOK, err.Error()) + return + } + middleware.SuccessResponse(c, option) +} + func UpdateOption(c *gin.Context) { var option model.Option err := json.NewDecoder(c.Request.Body).Decode(&option) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 15b617dc959..0b98c913ee1 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -22,6 +22,59 @@ type ModelRequest struct { Model string `form:"model" json:"model"` } +func calculateGroupConsumeLevelRpmRatio(usedAmount float64) float64 { + v := config.GetGroupConsumeLevelRpmRatio() + var maxConsumeLevel float64 = -1 + var groupConsumeLevelRpmRatio float64 + for consumeLevel, ratio := range v { + if usedAmount < consumeLevel { + continue + } + if consumeLevel > maxConsumeLevel { + maxConsumeLevel = consumeLevel + groupConsumeLevelRpmRatio = ratio + } + } + if groupConsumeLevelRpmRatio <= 0 { + groupConsumeLevelRpmRatio = 1 + } + return groupConsumeLevelRpmRatio +} + +func getGroupRPMRatio(group *model.GroupCache) float64 { + groupRPMRatio := group.RPMRatio + if groupRPMRatio <= 0 { + groupRPMRatio = 1 + } + return groupRPMRatio +} + +func checkModelRPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64) bool { + if modelRPM <= 0 { + return true + } + + groupConsumeLevelRpmRatio := calculateGroupConsumeLevelRpmRatio(group.UsedAmount) + groupRPMRatio := getGroupRPMRatio(group) + + adjustedModelRPM := int64(float64(modelRPM) * groupRPMRatio * groupConsumeLevelRpmRatio) + + ok := ForceRateLimit( + c.Request.Context(), + fmt.Sprintf(groupModelRPMKey, group.ID, requestModel), + adjustedModelRPM, + time.Minute, + ) + + if !ok { + abortWithMessage(c, http.StatusTooManyRequests, + group.ID+" is requesting too frequently", + ) + return false + } + return true +} + func Distribute(c *gin.Context) { if config.GetDisableServe() { abortWithMessage(c, http.StatusServiceUnavailable, "service is under 
maintenance") @@ -60,25 +113,9 @@ func Distribute(c *gin.Context) { abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") return } - modelRPM := mc.RPM - if modelRPM > 0 { - groupRPMRatio := group.RPMRatio - if groupRPMRatio <= 0 { - groupRPMRatio = 1 - } - modelRPM = int64(float64(modelRPM) * groupRPMRatio) - ok = ForceRateLimit( - c.Request.Context(), - fmt.Sprintf(groupModelRPMKey, group.ID, requestModel), - modelRPM, - time.Minute, - ) - if !ok { - abortWithMessage(c, http.StatusTooManyRequests, - group.ID+" is requesting too frequently", - ) - return - } + + if !checkModelRPM(c, group, requestModel, mc.RPM) { + return } c.Set(ctxkey.OriginalModel, requestModel) diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index d6d0989070a..d13b8f06757 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -28,6 +28,15 @@ func GetAllOption() ([]*Option, error) { return options, err } +func GetOption(key string) (*Option, error) { + if !slices.Contains(optionKeys, key) { + return nil, ErrUnknownOptionKey + } + var option Option + err := DB.Where("key = ?", key).First(&option).Error + return &option, err +} + var ( optionMap = make(map[string]string) optionKeys []string @@ -70,6 +79,11 @@ func initOptionMap() error { optionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) optionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() optionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) + groupConsumeLevelRpmRatioJSON, err := json.Marshal(config.GetGroupConsumeLevelRpmRatio()) + if err != nil { + return err + } + optionMap["GroupConsumeLevelRpmRatio"] = conv.BytesToString(groupConsumeLevelRpmRatioJSON) optionKeys = make([]string, 0, len(optionMap)) for key := range optionMap { @@ -174,6 +188,9 @@ func updateOption(key string, value string, isInit bool) (err error) { if err != nil { return err } + if 
logDetailStorageHours < 0 { + return errors.New("log detail storage hours must be greater than 0") + } config.SetLogDetailStorageHours(logDetailStorageHours) case "DisableServe": config.SetDisableServe(isTrue(value)) @@ -184,6 +201,9 @@ func updateOption(key string, value string, isInit bool) (err error) { if err != nil { return err } + if groupMaxTokenNum < 0 { + return errors.New("group max token num must be greater than 0") + } config.SetGroupMaxTokenNum(int32(groupMaxTokenNum)) case "GeminiSafetySetting": config.SetGeminiSafetySetting(value) @@ -237,6 +257,9 @@ func updateOption(key string, value string, isInit bool) (err error) { if err != nil { return err } + if retryTimes < 0 { + return errors.New("retry times must be greater than 0") + } config.SetRetryTimes(retryTimes) case "EnableModelErrorAutoBan": config.SetEnableModelErrorAutoBan(isTrue(value)) @@ -245,6 +268,9 @@ func updateOption(key string, value string, isInit bool) (err error) { if err != nil { return err } + if modelErrorAutoBanRate < 0 || modelErrorAutoBanRate > 1 { + return errors.New("model error auto ban rate must be between 0 and 1") + } config.SetModelErrorAutoBanRate(modelErrorAutoBanRate) case "TimeoutWithModelType": var newTimeoutWithModelType map[int]int64 @@ -252,7 +278,27 @@ func updateOption(key string, value string, isInit bool) (err error) { if err != nil { return err } + for _, v := range newTimeoutWithModelType { + if v < 0 { + return errors.New("timeout must be greater than 0") + } + } config.SetTimeoutWithModelType(newTimeoutWithModelType) + case "GroupConsumeLevelRpmRatio": + var newGroupRpmRatio map[float64]float64 + err := json.Unmarshal(conv.StringToBytes(value), &newGroupRpmRatio) + if err != nil { + return err + } + for k, v := range newGroupRpmRatio { + if k < 0 { + return errors.New("consume level must be greater than 0") + } + if v < 0 { + return errors.New("rpm ratio must be greater than 0") + } + } + config.SetGroupConsumeLevelRpmRatio(newGroupRpmRatio) default: 
return ErrUnknownOptionKey } diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 92c2a610b1c..13954bdf293 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -56,6 +56,7 @@ func SetAPIRouter(router *gin.Engine) { optionRoute := apiRouter.Group("/option") { optionRoute.GET("/", controller.GetOptions) + optionRoute.GET("/:key", controller.GetOption) optionRoute.PUT("/", controller.UpdateOption) optionRoute.PUT("/batch", controller.UpdateOptions) } From 853e0e1a459bd8952b058ab627e29c24b6df394e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 14:54:30 +0800 Subject: [PATCH 037/167] fix: error return --- service/aiproxy/controller/relay.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index bcab8b90df1..95ac73b5ff0 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -65,7 +65,7 @@ func RelayHelper(meta *meta.Meta, c *gin.Context) (*model.ErrorWithStatusCode, b } return err, true } - return nil, false + return err, false } func getChannelWithFallback(model string, failedChannelIDs ...int) (*dbmodel.Channel, error) { From 0ff4cc7eec75c5d57ebe6dec4da45b4aaf4dc835 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 15:33:40 +0800 Subject: [PATCH 038/167] feat: decode svg --- service/aiproxy/common/image/svg.go | 45 +++++++++++++++++++++++++++++ service/aiproxy/go.mod | 2 ++ service/aiproxy/go.sum | 4 +++ 3 files changed, 51 insertions(+) create mode 100644 service/aiproxy/common/image/svg.go diff --git a/service/aiproxy/common/image/svg.go b/service/aiproxy/common/image/svg.go new file mode 100644 index 00000000000..a183d2b6f36 --- /dev/null +++ b/service/aiproxy/common/image/svg.go @@ -0,0 +1,45 @@ +package image + +import ( + "image" + "image/color" + "io" + + "github.com/srwiley/oksvg" + "github.com/srwiley/rasterx" +) + +func Decode(r io.Reader) 
(image.Image, error) { + icon, err := oksvg.ReadIconStream(r) + if err != nil { + return nil, err + } + + w, h := int(icon.ViewBox.W), int(icon.ViewBox.H) + icon.SetTarget(0, 0, float64(w), float64(h)) + + rgba := image.NewRGBA(image.Rect(0, 0, w, h)) + icon.Draw(rasterx.NewDasher(w, h, rasterx.NewScannerGV(w, h, rgba, rgba.Bounds())), 1) + + return rgba, err +} + +func DecodeConfig(r io.Reader) (image.Config, error) { + var config image.Config + + icon, err := oksvg.ReadIconStream(r) + if err != nil { + return config, err + } + + config.ColorModel = color.RGBAModel + config.Width = int(icon.ViewBox.W) + config.Height = int(icon.ViewBox.H) + + return config, nil +} + +func init() { + image.RegisterFormat("svg", " Date: Wed, 25 Dec 2024 15:36:16 +0800 Subject: [PATCH 039/167] fix: check is image --- service/aiproxy/common/image/image.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/common/image/image.go b/service/aiproxy/common/image/image.go index ca2a5511b29..3ab4e5df7cd 100644 --- a/service/aiproxy/common/image/image.go +++ b/service/aiproxy/common/image/image.go @@ -76,6 +76,10 @@ func GetImageFromURL(ctx context.Context, url string) (string, string, error) { if resp.StatusCode != http.StatusOK { return "", "", fmt.Errorf("status code: %d", resp.StatusCode) } + isImage := IsImageURL(resp) + if !isImage { + return "", "", errors.New("not an image") + } var buf []byte if resp.ContentLength <= 0 { buf, err = io.ReadAll(resp.Body) @@ -86,10 +90,6 @@ func GetImageFromURL(ctx context.Context, url string) (string, string, error) { if err != nil { return "", "", err } - isImage := IsImageURL(resp) - if !isImage { - return "", "", errors.New("not an image") - } return resp.Header.Get("Content-Type"), base64.StdEncoding.EncodeToString(buf), nil } From 982cd0e9d534056d1ee3b52397c138179d448b9d Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 16:17:29 +0800 Subject: [PATCH 040/167] fix: reply raw 429 message --- 
service/aiproxy/controller/relay.go | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 95ac73b5ff0..b3e8a4fed6d 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -98,7 +98,7 @@ func Relay(c *gin.Context) { if err != nil { c.JSON(http.StatusServiceUnavailable, gin.H{ "error": &model.Error{ - Message: "The upstream load of the current group is saturated, please try again later", + Message: "The upstream load is saturated, please try again later", Code: "upstream_load_saturated", Type: middleware.ErrorTypeAIPROXY, }, @@ -146,17 +146,9 @@ func Relay(c *gin.Context) { failedChannelIDs = append(failedChannelIDs, newChannel.ID) } if bizErr != nil { - message := bizErr.Message - if bizErr.StatusCode == http.StatusTooManyRequests { - message = "The upstream load of the current group is saturated, please try again later" - } + bizErr.Message = middleware.MessageWithRequestID(bizErr.Message, requestID) c.JSON(bizErr.StatusCode, gin.H{ - "error": &model.Error{ - Message: middleware.MessageWithRequestID(message, requestID), - Code: bizErr.Code, - Param: bizErr.Param, - Type: bizErr.Type, - }, + "error": bizErr, }) } } From 04895b619abf3678a340a3c79d1807e372d2a8e8 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 17:02:00 +0800 Subject: [PATCH 041/167] feat: req and resp body max size limit --- service/aiproxy/common/gin.go | 37 ++++++++++++++++++++++++++- service/aiproxy/common/image/image.go | 16 +++++++++++- 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/common/gin.go b/service/aiproxy/common/gin.go index 113617b4d77..eb2c95f0123 100644 --- a/service/aiproxy/common/gin.go +++ b/service/aiproxy/common/gin.go @@ -3,6 +3,7 @@ package common import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -13,6 +14,31 @@ import ( type RequestBodyKey struct{} +const ( + 
MaxRequestBodySize = 1024 * 1024 * 50 // 50MB +) + +func LimitReader(r io.Reader, n int64) io.Reader { return &LimitedReader{r, n} } + +type LimitedReader struct { + R io.Reader + N int64 +} + +var ErrLimitedReaderExceeded = errors.New("limited reader exceeded") + +func (l *LimitedReader) Read(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, ErrLimitedReaderExceeded + } + if int64(len(p)) > l.N { + p = p[0:l.N] + } + n, err = l.R.Read(p) + l.N -= int64(n) + return +} + func GetRequestBody(req *http.Request) ([]byte, error) { requestBody := req.Context().Value(RequestBodyKey{}) if requestBody != nil { @@ -27,8 +53,17 @@ func GetRequestBody(req *http.Request) ([]byte, error) { } }() if req.ContentLength <= 0 || req.Header.Get("Content-Type") != "application/json" { - buf, err = io.ReadAll(req.Body) + buf, err = io.ReadAll(LimitReader(req.Body, MaxRequestBodySize)) + if err != nil { + if errors.Is(err, ErrLimitedReaderExceeded) { + return nil, fmt.Errorf("request body too large, max: %d", MaxRequestBodySize) + } + return nil, fmt.Errorf("request body read failed: %w", err) + } } else { + if req.ContentLength > MaxRequestBodySize { + return nil, fmt.Errorf("request body too large: %d, max: %d", req.ContentLength, MaxRequestBodySize) + } buf = make([]byte, req.ContentLength) _, err = io.ReadFull(req.Body, buf) } diff --git a/service/aiproxy/common/image/image.go b/service/aiproxy/common/image/image.go index 3ab4e5df7cd..a584b8b84b6 100644 --- a/service/aiproxy/common/image/image.go +++ b/service/aiproxy/common/image/image.go @@ -20,6 +20,7 @@ import ( "strings" // import webp decoder + "github.com/labring/sealos/service/aiproxy/common" _ "golang.org/x/image/webp" ) @@ -56,6 +57,10 @@ func GetImageSizeFromURL(url string) (width int, height int, err error) { return img.Width, img.Height, nil } +const ( + MaxImageSize = 1024 * 1024 * 5 // 5MB +) + func GetImageFromURL(ctx context.Context, url string) (string, string, error) { // Check if the URL is a data URL 
matches := dataURLPattern.FindStringSubmatch(url) @@ -82,8 +87,17 @@ func GetImageFromURL(ctx context.Context, url string) (string, string, error) { } var buf []byte if resp.ContentLength <= 0 { - buf, err = io.ReadAll(resp.Body) + buf, err = io.ReadAll(common.LimitReader(resp.Body, MaxImageSize)) + if err != nil { + if errors.Is(err, common.ErrLimitedReaderExceeded) { + return "", "", fmt.Errorf("image too large, max: %d", MaxImageSize) + } + return "", "", fmt.Errorf("image read failed: %w", err) + } } else { + if resp.ContentLength > MaxImageSize { + return "", "", fmt.Errorf("image too large: %d, max: %d", resp.ContentLength, MaxImageSize) + } buf = make([]byte, resp.ContentLength) _, err = io.ReadFull(resp.Body, buf) } From 03396a1c30c1b4dfc706ba0d222fb6236cbbf072 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 17:08:21 +0800 Subject: [PATCH 042/167] fix: _ import lint --- service/aiproxy/common/image/image.go | 3 +- service/aiproxy/common/image/image_test.go | 169 --------------------- 2 files changed, 2 insertions(+), 170 deletions(-) delete mode 100644 service/aiproxy/common/image/image_test.go diff --git a/service/aiproxy/common/image/image.go b/service/aiproxy/common/image/image.go index a584b8b84b6..9ec808c0e37 100644 --- a/service/aiproxy/common/image/image.go +++ b/service/aiproxy/common/image/image.go @@ -19,8 +19,9 @@ import ( "regexp" "strings" - // import webp decoder "github.com/labring/sealos/service/aiproxy/common" + + // import webp decoder _ "golang.org/x/image/webp" ) diff --git a/service/aiproxy/common/image/image_test.go b/service/aiproxy/common/image/image_test.go deleted file mode 100644 index f5abb3f1271..00000000000 --- a/service/aiproxy/common/image/image_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package image_test - -import ( - "encoding/base64" - "image" - _ "image/gif" - _ "image/jpeg" - _ "image/png" - "io" - "net/http" - "strconv" - "strings" - "testing" - - img 
"github.com/labring/sealos/service/aiproxy/common/image" - - "github.com/stretchr/testify/assert" - _ "golang.org/x/image/webp" -) - -type CountingReader struct { - reader io.Reader - BytesRead int -} - -func (r *CountingReader) Read(p []byte) (n int, err error) { - n, err = r.reader.Read(p) - r.BytesRead += n - return n, err -} - -var cases = []struct { - url string - format string - width int - height int -}{ - {"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", "jpeg", 2560, 1669}, - {"https://upload.wikimedia.org/wikipedia/commons/9/97/Basshunter_live_performances.png", "png", 4500, 2592}, - {"https://upload.wikimedia.org/wikipedia/commons/c/c6/TO_THE_ONE_SOMETHINGNESS.webp", "webp", 984, 985}, - {"https://upload.wikimedia.org/wikipedia/commons/d/d0/01_Das_Sandberg-Modell.gif", "gif", 1917, 1533}, - {"https://upload.wikimedia.org/wikipedia/commons/6/62/102Cervus.jpg", "jpeg", 270, 230}, -} - -func TestDecode(t *testing.T) { - // Bytes read: varies sometimes - // jpeg: 1063892 - // png: 294462 - // webp: 99529 - // gif: 956153 - // jpeg#01: 32805 - for _, c := range cases { - t.Run("Decode:"+c.format, func(t *testing.T) { - resp, err := http.Get(c.url) - assert.NoError(t, err) - defer resp.Body.Close() - reader := &CountingReader{reader: resp.Body} - img, format, err := image.Decode(reader) - assert.NoError(t, err) - size := img.Bounds().Size() - assert.Equal(t, c.format, format) - assert.Equal(t, c.width, size.X) - assert.Equal(t, c.height, size.Y) - t.Logf("Bytes read: %d", reader.BytesRead) - }) - } - - // Bytes read: - // jpeg: 4096 - // png: 4096 - // webp: 4096 - // gif: 4096 - // jpeg#01: 4096 - for _, c := range cases { - t.Run("DecodeConfig:"+c.format, func(t *testing.T) { - resp, err := http.Get(c.url) - assert.NoError(t, err) - defer resp.Body.Close() - reader := &CountingReader{reader: resp.Body} - config, format, err := 
image.DecodeConfig(reader) - assert.NoError(t, err) - assert.Equal(t, c.format, format) - assert.Equal(t, c.width, config.Width) - assert.Equal(t, c.height, config.Height) - t.Logf("Bytes read: %d", reader.BytesRead) - }) - } -} - -func TestBase64(t *testing.T) { - // Bytes read: - // jpeg: 1063892 - // png: 294462 - // webp: 99072 - // gif: 953856 - // jpeg#01: 32805 - for _, c := range cases { - t.Run("Decode:"+c.format, func(t *testing.T) { - resp, err := http.Get(c.url) - assert.NoError(t, err) - defer resp.Body.Close() - data, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - encoded := base64.StdEncoding.EncodeToString(data) - body := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encoded)) - reader := &CountingReader{reader: body} - img, format, err := image.Decode(reader) - assert.NoError(t, err) - size := img.Bounds().Size() - assert.Equal(t, c.format, format) - assert.Equal(t, c.width, size.X) - assert.Equal(t, c.height, size.Y) - t.Logf("Bytes read: %d", reader.BytesRead) - }) - } - - // Bytes read: - // jpeg: 1536 - // png: 768 - // webp: 768 - // gif: 1536 - // jpeg#01: 3840 - for _, c := range cases { - t.Run("DecodeConfig:"+c.format, func(t *testing.T) { - resp, err := http.Get(c.url) - assert.NoError(t, err) - defer resp.Body.Close() - data, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - encoded := base64.StdEncoding.EncodeToString(data) - body := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encoded)) - reader := &CountingReader{reader: body} - config, format, err := image.DecodeConfig(reader) - assert.NoError(t, err) - assert.Equal(t, c.format, format) - assert.Equal(t, c.width, config.Width) - assert.Equal(t, c.height, config.Height) - t.Logf("Bytes read: %d", reader.BytesRead) - }) - } -} - -func TestGetImageSize(t *testing.T) { - for i, c := range cases { - t.Run("Decode:"+strconv.Itoa(i), func(t *testing.T) { - width, height, err := img.GetImageSize(c.url) - assert.NoError(t, err) - assert.Equal(t, c.width, 
width) - assert.Equal(t, c.height, height) - }) - } -} - -func TestGetImageSizeFromBase64(t *testing.T) { - for i, c := range cases { - t.Run("Decode:"+strconv.Itoa(i), func(t *testing.T) { - resp, err := http.Get(c.url) - assert.NoError(t, err) - defer resp.Body.Close() - data, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - encoded := base64.StdEncoding.EncodeToString(data) - width, height, err := img.GetImageSizeFromBase64(encoded) - assert.NoError(t, err) - assert.Equal(t, c.width, width) - assert.Equal(t, c.height, height) - }) - } -} From 907f5c00226d410c8d617c1a8571c9b1c7583687 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 25 Dec 2024 17:36:27 +0800 Subject: [PATCH 043/167] fix: get token encoder log --- service/aiproxy/relay/adaptor/openai/token.go | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/service/aiproxy/relay/adaptor/openai/token.go b/service/aiproxy/relay/adaptor/openai/token.go index 1a985e3fe5c..2b3df7d2be7 100644 --- a/service/aiproxy/relay/adaptor/openai/token.go +++ b/service/aiproxy/relay/adaptor/openai/token.go @@ -2,7 +2,6 @@ package openai import ( "errors" - "fmt" "math" "strings" "sync" @@ -33,22 +32,23 @@ func getTokenEncoder(model string) *tiktoken.Tiktoken { tokenEncoderLock.RLock() tokenEncoder, ok := tokenEncoderMap[model] tokenEncoderLock.RUnlock() - - if ok && tokenEncoder != nil { + if ok { return tokenEncoder } - if ok { - tokenEncoder, err := tiktoken.EncodingForModel(model) - if err != nil { - log.Error(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error())) - tokenEncoder = defaultTokenEncoder - } - tokenEncoderLock.Lock() - tokenEncoderMap[model] = tokenEncoder - tokenEncoderLock.Unlock() + + tokenEncoderLock.Lock() + defer tokenEncoderLock.Unlock() + if tokenEncoder, ok := tokenEncoderMap[model]; ok { return tokenEncoder } - return defaultTokenEncoder + + tokenEncoder, err := tiktoken.EncodingForModel(model) + if 
err != nil { + log.Warnf("failed to get token encoder for model %s: %v, using encoder for gpt-3.5-turbo", model, err) + tokenEncoder = defaultTokenEncoder + } + tokenEncoderMap[model] = tokenEncoder + return tokenEncoder } func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int { From bf6b03784c7a35766744be0ae13786d77928912f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 26 Dec 2024 11:51:07 +0800 Subject: [PATCH 044/167] fix: sum used amount --- service/aiproxy/model/log.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 3d2f2db597b..f67daaddd65 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -7,11 +7,11 @@ import ( "time" json "github.com/json-iterator/go" - log "github.com/sirupsen/logrus" - "gorm.io/gorm" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/shopspring/decimal" + log "github.com/sirupsen/logrus" + "gorm.io/gorm" ) type RequestDetail struct { @@ -682,7 +682,7 @@ func DeleteGroupLogs(groupID string) (int64, error) { type HourlyChartData struct { Timestamp int64 `json:"timestamp"` RequestCount int64 `json:"request_count"` - TotalCost float64 `json:"total_cost"` + UsedAmount float64 `json:"used_amount"` ExceptionCount int64 `json:"exception_count"` } @@ -692,6 +692,7 @@ type DashboardResponse struct { Models []string `json:"models"` TotalCount int64 `json:"total_count"` ExceptionCount int64 `json:"exception_count"` + UsedAmount float64 `json:"used_amount"` } func getHourTimestamp() string { @@ -716,7 +717,7 @@ func getChartData(group string, start, end time.Time, tokenName, modelName strin } query := LogDB.Table("logs"). - Select(hourTimestamp+" as timestamp, count(*) as request_count, sum(price) as total_cost, sum(case when code != 200 then 1 else 0 end) as exception_count"). 
+ Select(hourTimestamp+" as timestamp, count(*) as request_count, sum(used_amount) as used_amount, sum(case when code != 200 then 1 else 0 end) as exception_count"). Where("group_id = ? AND request_at BETWEEN ? AND ?", group, start, end). Group("timestamp"). Order("timestamp ASC") @@ -766,6 +767,14 @@ func sumExceptionCount(chartData []*HourlyChartData) int64 { return count } +func sumUsedAmount(chartData []*HourlyChartData) float64 { + var amount decimal.Decimal + for _, data := range chartData { + amount = amount.Add(decimal.NewFromFloat(data.UsedAmount)) + } + return amount.InexactFloat64() +} + func GetDashboardData(group string, start, end time.Time, tokenName string, modelName string) (*DashboardResponse, error) { if end.IsZero() { end = time.Now() @@ -790,6 +799,7 @@ func GetDashboardData(group string, start, end time.Time, tokenName string, mode totalCount := sumTotalCount(chartData) exceptionCount := sumExceptionCount(chartData) + usedAmount := sumUsedAmount(chartData) return &DashboardResponse{ ChartData: chartData, @@ -797,6 +807,7 @@ func GetDashboardData(group string, start, end time.Time, tokenName string, mode Models: models, TotalCount: totalCount, ExceptionCount: exceptionCount, + UsedAmount: usedAmount, }, nil } From f71a911043d61d2c477cda40814267dfa3f8d5de Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 27 Dec 2024 15:26:52 +0800 Subject: [PATCH 045/167] fix: delete no need cache --- service/aiproxy/main.go | 19 ++++---- service/aiproxy/model/cache.go | 85 ++++------------------------------ 2 files changed, 19 insertions(+), 85 deletions(-) diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index c2a3ce8d401..352475c723f 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -103,17 +103,13 @@ func initializeCaches() error { if err := model.InitOption2DB(); err != nil { return err } - if err := model.InitModelConfigCache(); err != nil { - return err - } - return model.InitChannelCache() + return 
model.InitModelConfigAndChannelCache() } func startSyncServices(ctx context.Context, wg *sync.WaitGroup) { - wg.Add(3) + wg.Add(2) go model.SyncOptions(ctx, wg, time.Second*5) - go model.SyncChannelCache(ctx, wg, time.Second*5) - go model.SyncModelConfigCache(ctx, wg, time.Second*5) + go model.SyncModelConfigAndChannelCache(ctx, wg, time.Second*10) } func setupHTTPServer() (*http.Server, *gin.Engine) { @@ -177,17 +173,22 @@ func main() { go autoTestBannedModels() <-ctx.Done() - log.Info("shutting down server...") - log.Info("max wait time: 120s") shutdownCtx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() + log.Info("shutting down http server...") + log.Info("max wait time: 120s") if err := srv.Shutdown(shutdownCtx); err != nil { log.Error("server forced to shutdown: " + err.Error()) + } else { + log.Info("server shutdown successfully") } + log.Info("shutting down relay consumer...") relaycontroller.ConsumeWaitGroup.Wait() + + log.Info("shutting down sync services...") wg.Wait() log.Info("server exiting") diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index e16c1384f08..159250ac0c8 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -295,30 +295,14 @@ func CacheUpdateGroupUsedAmountOnlyIncrease(id string, amount float64) error { } var ( - enabledChannels []*Channel - allChannels []*Channel enabledModel2channels map[string][]*Channel enabledModels []string enabledModelConfigs []*ModelConfig enabledChannelType2ModelConfigs map[int][]*ModelConfig enabledChannelID2channel map[int]*Channel - allChannelID2channel map[int]*Channel channelSyncLock sync.RWMutex ) -func CacheGetAllChannels() []*Channel { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return allChannels -} - -func CacheGetAllChannelByID(id int) (*Channel, bool) { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - channel, ok := allChannelID2channel[id] - return channel, ok -} - // 
GetEnabledModel2Channels returns a map of model name to enabled channels func GetEnabledModel2Channels() map[string][]*Channel { channelSyncLock.RLock() @@ -347,12 +331,6 @@ func CacheGetEnabledModelConfigs() []*ModelConfig { return enabledModelConfigs } -func CacheGetEnabledChannels() []*Channel { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledChannels -} - func CacheGetEnabledChannelByID(id int) (*Channel, bool) { channelSyncLock.RLock() defer channelSyncLock.RUnlock() @@ -360,16 +338,15 @@ func CacheGetEnabledChannelByID(id int) (*Channel, bool) { return channel, ok } -// InitChannelCache initializes the channel cache from database -func InitChannelCache() error { - // Load enabled newEnabledChannels from database - newEnabledChannels, err := LoadEnabledChannels() +// InitModelConfigAndChannelCache initializes the channel cache from database +func InitModelConfigAndChannelCache() error { + err := initModelConfigCache() if err != nil { return err } - // Load all channels from database - newAllChannels, err := LoadChannels() + // Load enabled newEnabledChannels from database + newEnabledChannels, err := LoadEnabledChannels() if err != nil { return err } @@ -378,7 +355,6 @@ func InitChannelCache() error { newEnabledChannelID2channel := buildChannelIDMap(newEnabledChannels) // Build all channel ID to channel map - newAllChannelID2channel := buildChannelIDMap(newAllChannels) // Build model to channels map newEnabledModel2channels := buildModelToChannelsMap(newEnabledChannels) @@ -394,14 +370,11 @@ func InitChannelCache() error { // Update global cache atomically updateGlobalCache( - newEnabledChannels, - newAllChannels, newEnabledModel2channels, newEnabledModels, newEnabledModelConfigs, newEnabledChannelID2channel, newEnabledChannelType2ModelConfigs, - newAllChannelID2channel, ) return nil @@ -572,28 +545,22 @@ func SortModelConfigsFunc(i, j *ModelConfig) int { } func updateGlobalCache( - newEnabledChannels []*Channel, - newAllChannels 
[]*Channel, newEnabledModel2channels map[string][]*Channel, newEnabledModels []string, newEnabledModelConfigs []*ModelConfig, newEnabledChannelID2channel map[int]*Channel, newEnabledChannelType2ModelConfigs map[int][]*ModelConfig, - newAllChannelID2channel map[int]*Channel, ) { channelSyncLock.Lock() defer channelSyncLock.Unlock() - enabledChannels = newEnabledChannels - allChannels = newAllChannels enabledModel2channels = newEnabledModel2channels enabledModels = newEnabledModels enabledModelConfigs = newEnabledModelConfigs enabledChannelID2channel = newEnabledChannelID2channel enabledChannelType2ModelConfigs = newEnabledChannelType2ModelConfigs - allChannelID2channel = newAllChannelID2channel } -func SyncChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Duration) { +func SyncModelConfigAndChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Duration) { defer wg.Done() ticker := time.NewTicker(frequency) @@ -603,7 +570,7 @@ func SyncChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Du case <-ctx.Done(): return case <-ticker.C: - err := InitChannelCache() + err := InitModelConfigAndChannelCache() if err != nil { log.Error("failed to sync channels: " + err.Error()) continue @@ -672,7 +639,7 @@ var ( modelConfigMap map[string]*ModelConfig ) -func InitModelConfigCache() error { +func initModelConfigCache() error { modelConfigs, err := GetAllModelConfigs() if err != nil { return err @@ -683,48 +650,14 @@ func InitModelConfigCache() error { } modelConfigSyncLock.Lock() + defer modelConfigSyncLock.Unlock() modelConfigMap = newModelConfigMap - modelConfigSyncLock.Unlock() return nil } -func SyncModelConfigCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Duration) { - defer wg.Done() - - ticker := time.NewTicker(frequency) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - err := InitModelConfigCache() - if err != nil { - log.Error("failed to sync model configs: " + 
err.Error()) - } - } - } -} - func CacheGetModelConfig(model string) (*ModelConfig, bool) { modelConfigSyncLock.RLock() defer modelConfigSyncLock.RUnlock() modelConfig, ok := modelConfigMap[model] return modelConfig, ok } - -func CacheCheckModelConfig(models []string) ([]string, []string) { - if len(models) == 0 { - return models, nil - } - founded := make([]string, 0) - missing := make([]string, 0) - for _, model := range models { - if _, ok := modelConfigMap[model]; ok { - founded = append(founded, model) - } else { - missing = append(missing, model) - } - } - return founded, missing -} From aa8ae73dadd3886b7d4160653e9fac658412add8 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 27 Dec 2024 17:34:34 +0800 Subject: [PATCH 046/167] feat: dashboard rpm --- service/aiproxy/controller/dashboard.go | 34 +- service/aiproxy/controller/group.go | 16 +- service/aiproxy/controller/log.go | 20 +- service/aiproxy/controller/token.go | 70 ++-- service/aiproxy/model/log.go | 516 ++++++++++++------------ service/aiproxy/model/token.go | 82 +--- service/aiproxy/router/api.go | 3 +- 7 files changed, 356 insertions(+), 385 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index edfd40f215d..454bcf14c12 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -9,16 +9,10 @@ import ( "github.com/labring/sealos/service/aiproxy/model" ) -func GetGroupDashboard(c *gin.Context) { - group := c.Param("group") - if group == "" { - middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") - return - } - +func getDashboardStartEndTime(t string) (time.Time, time.Time) { end := time.Now() var start time.Time - switch c.Query("type") { + switch t { case "month": start = end.AddDate(0, 0, -30) case "two_week": @@ -30,10 +24,32 @@ func GetGroupDashboard(c *gin.Context) { default: start = end.AddDate(0, 0, -1) } + return start, end +} + +func GetDashboard(c *gin.Context) { + start, end 
:= getDashboardStartEndTime(c.Query("type")) + modelName := c.Query("model") + dashboards, err := model.GetDashboardData(start, end, modelName) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + middleware.SuccessResponse(c, dashboards) +} + +func GetGroupDashboard(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + + start, end := getDashboardStartEndTime(c.Query("type")) tokenName := c.Query("token_name") modelName := c.Query("model") - dashboards, err := model.GetDashboardData(group, start, end, tokenName, modelName) + dashboards, err := model.GetGroupDashboardData(group, start, end, tokenName, modelName) if err != nil { middleware.ErrorResponse(c, http.StatusOK, "failed to get statistics") return diff --git a/service/aiproxy/controller/group.go b/service/aiproxy/controller/group.go index e5b2c710873..2de78ed4252 100644 --- a/service/aiproxy/controller/group.go +++ b/service/aiproxy/controller/group.go @@ -193,20 +193,24 @@ func DeleteGroups(c *gin.Context) { } type CreateGroupRequest struct { - ID string `json:"id"` RPMRatio float64 `json:"rpm_ratio"` } func CreateGroup(c *gin.Context) { - var group CreateGroupRequest - err := json.NewDecoder(c.Request.Body).Decode(&group) - if err != nil || group.ID == "" { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + req := CreateGroupRequest{} + err := json.NewDecoder(c.Request.Body).Decode(&req) + if err != nil { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } if err := model.CreateGroup(&model.Group{ - ID: group.ID, - RPMRatio: group.RPMRatio, + ID: group, + RPMRatio: req.RPMRatio, }); err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return diff --git a/service/aiproxy/controller/log.go b/service/aiproxy/controller/log.go index 9b9c1522d39..c4b8e209f39 100644 --- 
a/service/aiproxy/controller/log.go +++ b/service/aiproxy/controller/log.go @@ -44,10 +44,10 @@ func GetLogs(c *gin.Context) { codeType := c.Query("code_type") withBody, _ := strconv.ParseBool(c.Query("with_body")) result, err := model.GetLogs( + group, startTimestampTime, endTimestampTime, modelName, - group, requestID, tokenID, tokenName, @@ -68,6 +68,11 @@ func GetLogs(c *gin.Context) { } func GetGroupLogs(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "group is required") + return + } p, _ := strconv.Atoi(c.Query("p")) p-- if p < 0 { @@ -92,7 +97,6 @@ func GetGroupLogs(c *gin.Context) { tokenName := c.Query("token_name") modelName := c.Query("model_name") channelID, _ := strconv.Atoi(c.Query("channel")) - group := c.Param("group") endpoint := c.Query("endpoint") tokenID, _ := strconv.Atoi(c.Query("token_id")) order := c.Query("order") @@ -136,7 +140,7 @@ func SearchLogs(c *gin.Context) { endpoint := c.Query("endpoint") tokenName := c.Query("token_name") modelName := c.Query("model_name") - groupID := c.Query("group_id") + group := c.Query("group_id") tokenID, _ := strconv.Atoi(c.Query("token_id")) channelID, _ := strconv.Atoi(c.Query("channel")) startTimestamp, _ := strconv.ParseInt(c.Query("start_timestamp"), 10, 64) @@ -155,11 +159,11 @@ func SearchLogs(c *gin.Context) { codeType := c.Query("code_type") withBody, _ := strconv.ParseBool(c.Query("with_body")) result, err := model.SearchLogs( + group, keyword, p, perPage, endpoint, - groupID, requestID, tokenID, tokenName, @@ -180,6 +184,11 @@ func SearchLogs(c *gin.Context) { } func SearchGroupLogs(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "group is required") + return + } keyword := c.Query("keyword") p, _ := strconv.Atoi(c.Query("p")) perPage, _ := strconv.Atoi(c.Query("per_page")) @@ -188,7 +197,6 @@ func SearchGroupLogs(c *gin.Context) { } else if perPage > 100 { perPage = 100 } 
- group := c.Param("group") endpoint := c.Query("endpoint") tokenName := c.Query("token_name") modelName := c.Query("model_name") @@ -251,7 +259,7 @@ func GetGroupLogDetail(c *gin.Context) { return } logID, _ := strconv.Atoi(c.Param("log_id")) - log, err := model.GetGroupLogDetail(group, logID) + log, err := model.GetGroupLogDetail(logID, group) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return diff --git a/service/aiproxy/controller/token.go b/service/aiproxy/controller/token.go index 84ceae6fb39..abdf52815aa 100644 --- a/service/aiproxy/controller/token.go +++ b/service/aiproxy/controller/token.go @@ -50,7 +50,7 @@ func GetTokens(c *gin.Context) { group := c.Query("group") order := c.Query("order") status, _ := strconv.Atoi(c.Query("status")) - tokens, total, err := model.GetTokens(p*perPage, perPage, order, group, status) + tokens, total, err := model.GetTokens(group, p*perPage, perPage, order, status) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -70,6 +70,11 @@ func GetTokens(c *gin.Context) { } func GetGroupTokens(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "group is required") + return + } p, _ := strconv.Atoi(c.Query("p")) p-- if p < 0 { @@ -81,17 +86,16 @@ func GetGroupTokens(c *gin.Context) { } else if perPage > 100 { perPage = 100 } - group := c.Param("group") order := c.Query("order") status, _ := strconv.Atoi(c.Query("status")) - tokens, total, err := model.GetGroupTokens(group, p*perPage, perPage, order, status) + tokens, total, err := model.GetTokens(group, p*perPage, perPage, order, status) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } tokenResponses := make([]*TokenResponse, len(tokens)) for i, token := range tokens { - lastRequestAt, _ := model.GetGroupTokenLastRequestTime(group, token.ID) + lastRequestAt, _ := model.GetTokenLastRequestTime(token.ID) tokenResponses[i] = 
&TokenResponse{ Token: token, AccessedAt: lastRequestAt, @@ -121,7 +125,7 @@ func SearchTokens(c *gin.Context) { key := c.Query("key") status, _ := strconv.Atoi(c.Query("status")) group := c.Query("group") - tokens, total, err := model.SearchTokens(keyword, p*perPage, perPage, order, status, name, key, group) + tokens, total, err := model.SearchTokens(group, keyword, p*perPage, perPage, order, status, name, key) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -141,6 +145,11 @@ func SearchTokens(c *gin.Context) { } func SearchGroupTokens(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "group is required") + return + } keyword := c.Query("keyword") p, _ := strconv.Atoi(c.Query("p")) p-- @@ -153,19 +162,18 @@ func SearchGroupTokens(c *gin.Context) { } else if perPage > 100 { perPage = 100 } - group := c.Param("group") order := c.Query("order") name := c.Query("name") key := c.Query("key") status, _ := strconv.Atoi(c.Query("status")) - tokens, total, err := model.SearchGroupTokens(group, keyword, p*perPage, perPage, order, status, name, key) + tokens, total, err := model.SearchTokens(group, keyword, p*perPage, perPage, order, status, name, key) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } tokenResponses := make([]*TokenResponse, len(tokens)) for i, token := range tokens { - lastRequestAt, _ := model.GetGroupTokenLastRequestTime(group, token.ID) + lastRequestAt, _ := model.GetTokenLastRequestTime(token.ID) tokenResponses[i] = &TokenResponse{ Token: token, AccessedAt: lastRequestAt, @@ -197,18 +205,22 @@ func GetToken(c *gin.Context) { } func GetGroupToken(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "group is required") + return + } id, err := strconv.Atoi(c.Param("id")) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - group := 
c.Param("group") token, err := model.GetGroupTokenByID(group, id) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - lastRequestAt, _ := model.GetGroupTokenLastRequestTime(group, id) + lastRequestAt, _ := model.GetTokenLastRequestTime(id) tokenResponse := &TokenResponse{ Token: token, AccessedAt: lastRequestAt, @@ -444,20 +456,14 @@ func UpdateTokenStatus(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + if token.Status == model.TokenStatusEnabled { - if cleanToken.Status == model.TokenStatusExpired && !cleanToken.ExpiredAt.IsZero() && cleanToken.ExpiredAt.Before(time.Now()) { - middleware.ErrorResponse(c, http.StatusOK, "token expired, please update token expired time or set to never expire") - return - } - if cleanToken.Status == model.TokenStatusExhausted && cleanToken.Quota > 0 && cleanToken.UsedAmount >= cleanToken.Quota { - middleware.ErrorResponse(c, http.StatusOK, "token quota exhausted, please update token quota or set to unlimited quota") - return - } - if cleanToken.Status == model.TokenStatusExhausted && cleanToken.Quota > 0 && cleanToken.UsedAmount >= cleanToken.Quota { - middleware.ErrorResponse(c, http.StatusOK, "token quota exhausted, please update token quota or set to unlimited quota") + if err := validateTokenStatus(cleanToken); err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } } + err = model.UpdateTokenStatus(id, token.Status) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -488,20 +494,14 @@ func UpdateGroupTokenStatus(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + if token.Status == model.TokenStatusEnabled { - if cleanToken.Status == model.TokenStatusExpired && !cleanToken.ExpiredAt.IsZero() && cleanToken.ExpiredAt.Before(time.Now()) { - middleware.ErrorResponse(c, http.StatusOK, "token expired, please update token expired time or set to never expire") - return - } - if 
cleanToken.Status == model.TokenStatusExhausted && cleanToken.Quota > 0 && cleanToken.UsedAmount >= cleanToken.Quota { - middleware.ErrorResponse(c, http.StatusOK, "token quota exhausted, please update token quota or set to unlimited quota") - return - } - if cleanToken.Status == model.TokenStatusExhausted && cleanToken.Quota > 0 && cleanToken.UsedAmount >= cleanToken.Quota { - middleware.ErrorResponse(c, http.StatusOK, "token quota exhausted, please update token quota or set to unlimited quota") + if err := validateTokenStatus(cleanToken); err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } } + err = model.UpdateGroupTokenStatus(group, id, token.Status) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -510,6 +510,16 @@ func UpdateGroupTokenStatus(c *gin.Context) { middleware.SuccessResponse(c, nil) } +func validateTokenStatus(token *model.Token) error { + if token.Status == model.TokenStatusExpired && !token.ExpiredAt.IsZero() && token.ExpiredAt.Before(time.Now()) { + return errors.New("token expired, please update token expired time or set to never expire") + } + if token.Status == model.TokenStatusExhausted && token.Quota > 0 && token.UsedAmount >= token.Quota { + return errors.New("token quota exhausted, please update token quota or set to unlimited quota") + } + return nil +} + type UpdateTokenNameRequest struct { Name string `json:"name"` } diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index f67daaddd65..f53cb72b97a 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -57,11 +57,11 @@ func (l *Log) MarshalJSON() ([]byte, error) { }) } -func GetGroupLogDetail(group string, logID int) (*RequestDetail, error) { +func GetLogDetail(logID int) (*RequestDetail, error) { var detail RequestDetail - err := LogDB.Model(&RequestDetail{}). - Joins("JOIN logs ON logs.id = request_details.log_id"). - Where("logs.group_id = ? AND logs.id = ?", group, logID). 
+ err := LogDB. + Model(&RequestDetail{}). + Where("log_id = ?", logID). First(&detail).Error if err != nil { return nil, err @@ -69,9 +69,15 @@ func GetGroupLogDetail(group string, logID int) (*RequestDetail, error) { return &detail, nil } -func GetLogDetail(logID int) (*RequestDetail, error) { +func GetGroupLogDetail(logID int, group string) (*RequestDetail, error) { + if group == "" { + return nil, errors.New("group is required") + } var detail RequestDetail - err := LogDB.Model(&RequestDetail{}). + err := LogDB. + Model(&RequestDetail{}). + Joins("JOIN logs ON logs.id = request_details.log_id"). + Where("logs.group_id = ?", group). Where("log_id = ?", logID). First(&detail).Error if err != nil { @@ -169,15 +175,21 @@ const ( ) type GetLogsResult struct { - Logs []*Log `json:"logs"` - Total int64 `json:"total"` + Logs []*Log `json:"logs"` + Total int64 `json:"total"` + Models []string `json:"models"` } -func GetLogs( +type GetGroupLogsResult struct { + GetLogsResult + TokenNames []string `json:"token_names"` +} + +func getLogs( + group string, startTimestamp time.Time, endTimestamp time.Time, modelName string, - group string, requestID string, tokenID int, tokenName string, @@ -189,7 +201,7 @@ func GetLogs( mode int, codeType CodeType, withBody bool, -) (*GetLogsResult, error) { +) (int64, []*Log, error) { tx := LogDB.Model(&Log{}) if group != "" { tx = tx.Where("group_id = ?", group) @@ -203,15 +215,15 @@ func GetLogs( if tokenName != "" { tx = tx.Where("token_name = ?", tokenName) } - if requestID != "" { - tx = tx.Where("request_id = ?", requestID) - } if modelName != "" { tx = tx.Where("model = ?", modelName) } if mode != 0 { tx = tx.Where("mode = ?", mode) } + if requestID != "" { + tx = tx.Where("request_id = ?", requestID) + } if tokenID != 0 { tx = tx.Where("token_id = ?", tokenID) } @@ -228,13 +240,14 @@ func GetLogs( tx = tx.Where("code != 200") } - result := &GetLogsResult{} - err := tx.Count(&result.Total).Error + var total int64 + var logs []*Log + 
err := tx.Count(&total).Error if err != nil { - return nil, err + return total, nil, err } - if result.Total <= 0 { - return result, nil + if total <= 0 { + return total, nil, nil } if withBody { @@ -249,17 +262,14 @@ func GetLogs( Order(getLogOrder(order)). Limit(num). Offset(startIdx). - Find(&result.Logs).Error - return result, err -} - -type GetGroupLogsResult struct { - GetLogsResult - TokenNames []string `json:"token_names"` - Models []string `json:"models"` + Find(&logs).Error + if err != nil { + return total, nil, err + } + return total, logs, nil } -func GetGroupLogs( +func GetLogs( group string, startTimestamp time.Time, endTimestamp time.Time, @@ -275,88 +285,74 @@ func GetGroupLogs( mode int, codeType CodeType, withBody bool, -) (*GetGroupLogsResult, error) { - tx := LogDB.Model(&Log{}).Where("group_id = ?", group) - if !startTimestamp.IsZero() { - tx = tx.Where("request_at >= ?", startTimestamp) - } - if !endTimestamp.IsZero() { - tx = tx.Where("request_at <= ?", endTimestamp) - } - if tokenName != "" { - tx = tx.Where("token_name = ?", tokenName) - } - if modelName != "" { - tx = tx.Where("model = ?", modelName) - } - if mode != 0 { - tx = tx.Where("mode = ?", mode) - } - if requestID != "" { - tx = tx.Where("request_id = ?", requestID) - } - if tokenID != 0 { - tx = tx.Where("token_id = ?", tokenID) - } - if channelID != 0 { - tx = tx.Where("channel_id = ?", channelID) - } - if endpoint != "" { - tx = tx.Where("endpoint = ?", endpoint) - } - switch codeType { - case CodeTypeSuccess: - tx = tx.Where("code = 200") - case CodeTypeError: - tx = tx.Where("code != 200") +) (*GetLogsResult, error) { + total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody) + if err != nil { + return nil, err } - result := &GetGroupLogsResult{} - err := tx.Count(&result.Total).Error + models, err := getLogDistinctValues[string]("model", group, 
startTimestamp, endTimestamp) if err != nil { return nil, err } - if result.Total <= 0 { - return result, nil - } - if withBody { - tx = tx.Preload("RequestDetail") - } else { - tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { - return db.Select("id", "log_id") - }) + result := &GetLogsResult{ + Logs: logs, + Total: total, + Models: models, } - err = tx. - Order(getLogOrder(order)). - Limit(num). - Offset(startIdx). - Find(&result.Logs).Error + return result, nil +} + +func GetGroupLogs( + group string, + startTimestamp time.Time, + endTimestamp time.Time, + modelName string, + requestID string, + tokenID int, + tokenName string, + startIdx int, + num int, + channelID int, + endpoint string, + order string, + mode int, + codeType CodeType, + withBody bool, +) (*GetGroupLogsResult, error) { + if group == "" { + return nil, errors.New("group is required") + } + total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody) if err != nil { return nil, err } - - // Get distinct token names and models for the time period - result.TokenNames, err = getGroupLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) + tokenNames, err := getLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) if err != nil { return nil, err } - - result.Models, err = getGroupLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) if err != nil { return nil, err } - - return result, nil + return &GetGroupLogsResult{ + GetLogsResult: GetLogsResult{ + Logs: logs, + Total: total, + Models: models, + }, + TokenNames: tokenNames, + }, nil } -func SearchLogs( +func searchLogs( + group string, keyword string, page int, perPage int, endpoint string, - groupID string, requestID string, tokenID int, tokenName string, @@ -368,13 
+364,13 @@ func SearchLogs( mode int, codeType CodeType, withBody bool, -) (*GetLogsResult, error) { +) (int64, []*Log, error) { tx := LogDB.Model(&Log{}) + if group != "" { + tx = tx.Where("group_id = ?", group) + } // Handle exact match conditions for non-zero values - if groupID != "" { - tx = tx.Where("group_id = ?", groupID) - } if !startTimestamp.IsZero() { tx = tx.Where("request_at >= ?", startTimestamp) } @@ -390,15 +386,15 @@ func SearchLogs( if mode != 0 { tx = tx.Where("mode = ?", mode) } - if tokenID != 0 { - tx = tx.Where("token_id = ?", tokenID) - } if endpoint != "" { tx = tx.Where("endpoint = ?", endpoint) } if requestID != "" { tx = tx.Where("request_id = ?", requestID) } + if tokenID != 0 { + tx = tx.Where("token_id = ?", tokenID) + } if channelID != 0 { tx = tx.Where("channel_id = ?", channelID) } @@ -414,6 +410,15 @@ func SearchLogs( var conditions []string var values []interface{} + if group == "" { + if common.UsingPostgreSQL { + conditions = append(conditions, "content ILIKE ?") + } else { + conditions = append(conditions, "content LIKE ?") + } + values = append(values, "%"+keyword+"%") + } + if num := String2Int(keyword); num != 0 { if channelID == 0 { conditions = append(conditions, "channel_id = ?") @@ -424,7 +429,6 @@ func SearchLogs( values = append(values, num) } } - if endpoint == "" { if common.UsingPostgreSQL { conditions = append(conditions, "endpoint ILIKE ?") @@ -433,14 +437,6 @@ func SearchLogs( } values = append(values, "%"+keyword+"%") } - if groupID == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "group_id ILIKE ?") - } else { - conditions = append(conditions, "group_id LIKE ?") - } - values = append(values, "%"+keyword+"%") - } if requestID == "" { if common.UsingPostgreSQL { conditions = append(conditions, "request_id ILIKE ?") @@ -477,13 +473,14 @@ func SearchLogs( } } - result := &GetLogsResult{} - err := tx.Count(&result.Total).Error + var total int64 + var logs []*Log + err := 
tx.Count(&total).Error if err != nil { - return nil, err + return total, nil, err } - if result.Total <= 0 { - return result, nil + if total <= 0 { + return total, logs, nil } page-- @@ -503,11 +500,14 @@ func SearchLogs( Order(getLogOrder(order)). Limit(perPage). Offset(page * perPage). - Find(&result.Logs).Error - return result, err + Find(&logs).Error + if err != nil { + return total, nil, err + } + return total, logs, nil } -func SearchGroupLogs( +func SearchLogs( group string, keyword string, page int, @@ -524,148 +524,71 @@ func SearchGroupLogs( mode int, codeType CodeType, withBody bool, -) (*GetGroupLogsResult, error) { - if group == "" { - return nil, errors.New("group is empty") - } - tx := LogDB.Model(&Log{}).Where("group_id = ?", group) - - // Handle exact match conditions for non-zero values - if !startTimestamp.IsZero() { - tx = tx.Where("request_at >= ?", startTimestamp) - } - if !endTimestamp.IsZero() { - tx = tx.Where("request_at <= ?", endTimestamp) - } - if tokenName != "" { - tx = tx.Where("token_name = ?", tokenName) - } - if modelName != "" { - tx = tx.Where("model = ?", modelName) - } - if mode != 0 { - tx = tx.Where("mode = ?", mode) - } - if endpoint != "" { - tx = tx.Where("endpoint = ?", endpoint) - } - if requestID != "" { - tx = tx.Where("request_id = ?", requestID) - } - if tokenID != 0 { - tx = tx.Where("token_id = ?", tokenID) - } - if channelID != 0 { - tx = tx.Where("channel_id = ?", channelID) - } - switch codeType { - case CodeTypeSuccess: - tx = tx.Where("code = 200") - case CodeTypeError: - tx = tx.Where("code != 200") - } - - // Handle keyword search for zero value fields - if keyword != "" { - var conditions []string - var values []interface{} - - if num := String2Int(keyword); num != 0 { - if channelID == 0 { - conditions = append(conditions, "channel_id = ?") - values = append(values, num) - } - if mode != 0 { - conditions = append(conditions, "mode = ?") - values = append(values, num) - } - } - if endpoint == "" { - if 
common.UsingPostgreSQL { - conditions = append(conditions, "endpoint ILIKE ?") - } else { - conditions = append(conditions, "endpoint LIKE ?") - } - values = append(values, "%"+keyword+"%") - } - if requestID == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "request_id ILIKE ?") - } else { - conditions = append(conditions, "request_id LIKE ?") - } - values = append(values, "%"+keyword+"%") - } - if tokenName == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "token_name ILIKE ?") - } else { - conditions = append(conditions, "token_name LIKE ?") - } - values = append(values, "%"+keyword+"%") - } - if modelName == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "model ILIKE ?") - } else { - conditions = append(conditions, "model LIKE ?") - } - values = append(values, "%"+keyword+"%") - } - if common.UsingPostgreSQL { - conditions = append(conditions, "content ILIKE ?") - } else { - conditions = append(conditions, "content LIKE ?") - } - values = append(values, "%"+keyword+"%") - - if len(conditions) > 0 { - tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) - } +) (*GetLogsResult, error) { + total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody) + if err != nil { + return nil, err } - result := &GetGroupLogsResult{} - err := tx.Count(&result.Total).Error + models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) if err != nil { return nil, err } - if result.Total <= 0 { - return result, nil - } - page-- - if page < 0 { - page = 0 + result := &GetLogsResult{ + Logs: logs, + Total: total, + Models: models, } - if withBody { - tx = tx.Preload("RequestDetail") - } else { - tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { - return db.Select("id", "log_id") - }) - } + return result, nil +} - err = tx. 
- Order(getLogOrder(order)). - Limit(perPage). - Offset(page * perPage). - Find(&result.Logs).Error +func SearchGroupLogs( + group string, + keyword string, + page int, + perPage int, + endpoint string, + requestID string, + tokenID int, + tokenName string, + modelName string, + startTimestamp time.Time, + endTimestamp time.Time, + channelID int, + order string, + mode int, + codeType CodeType, + withBody bool, +) (*GetGroupLogsResult, error) { + if group == "" { + return nil, errors.New("group is required") + } + total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody) if err != nil { return nil, err } - // Get distinct token names and models for the time period - result.TokenNames, err = getGroupLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) + tokenNames, err := getLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) if err != nil { return nil, err } - result.Models, err = getGroupLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) if err != nil { return nil, err } + result := &GetGroupLogsResult{ + GetLogsResult: GetLogsResult{ + Logs: logs, + Total: total, + Models: models, + }, + TokenNames: tokenNames, + } + return result, nil } @@ -675,6 +598,9 @@ func DeleteOldLog(timestamp time.Time) (int64, error) { } func DeleteGroupLogs(groupID string) (int64, error) { + if groupID == "" { + return 0, errors.New("group is required") + } result := LogDB.Where("group_id = ?", groupID).Delete(&Log{}) return result.RowsAffected, result.Error } @@ -688,11 +614,16 @@ type HourlyChartData struct { type DashboardResponse struct { ChartData []*HourlyChartData `json:"chart_data"` - TokenNames []string `json:"token_names"` Models []string `json:"models"` TotalCount int64 
`json:"total_count"` ExceptionCount int64 `json:"exception_count"` UsedAmount float64 `json:"used_amount"` + RPM int64 `json:"rpm"` +} + +type GroupDashboardResponse struct { + DashboardResponse + TokenNames []string `json:"token_names"` } func getHourTimestamp() string { @@ -717,11 +648,20 @@ func getChartData(group string, start, end time.Time, tokenName, modelName strin } query := LogDB.Table("logs"). - Select(hourTimestamp+" as timestamp, count(*) as request_count, sum(used_amount) as used_amount, sum(case when code != 200 then 1 else 0 end) as exception_count"). - Where("group_id = ? AND request_at BETWEEN ? AND ?", group, start, end). + Select(hourTimestamp + " as timestamp, count(*) as request_count, sum(used_amount) as used_amount, sum(case when code != 200 then 1 else 0 end) as exception_count"). Group("timestamp"). Order("timestamp ASC") + if group != "" { + query = query.Where("group_id = ?", group) + } + if !start.IsZero() { + query = query.Where("request_at >= ?", start) + } + if !end.IsZero() { + query = query.Where("request_at <= ?", end) + } + if tokenName != "" { query = query.Where("token_name = ?", tokenName) } @@ -733,12 +673,15 @@ func getChartData(group string, start, end time.Time, tokenName, modelName strin return chartData, err } -func getGroupLogDistinctValues[T any](field string, group string, start, end time.Time) ([]T, error) { +func getLogDistinctValues[T any](field string, group string, start, end time.Time) ([]T, error) { var values []T query := LogDB. Model(&Log{}). - Distinct(field). 
- Where("group_id = ?", group) + Distinct(field) + + if group != "" { + query = query.Where("group_id = ?", group) + } if !start.IsZero() { query = query.Where("request_at >= ?", start) @@ -775,24 +718,38 @@ func sumUsedAmount(chartData []*HourlyChartData) float64 { return amount.InexactFloat64() } -func GetDashboardData(group string, start, end time.Time, tokenName string, modelName string) (*DashboardResponse, error) { +func getRPM(group string, end time.Time, tokenName, modelName string) (int64, error) { + query := LogDB.Model(&Log{}). + Where("request_at >= ? AND request_at <= ?", end.Add(-time.Minute), end) + + if group != "" { + query = query.Where("group_id = ?", group) + } + if tokenName != "" { + query = query.Where("token_name = ?", tokenName) + } + if modelName != "" { + query = query.Where("model = ?", modelName) + } + + var count int64 + err := query.Count(&count).Error + return count, err +} + +func GetDashboardData(start, end time.Time, modelName string) (*DashboardResponse, error) { if end.IsZero() { end = time.Now() } else if end.Before(start) { return nil, errors.New("end time is before start time") } - chartData, err := getChartData(group, start, end, tokenName, modelName) - if err != nil { - return nil, err - } - - tokenNames, err := getGroupLogDistinctValues[string]("token_name", group, start, end) + chartData, err := getChartData("", start, end, "", modelName) if err != nil { return nil, err } - models, err := getGroupLogDistinctValues[string]("model", group, start, end) + models, err := getLogDistinctValues[string]("model", "", start, end) if err != nil { return nil, err } @@ -801,17 +758,73 @@ func GetDashboardData(group string, start, end time.Time, tokenName string, mode exceptionCount := sumExceptionCount(chartData) usedAmount := sumUsedAmount(chartData) + rpm, err := getRPM("", end, "", modelName) + if err != nil { + return nil, err + } + return &DashboardResponse{ ChartData: chartData, - TokenNames: tokenNames, Models: models, 
TotalCount: totalCount, ExceptionCount: exceptionCount, UsedAmount: usedAmount, + RPM: rpm, + }, nil +} + +func GetGroupDashboardData(group string, start, end time.Time, tokenName string, modelName string) (*GroupDashboardResponse, error) { + if group == "" { + return nil, errors.New("group is required") + } + + if end.IsZero() { + end = time.Now() + } else if end.Before(start) { + return nil, errors.New("end time is before start time") + } + + chartData, err := getChartData(group, start, end, tokenName, modelName) + if err != nil { + return nil, err + } + + tokenNames, err := getLogDistinctValues[string]("token_name", group, start, end) + if err != nil { + return nil, err + } + + models, err := getLogDistinctValues[string]("model", group, start, end) + if err != nil { + return nil, err + } + + totalCount := sumTotalCount(chartData) + exceptionCount := sumExceptionCount(chartData) + usedAmount := sumUsedAmount(chartData) + + rpm, err := getRPM(group, end, tokenName, modelName) + if err != nil { + return nil, err + } + + return &GroupDashboardResponse{ + DashboardResponse: DashboardResponse{ + ChartData: chartData, + Models: models, + TotalCount: totalCount, + ExceptionCount: exceptionCount, + UsedAmount: usedAmount, + RPM: rpm, + }, + TokenNames: tokenNames, }, nil } func GetGroupLastRequestTime(group string) (time.Time, error) { + if group == "" { + return time.Time{}, errors.New("group is required") + } var log Log err := LogDB.Model(&Log{}).Where("group_id = ?", group).Order("request_at desc").First(&log).Error return log.RequestAt, err @@ -819,12 +832,7 @@ func GetGroupLastRequestTime(group string) (time.Time, error) { func GetTokenLastRequestTime(id int) (time.Time, error) { var log Log - err := LogDB.Model(&Log{}).Where("token_id = ?", id).Order("request_at desc").First(&log).Error - return log.RequestAt, err -} - -func GetGroupTokenLastRequestTime(group string, id int) (time.Time, error) { - var log Log - err := LogDB.Model(&Log{}).Where("group_id = ? 
and token_id = ?", group, id).Order("request_at desc").First(&log).Error + tx := LogDB.Model(&Log{}) + err := tx.Where("token_id = ?", id).Order("request_at desc").First(&log).Error return log.RequestAt, err } diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index 4e49b3d02af..5b82ce7e092 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -88,34 +88,11 @@ func InsertToken(token *Token, autoCreateGroup bool) error { return nil } -func GetTokens(startIdx int, num int, order string, group string, status int) (tokens []*Token, total int64, err error) { +func GetTokens(group string, startIdx int, num int, order string, status int) (tokens []*Token, total int64, err error) { tx := DB.Model(&Token{}) - if group != "" { tx = tx.Where("group_id = ?", group) } - if status != 0 { - tx = tx.Where("status = ?", status) - } - - err = tx.Count(&total).Error - if err != nil { - return nil, 0, err - } - - if total <= 0 { - return nil, 0, nil - } - err = tx.Order(getTokenOrder(order)).Limit(num).Offset(startIdx).Find(&tokens).Error - return tokens, total, err -} - -func GetGroupTokens(group string, startIdx int, num int, order string, status int) (tokens []*Token, total int64, err error) { - if group == "" { - return nil, 0, errors.New("group is empty") - } - - tx := DB.Model(&Token{}).Where("group_id = ?", group) if status != 0 { tx = tx.Where("status = ?", status) @@ -133,7 +110,7 @@ func GetGroupTokens(group string, startIdx int, num int, order string, status in return tokens, total, err } -func SearchTokens(keyword string, startIdx int, num int, order string, status int, name string, key string, group string) (tokens []*Token, total int64, err error) { +func SearchTokens(group string, keyword string, startIdx int, num int, order string, status int, name string, key string) (tokens []*Token, total int64, err error) { tx := DB.Model(&Token{}) if group != "" { tx = tx.Where("group_id = ?", group) @@ -151,19 +128,8 @@ func 
SearchTokens(keyword string, startIdx int, num int, order string, status in if keyword != "" { var conditions []string var values []interface{} - if status == 0 { - conditions = append(conditions, "status = ?") - values = append(values, 1) - } + if group == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "group_id ILIKE ?") - } else { - conditions = append(conditions, "group_id LIKE ?") - } - values = append(values, "%"+keyword+"%") - } - if name == "" { if common.UsingPostgreSQL { conditions = append(conditions, "name ILIKE ?") } else { @@ -171,48 +137,6 @@ func SearchTokens(keyword string, startIdx int, num int, order string, status in } values = append(values, "%"+keyword+"%") } - if key == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "key ILIKE ?") - } else { - conditions = append(conditions, "key LIKE ?") - } - values = append(values, keyword) - } - if len(conditions) > 0 { - tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) 
- } - } - - err = tx.Count(&total).Error - if err != nil { - return nil, 0, err - } - if total <= 0 { - return nil, 0, nil - } - err = tx.Order(getTokenOrder(order)).Limit(num).Offset(startIdx).Find(&tokens).Error - return tokens, total, err -} - -func SearchGroupTokens(group string, keyword string, startIdx int, num int, order string, status int, name string, key string) (tokens []*Token, total int64, err error) { - if group == "" { - return nil, 0, errors.New("group is empty") - } - tx := DB.Model(&Token{}).Where("group_id = ?", group) - if status != 0 { - tx = tx.Where("status = ?", status) - } - if name != "" { - tx = tx.Where("name = ?", name) - } - if key != "" { - tx = tx.Where("key = ?", key) - } - - if keyword != "" { - var conditions []string - var values []interface{} if status == 0 { conditions = append(conditions, "status = ?") values = append(values, 1) diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 13954bdf293..e302ccb80d1 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -35,6 +35,7 @@ func SetAPIRouter(router *gin.Engine) { dashboardRoute := apiRouter.Group("/dashboard") { + dashboardRoute.GET("/", controller.GetDashboard) dashboardRoute.GET("/:group", controller.GetGroupDashboard) } @@ -46,7 +47,7 @@ func SetAPIRouter(router *gin.Engine) { } groupRoute := apiRouter.Group("/group") { - groupRoute.POST("/", controller.CreateGroup) + groupRoute.POST("/:group", controller.CreateGroup) groupRoute.GET("/:group", controller.GetGroup) groupRoute.DELETE("/:group", controller.DeleteGroup) groupRoute.POST("/:group/status", controller.UpdateGroupStatus) From 9d2d5e84bcbd7bb24c92c72fbdfedda7dfd74420 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 27 Dec 2024 18:03:12 +0800 Subject: [PATCH 047/167] feat: dashboard tpm --- service/aiproxy/model/log.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/service/aiproxy/model/log.go 
b/service/aiproxy/model/log.go index f53cb72b97a..80490ee9505 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -619,6 +619,7 @@ type DashboardResponse struct { ExceptionCount int64 `json:"exception_count"` UsedAmount float64 `json:"used_amount"` RPM int64 `json:"rpm"` + TPM int64 `json:"tpm"` } type GroupDashboardResponse struct { @@ -737,6 +738,26 @@ func getRPM(group string, end time.Time, tokenName, modelName string) (int64, er return count, err } +func getTPM(group string, end time.Time, tokenName, modelName string) (int64, error) { + query := LogDB.Model(&Log{}). + Select("COALESCE(SUM(prompt_tokens + completion_tokens), 0)"). + Where("request_at >= ? AND request_at <= ?", end.Add(-time.Minute), end) + + if group != "" { + query = query.Where("group_id = ?", group) + } + if tokenName != "" { + query = query.Where("token_name = ?", tokenName) + } + if modelName != "" { + query = query.Where("model = ?", modelName) + } + + var tpm int64 + err := query.Scan(&tpm).Error + return tpm, err +} + func GetDashboardData(start, end time.Time, modelName string) (*DashboardResponse, error) { if end.IsZero() { end = time.Now() @@ -763,6 +784,11 @@ func GetDashboardData(start, end time.Time, modelName string) (*DashboardRespons return nil, err } + tpm, err := getTPM("", end, "", modelName) + if err != nil { + return nil, err + } + return &DashboardResponse{ ChartData: chartData, Models: models, @@ -770,6 +796,7 @@ func GetDashboardData(start, end time.Time, modelName string) (*DashboardRespons ExceptionCount: exceptionCount, UsedAmount: usedAmount, RPM: rpm, + TPM: tpm, }, nil } @@ -808,6 +835,11 @@ func GetGroupDashboardData(group string, start, end time.Time, tokenName string, return nil, err } + tpm, err := getTPM(group, end, tokenName, modelName) + if err != nil { + return nil, err + } + return &GroupDashboardResponse{ DashboardResponse: DashboardResponse{ ChartData: chartData, @@ -816,6 +848,7 @@ func GetGroupDashboardData(group string, 
start, end time.Time, tokenName string, ExceptionCount: exceptionCount, UsedAmount: usedAmount, RPM: rpm, + TPM: tpm, }, TokenNames: tokenNames, }, nil From 1df839239fa2383d9413b1cc2eb68449d795b287 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 30 Dec 2024 14:09:53 +0800 Subject: [PATCH 048/167] feat: step modelinfo --- service/aiproxy/main.go | 2 +- .../relay/adaptor/stepfun/constants.go | 165 +++++++++++++++--- 2 files changed, 139 insertions(+), 28 deletions(-) diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 352475c723f..9b12bb947d8 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -71,7 +71,7 @@ func setLog(l *log.Logger) { l.SetOutput(os.Stdout) stdlog.SetOutput(l.Writer()) - log.SetFormatter(&log.TextFormatter{ + l.SetFormatter(&log.TextFormatter{ ForceColors: true, DisableColors: false, ForceQuote: config.DebugEnabled, diff --git a/service/aiproxy/relay/adaptor/stepfun/constants.go b/service/aiproxy/relay/adaptor/stepfun/constants.go index 944d733a4cb..09ad93613e7 100644 --- a/service/aiproxy/relay/adaptor/stepfun/constants.go +++ b/service/aiproxy/relay/adaptor/stepfun/constants.go @@ -7,48 +7,159 @@ import ( var ModelList = []*model.ModelConfig{ { - Model: "step-1-8k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1-8k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.005, + OutputPrice: 0.02, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "step-1-32k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1-32k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.015, + OutputPrice: 0.07, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "step-1-128k", - Type: 
relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1-128k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.04, + OutputPrice: 0.2, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(128000), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "step-1-256k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1-256k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.95, + OutputPrice: 0.3, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(256000), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "step-1-flash", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1-flash", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.001, + OutputPrice: 0.004, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "step-2-16k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-2-16k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.038, + OutputPrice: 0.12, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(16000), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "step-1v-8k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1v-8k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.005, + OutputPrice: 0.02, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(8000), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { - Model: "step-1v-32k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1v-32k", + Type: 
relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.015, + OutputPrice: 0.07, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { - Model: "step-1x-medium", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerStepFun, + Model: "step-1.5v-mini", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.008, + OutputPrice: 0.035, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32000), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), + }, + + { + Model: "step-tts-mini", + Type: relaymode.AudioSpeech, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.09, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(1000), + model.WithModelConfigSupportFormats([]string{"opus", "wav", "flac", "mp3"}), + model.WithModelConfigSupportVoices([]string{ + "cixingnansheng", "zhengpaiqingnian", "yuanqinansheng", + "qingniandaxuesheng", "boyinnansheng", "ruyananshi", + "shenchennanyin", "qinqienvsheng", "wenrounvsheng", + "jilingshaonv", "yuanqishaonv", "ruanmengnvsheng", + "youyanvsheng", "lengyanyujie", "shuangkuaijiejie", + "wenjingxuejie", "linjiajiejie", "linjiameimei", + "zhixingjiejie", + }), + ), + }, + + { + Model: "step-asr", + Type: relaymode.AudioTranscription, + Owner: model.ModelOwnerStepFun, + InputPrice: 0.09, + RPM: 60, + }, + + { + Model: "step-1x-medium", + Type: relaymode.ImagesGenerations, + Owner: model.ModelOwnerStepFun, + RPM: 60, + ImageMaxBatchSize: 1, + ImagePrices: map[string]float64{ + "256x256": 0.1, + "512x512": 0.1, + "768x768": 0.1, + "1024x1024": 0.1, + "1280x800": 0.1, + "800x1280": 0.1, + }, }, } From b29edf5cb1049ef014529c7032fb30325cdda16c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 30 Dec 2024 16:41:57 +0800 Subject: [PATCH 049/167] feat: yi --- 
.../relay/adaptor/lingyiwanwu/constants.go | 86 ++----------------- 1 file changed, 9 insertions(+), 77 deletions(-) diff --git a/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go b/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go index bfa063ddb2c..0d61e1d0c97 100644 --- a/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go +++ b/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go @@ -14,88 +14,20 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerLingyiWanwu, InputPrice: 0.00099, OutputPrice: 0.00099, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(16384), + ), }, { - Model: "yi-large", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.02, - OutputPrice: 0.02, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32768, - }, - }, - { - Model: "yi-medium", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.0025, - OutputPrice: 0.0025, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, - }, - { - Model: "yi-vision", + Model: "yi-vision-v2", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerLingyiWanwu, InputPrice: 0.006, OutputPrice: 0.006, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, - }, - { - Model: "yi-medium-200k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.012, - OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 204800, - }, - }, - { - Model: "yi-spark", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.001, - OutputPrice: 0.001, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, - }, - { - Model: "yi-large-rag", - Type: 
relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.025, - OutputPrice: 0.025, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, - }, - { - Model: "yi-large-fc", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.02, - OutputPrice: 0.02, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 32768, - }, - }, - { - Model: "yi-large-turbo", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerLingyiWanwu, - InputPrice: 0.012, - OutputPrice: 0.012, - Config: map[model.ModelConfigKey]any{ - model.ModelConfigMaxContextTokensKey: 16384, - }, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(16384), + ), }, } From b0a4de68b8517a91614458c13c9f5d2a19a8bca0 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 30 Dec 2024 16:48:47 +0800 Subject: [PATCH 050/167] fix: yi --- service/aiproxy/relay/adaptor/lingyiwanwu/constants.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go b/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go index 0d61e1d0c97..3b95f8412cb 100644 --- a/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go +++ b/service/aiproxy/relay/adaptor/lingyiwanwu/constants.go @@ -17,6 +17,7 @@ var ModelList = []*model.ModelConfig{ RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(16384), + model.WithModelConfigToolChoice(true), ), }, { @@ -28,6 +29,8 @@ var ModelList = []*model.ModelConfig{ RPM: 60, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(16384), + model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), ), }, } From b87b5105e72bf815eed02ffd00a1d4e4a7484ed8 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 10:36:09 +0800 Subject: [PATCH 051/167] feat: debug banned --- service/aiproxy/controller/relay.go | 2 ++ service/aiproxy/monitor/model.go | 20 
+++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index b3e8a4fed6d..1fb569f6ff7 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -89,6 +89,8 @@ func Relay(c *gin.Context) { log.Errorf("get %s auto banned channels failed: %+v", requestModel, err) } + log.Debugf("%s model banned channels: %+v", requestModel, ids) + failedChannelIDs := []int{} for _, id := range ids { failedChannelIDs = append(failedChannelIDs, int(id)) diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go index 9e5a06b71bb..02b70a66049 100644 --- a/service/aiproxy/monitor/model.go +++ b/service/aiproxy/monitor/model.go @@ -8,6 +8,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/redis/go-redis/v9" + log "github.com/sirupsen/logrus" ) // 使用set存储被永久禁用的channelID @@ -21,7 +22,7 @@ var addRequestScript = redis.NewScript(` local banned_key = "model:" .. model .. 
":banned" if redis.call("SISMEMBER", banned_key, channel_id) == 1 then - return redis.status_reply("ok") + return 2 end local now_ms = redis.call("TIME")[1] * 1000 + math.floor(redis.call("TIME")[2]/1000) @@ -35,7 +36,7 @@ var addRequestScript = redis.NewScript(` redis.call("PEXPIRE", channel_requests_key, error_time_to_live) local total_count = redis.call("ZCARD", channel_requests_key) - if total_count >= 5 then + if total_count >= 10 then local error_count = 0 local requests = redis.call("ZRANGE", channel_requests_key, 0, -1) for _, request in ipairs(requests) do @@ -52,10 +53,11 @@ var addRequestScript = redis.NewScript(` redis.call("PEXPIRE", banned_key, ban_time) end redis.call("DEL", channel_requests_key) + return 1 end end - return redis.status_reply("ok") + return 0 `) func AddRequest(ctx context.Context, model string, channelID int64, isError bool) error { @@ -68,7 +70,7 @@ func AddRequest(ctx context.Context, model string, channelID int64, isError bool } live := 60 * time.Second banTime := 4 * live - return addRequestScript.Run( + val, err := addRequestScript.Run( ctx, common.RDB, []string{model}, @@ -76,7 +78,15 @@ func AddRequest(ctx context.Context, model string, channelID int64, isError bool live.Milliseconds(), config.GetModelErrorAutoBanRate(), errorFlag, - banTime.Milliseconds()).Err() + banTime.Milliseconds()).Int64() + if err != nil { + return err + } + log.Debugf("add request result: %d", val) + if val == 1 { + log.Errorf("channel %d model %s is banned", channelID, model) + } + return nil } var getBannedChannelsScript = redis.NewScript(` From 4bd2802c1c397f299c170ad44f9e8645f15041a5 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 10:39:59 +0800 Subject: [PATCH 052/167] chore: bump go mod --- service/aiproxy/go.mod | 64 ++++++------ service/aiproxy/go.sum | 229 ++++++++++++++--------------------------- 2 files changed, 107 insertions(+), 186 deletions(-) diff --git a/service/aiproxy/go.mod b/service/aiproxy/go.mod index 
af939647e73..24a10f8b386 100644 --- a/service/aiproxy/go.mod +++ b/service/aiproxy/go.mod @@ -6,11 +6,11 @@ replace github.com/labring/sealos/service/aiproxy => ../aiproxy require ( cloud.google.com/go/iam v1.3.0 - github.com/aws/aws-sdk-go-v2 v1.32.6 - github.com/aws/aws-sdk-go-v2/credentials v1.17.47 - github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.0 - github.com/gin-contrib/cors v1.7.2 - github.com/gin-contrib/gzip v1.0.1 + github.com/aws/aws-sdk-go-v2 v1.32.7 + github.com/aws/aws-sdk-go-v2/credentials v1.17.48 + github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.1 + github.com/gin-contrib/cors v1.7.3 + github.com/gin-contrib/gzip v1.1.0 github.com/gin-gonic/gin v1.10.0 github.com/glebarez/sqlite v1.11.0 github.com/golang-jwt/jwt/v5 v5.2.1 @@ -30,24 +30,24 @@ require ( github.com/smartystreets/goconvey v1.8.1 github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 golang.org/x/image v0.23.0 - google.golang.org/api v0.210.0 + google.golang.org/api v0.214.0 gorm.io/driver/mysql v1.5.7 gorm.io/driver/postgres v1.5.11 gorm.io/gorm v1.25.12 ) require ( - cloud.google.com/go/auth v0.12.0 // indirect + cloud.google.com/go/auth v0.13.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect github.com/aws/smithy-go v1.22.1 // indirect - github.com/bytedance/sonic v1.12.5 // indirect + 
github.com/bytedance/sonic v1.12.6 // indirect github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudwego/base64x v0.1.4 // indirect @@ -66,21 +66,19 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.23.0 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect - github.com/goccy/go-json v0.10.3 // indirect - github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect - github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.1 // indirect + github.com/jackc/pgx/v5 v5.7.2 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/cpuid/v2 v2.2.9 // indirect - github.com/kr/text v0.2.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -91,28 +89,28 @@ require ( github.com/smarty/assertions v1.15.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric 
v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect golang.org/x/arch v0.12.0 // indirect - golang.org/x/crypto v0.30.0 // indirect - golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d // indirect - golang.org/x/net v0.32.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/grpc v1.68.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241230172942-26aa7a208def // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241230172942-26aa7a208def // indirect + google.golang.org/grpc v1.69.2 // indirect + google.golang.org/protobuf v1.36.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.61.4 // indirect - modernc.org/mathutil v1.6.0 // indirect + modernc.org/libc v1.61.6 // indirect + modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.8.0 // indirect - modernc.org/sqlite v1.34.2 // indirect + modernc.org/sqlite v1.34.4 // indirect ) diff --git a/service/aiproxy/go.sum b/service/aiproxy/go.sum index ae9ae889d2f..d6474617571 100644 --- a/service/aiproxy/go.sum +++ 
b/service/aiproxy/go.sum @@ -1,48 +1,42 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.12.0 h1:ARAD8r0lkiHw2go7kEnmviF6TOYhzLM+yDGcDt9mP68= -cloud.google.com/go/auth v0.12.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/iam v1.3.0 h1:4Wo2qTaGKFtajbLpF6I4mywg900u3TLlHDb6mriLDPU= cloud.google.com/go/iam v1.3.0/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= -github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream 
v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= -github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.0 h1:mfV5tcLXeRLbiyI4EHoHWH1sIU7JvbfXVvymUCIgZEo= -github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.0/go.mod h1:YSSgYnasDKm5OjU3bOPkaz+2PFO6WjEQGIA6KQNsR3Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48 h1:IYdLD1qTJ0zanRavulofmqut4afs45mOWEI+MzZtTfQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48/go.mod h1:tOscxHN3CGmuX9idQ3+qbkzrjVIx32lqDSU1/0d/qXs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.1 h1:rqrvjFScEwD7VfP4L0hhnrXyTkgUkpQWAdwOrW2slOo= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.1/go.mod h1:Vn5GopXsOAC6kbwzjfM6V37dxc4mo4J4xCRiF27pSZA= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/bytedance/sonic v1.12.5 h1:hoZxY8uW+mT+OpkcUWw4k0fDINtOcVavEsGfzwzFU/w= -github.com/bytedance/sonic v1.12.5/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= +github.com/bytedance/sonic v1.12.6 h1:/isNmCUF2x3Sh8RAp/4mh4ZGkcFAX/hLrzrK3AvpRzk= +github.com/bytedance/sonic v1.12.6/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E= github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -52,18 +46,14 @@ github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yA github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= -github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw= -github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E= -github.com/gin-contrib/gzip v1.0.1 h1:HQ8ENHODeLY7a4g1Au/46Z92bdGFl74OhxcZble9WJE= -github.com/gin-contrib/gzip v1.0.1/go.mod h1:njt428fdUNRvjuJf16tZMYZ2Yl+WQB53X5wmhDwXvC4= +github.com/gin-contrib/cors v1.7.3 h1:hV+a5xp8hwJoTw7OY+a70FsL8JkVVFTXw9EcfrYUdns= +github.com/gin-contrib/cors v1.7.3/go.mod h1:M3bcKZhxzsvI+rlRSkkxHyljJt1ESd93COUvemZ79j4= +github.com/gin-contrib/gzip v1.1.0 h1:kVw7Nr9M+Z6Ch4qo7aGMbiqxDeyQFru+07MgAcUF62M= +github.com/gin-contrib/gzip v1.1.0/go.mod 
h1:iHJXCup4CWiKyPUEl+GwkHjchl+YyYuMKbOCiXujPIA= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= @@ -88,32 +78,12 @@ github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaC github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -121,13 +91,12 @@ github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlG github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= 
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= -github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= @@ -136,8 +105,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= -github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= +github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod 
h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= @@ -156,8 +125,8 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= @@ -183,13 +152,12 @@ github.com/pkoukk/tiktoken-go v0.1.7 h1:qOBHXX4PHtvIvmOtyg1EeKlwFRiMKAcoMp4Q+bLQ github.com/pkoukk/tiktoken-go v0.1.7/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/go-internal 
v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -210,107 +178,64 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 h1:qtFISDHKolvIxzSs0gIaiPUPR0Cucb0F2coHC7ZLdps= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= 
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg= golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d h1:0olWaB5pg3+oychR51GUVCEsGkeCU/2JxjBgIo4f3M0= -golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68= golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 
h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= 
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.210.0 h1:HMNffZ57OoZCRYSbdWVRoqOa8V8NIHLL0CzdBPLztWk= -google.golang.org/api v0.210.0/go.mod h1:B9XDZGnx2NtyjzVkOVTGrFSAVZgPcbedzKg/gTLwqBs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod 
h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= +google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= +google.golang.org/genproto/googleapis/api v0.0.0-20241230172942-26aa7a208def h1:0Km0hi+g2KXbXL0+riZzSCKz23f4MmwicuEb00JeonI= +google.golang.org/genproto/googleapis/api v0.0.0-20241230172942-26aa7a208def/go.mod h1:u2DoMSpCXjrzqLdobRccQMc9wrnMAJ1DLng0a2yqM2Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241230172942-26aa7a208def h1:4P81qv5JXI/sDNae2ClVx88cgDDA6DPilADkG9tYKz8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241230172942-26aa7a208def/go.mod h1:bdAgzvd4kFrpykc5/AC2eLUiegK9T/qxZHD4hXYf/ho= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod 
h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -324,28 +249,26 @@ gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSk gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -modernc.org/cc/v4 v4.23.1 h1:WqJoPL3x4cUufQVHkXpXX7ThFJ1C4ik80i2eXEXbhD8= -modernc.org/cc/v4 v4.23.1/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= -modernc.org/ccgo/v4 v4.23.1 h1:N49a7JiWGWV7lkPE4yYcvjkBGZQi93/JabRYjdWmJXc= -modernc.org/ccgo/v4 v4.23.1/go.mod h1:JoIUegEIfutvoWV/BBfDFpPpfR2nc3U0jKucGcbmwDU= +modernc.org/cc/v4 v4.24.2 h1:uektamHbSXU7egelXcyVpMaaAsrRH4/+uMKUQAQUdOw= +modernc.org/cc/v4 v4.24.2/go.mod h1:T1lKJZhXIi2VSqGBiB4LIbKs9NsKTbUXj4IDrmGqtTI= +modernc.org/ccgo/v4 v4.23.5 h1:6uAwu8u3pnla3l/+UVUrDDO1HIGxHTYmFH6w+X9nsyw= +modernc.org/ccgo/v4 v4.23.5/go.mod h1:FogrWfBdzqLWm1ku6cfr4IzEFouq2fSAPf6aSAHdAJQ= modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= -modernc.org/gc/v2 v2.5.0 h1:bJ9ChznK1L1mUtAQtxi0wi5AtAs5jQuw4PrPHO5pb6M= -modernc.org/gc/v2 v2.5.0/go.mod 
h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= -modernc.org/libc v1.61.4 h1:wVyqEx6tlltte9lPTjq0kDAdtdM9c4JH8rU6M1ZVawA= -modernc.org/libc v1.61.4/go.mod h1:VfXVuM/Shh5XsMNrh3C6OkfL78G3loa4ZC/Ljv9k7xc= -modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= -modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/gc/v2 v2.6.0 h1:Tiw3pezQj7PfV8k4Dzyu/vhRHR2e92kOXtTFU8pbCl4= +modernc.org/gc/v2 v2.6.0/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/libc v1.61.6 h1:L2jW0wxHPCyHK0YSHaGaVlY0WxjpG/TTVdg6gRJOPqw= +modernc.org/libc v1.61.6/go.mod h1:G+DzuaCcReUYYg4nNSfigIfTDCENdj9EByglvaRx53A= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= -modernc.org/sqlite v1.34.2 h1:J9n76TPsfYYkFkZ9Uy1QphILYifiVEwwOT7yP5b++2Y= -modernc.org/sqlite v1.34.2/go.mod h1:dnR723UrTtjKpoHCAMN0Q/gZ9MT4r+iRvIBb9umWFkU= +modernc.org/sqlite v1.34.4 h1:sjdARozcL5KJBvYQvLlZEmctRgW9xqIZc2ncN7PU0P8= +modernc.org/sqlite v1.34.4/go.mod h1:3QQFCG2SEMtc2nv+Wq4cQCH7Hjcg+p/RMlS1XK+zwbk= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= From ad47c078e07426bea567239f39229fb9ba46c919 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 10:40:14 +0800 Subject: [PATCH 053/167] 
chore: bump go mod --- service/go.work.sum | 86 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/service/go.work.sum b/service/go.work.sum index b8631243819..791354d62c8 100644 --- a/service/go.work.sum +++ b/service/go.work.sum @@ -1,5 +1,7 @@ cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -179,6 +181,7 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.6.0 h1:jXIpfcH/VYSE1SYcPzO0n1VVb+sAamiLOgCw45JbOQk= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= @@ -684,10 +687,12 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 
h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.12.0-rc.0 h1:wX/F5huJxH9APBkhKSEAqaiZsuBvbbDnyBROZAqsSaY= @@ -782,6 +787,7 @@ github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -811,6 +817,8 @@ github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2u github.com/clbanning/mxj/v2 v2.5.7 h1:7q5lvUpaPF/WOkqgIDiwjBJaznaLCCBd78pi8ZyAnE0= github.com/clbanning/mxj/v2 v2.5.7/go.mod 
h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -878,6 +886,9 @@ github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= @@ -890,7 +901,9 @@ github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1 github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/go-control-plane 
v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= @@ -940,6 +953,7 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= @@ -948,8 +962,11 @@ github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -958,10 +975,19 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -978,8 +1004,14 @@ github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -1017,6 +1049,7 @@ github.com/google/s2a-go v0.1.4 
h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= @@ -1270,6 +1303,7 @@ github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+L github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= @@ -1455,6 +1489,9 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= 
+go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= @@ -1467,6 +1504,7 @@ go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= @@ -1481,6 +1519,7 @@ go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9s go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= go.opentelemetry.io/otel/sdk v1.19.0 
h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= @@ -1491,6 +1530,7 @@ go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zA go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= @@ -1521,6 +1561,7 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -1534,8 +1575,11 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1562,9 +1606,13 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1582,6 +1630,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -1600,6 +1649,8 @@ golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1622,6 +1673,8 @@ golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2 golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1632,6 +1685,7 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1680,6 +1734,7 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -1699,15 +1754,20 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1757,6 +1817,7 @@ golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= @@ -1793,17 +1854,21 @@ google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1822,6 +1887,7 @@ google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod 
h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1855,6 +1921,7 @@ google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= @@ -1871,6 +1938,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc h1:g3hIDl0jRNd9PPTs2uBzYuaD5mQuwOkZY0vSc0LR32o= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405 h1:o4S3HvTUEXgRsNSUQsALDVog0O9F/U1JJlHmmUN8Uas= @@ -1892,12 +1960,18 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= @@ -1905,6 +1979,7 @@ google.golang.org/grpc v1.30.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -1924,7 +1999,16 @@ google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFL google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod 
h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= @@ -1949,9 +2033,11 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= From 0a90e47dafb0f112c3345155cd52decd30fe2c05 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 11:06:24 +0800 Subject: [PATCH 054/167] fix: save model time parse --- service/aiproxy/controller/modelconfig.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/controller/modelconfig.go b/service/aiproxy/controller/modelconfig.go index 94501cd4806..b79d130a70a 100644 --- 
a/service/aiproxy/controller/modelconfig.go +++ b/service/aiproxy/controller/modelconfig.go @@ -87,13 +87,23 @@ func SearchModelConfigs(c *gin.Context) { }) } +type SaveModelConfigsRequest struct { + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` + *model.ModelConfig +} + func SaveModelConfigs(c *gin.Context) { - var configs []*model.ModelConfig + var configs []*SaveModelConfigsRequest if err := c.ShouldBindJSON(&configs); err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - err := model.SaveModelConfigs(configs) + modelConfigs := make([]*model.ModelConfig, len(configs)) + for i, config := range configs { + modelConfigs[i] = config.ModelConfig + } + err := model.SaveModelConfigs(modelConfigs) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -102,12 +112,12 @@ func SaveModelConfigs(c *gin.Context) { } func SaveModelConfig(c *gin.Context) { - var config model.ModelConfig + var config SaveModelConfigsRequest if err := c.ShouldBindJSON(&config); err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - err := model.SaveModelConfig(&config) + err := model.SaveModelConfig(config.ModelConfig) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return From 26626df16e7259acd7e42d6002ea43e5c22bbb25 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 11:30:59 +0800 Subject: [PATCH 055/167] feat: fill dash carts gaps --- service/aiproxy/controller/dashboard.go | 58 +++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 454bcf14c12..1600b78564e 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -27,6 +27,60 @@ func getDashboardStartEndTime(t string) (time.Time, time.Time) { return start, end } +func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { + if len(data) <= 1 
{ + return data + } + + var result []*model.HourlyChartData + result = append(result, data[0]) + + for i := 1; i < len(data); i++ { + curr := data[i] + prev := data[i-1] + hourDiff := (curr.Timestamp - prev.Timestamp) / 3600 + + // If gap is 1 hour or less, continue + if hourDiff <= 1 { + result = append(result, curr) + continue + } + + // If gap is more than 3 hours, only add boundary points + if hourDiff > 3 { + // Add point for hour after prev + result = append(result, &model.HourlyChartData{ + Timestamp: prev.Timestamp + 3600, + RequestCount: 0, + UsedAmount: 0, + ExceptionCount: 0, + }) + // Add point for hour before curr + result = append(result, &model.HourlyChartData{ + Timestamp: curr.Timestamp - 3600, + RequestCount: 0, + UsedAmount: 0, + ExceptionCount: 0, + }) + result = append(result, curr) + continue + } + + // Fill gaps of 2-3 hours with zero points + for j := prev.Timestamp + 3600; j < curr.Timestamp; j += 3600 { + result = append(result, &model.HourlyChartData{ + Timestamp: j, + RequestCount: 0, + UsedAmount: 0, + ExceptionCount: 0, + }) + } + result = append(result, curr) + } + + return result +} + func GetDashboard(c *gin.Context) { start, end := getDashboardStartEndTime(c.Query("type")) modelName := c.Query("model") @@ -35,6 +89,8 @@ func GetDashboard(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } + + dashboards.ChartData = fillGaps(dashboards.ChartData) middleware.SuccessResponse(c, dashboards) } @@ -54,5 +110,7 @@ func GetGroupDashboard(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, "failed to get statistics") return } + + dashboards.ChartData = fillGaps(dashboards.ChartData) middleware.SuccessResponse(c, dashboards) } From 54b0ae33208a1fef8ebb48686486526e1c7601a3 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 11:57:35 +0800 Subject: [PATCH 056/167] feat: fill dash carts gaps --- service/aiproxy/controller/dashboard.go | 25 ++++++++++--------------- 1 file changed, 10 
insertions(+), 15 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 1600b78564e..7c79f6d4261 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -27,18 +27,22 @@ func getDashboardStartEndTime(t string) (time.Time, time.Time) { return start, end } +const ( + fillGapsInterval = 3600 +) + func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { if len(data) <= 1 { return data } - var result []*model.HourlyChartData + result := make([]*model.HourlyChartData, 0, len(data)) result = append(result, data[0]) for i := 1; i < len(data); i++ { curr := data[i] prev := data[i-1] - hourDiff := (curr.Timestamp - prev.Timestamp) / 3600 + hourDiff := (curr.Timestamp - prev.Timestamp) / fillGapsInterval // If gap is 1 hour or less, continue if hourDiff <= 1 { @@ -50,29 +54,20 @@ func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { if hourDiff > 3 { // Add point for hour after prev result = append(result, &model.HourlyChartData{ - Timestamp: prev.Timestamp + 3600, - RequestCount: 0, - UsedAmount: 0, - ExceptionCount: 0, + Timestamp: prev.Timestamp + fillGapsInterval, }) // Add point for hour before curr result = append(result, &model.HourlyChartData{ - Timestamp: curr.Timestamp - 3600, - RequestCount: 0, - UsedAmount: 0, - ExceptionCount: 0, + Timestamp: curr.Timestamp - fillGapsInterval, }) result = append(result, curr) continue } // Fill gaps of 2-3 hours with zero points - for j := prev.Timestamp + 3600; j < curr.Timestamp; j += 3600 { + for j := prev.Timestamp + fillGapsInterval; j < curr.Timestamp; j += fillGapsInterval { result = append(result, &model.HourlyChartData{ - Timestamp: j, - RequestCount: 0, - UsedAmount: 0, - ExceptionCount: 0, + Timestamp: j, }) } result = append(result, curr) From 9b0840b09f8d637ae836bcb890be985714537154 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 14:04:12 +0800 Subject: [PATCH 
057/167] chore: go mod tidy --- service/go.work.sum | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/service/go.work.sum b/service/go.work.sum index 791354d62c8..792537d060c 100644 --- a/service/go.work.sum +++ b/service/go.work.sum @@ -753,21 +753,26 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 h1:woXadbf0c7enQ2UGCi8gW/ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19/go.mod h1:zminj5ucw7w0r65bP6nhyOd3xL6veAUMc3ElGMoLVb4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4GQbF1vJzG60poZqWatZlhP31y8PGCCVTvIGPdaaYJ0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7/go.mod h1:kLPQvGUmxn/fqiCrDeohwG33bq2pQpGeY62yRO6Nrh0= github.com/aws/aws-sdk-go-v2/service/sso v1.24.5/go.mod h1:wrMCEwjFPms+V86TCQQeOxQF/If4vT44FGIOFiMC2ck= github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8/go.mod 
h1:XDeGv1opzwm8ubxddF0cgqkZWsyOtw4lr6dxwmb6YQg= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4/go.mod h1:Tp/ly1cTjRLGBBmNccFumbZ8oqpZlpdhFf80SrRh4is= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7/go.mod h1:JfyQ0g2JG8+Krq0EuZNnRwX0mU0HrwY/tG6JNfcqh4k= github.com/aws/aws-sdk-go-v2/service/sts v1.32.4/go.mod h1:9XEUty5v5UAsMiFOBJrNibZgwCeOma73jgGwwhgffa8= github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3/go.mod h1:5Gn+d+VaaRgsjewpMvGazt0WfcFO+Md4wLOuBfGR9Bc= github.com/bazelbuild/rules_go v0.49.0 h1:5vCbuvy8Q11g41lseGJDc5vxhDjJtfxr6nM/IC4VmqM= github.com/bazelbuild/rules_go v0.49.0/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -1922,6 +1927,7 @@ google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0 google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 
h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= @@ -1947,6 +1953,7 @@ google.golang.org/genproto/googleapis/bytestream v0.0.0-20241021214115-324edc3d5 google.golang.org/genproto/googleapis/bytestream v0.0.0-20241021214115-324edc3d5d38/go.mod h1:T8O3fECQbif8cez15vxAcjbwXxvL2xbnvbQ7ZfiMAMs= google.golang.org/genproto/googleapis/bytestream v0.0.0-20241118233622-e639e219e697 h1:rY93Be8/KL+EtFM4im9lxMzjGn796GnwVUd75cyFCJg= google.golang.org/genproto/googleapis/bytestream v0.0.0-20241118233622-e639e219e697/go.mod h1:qUsLYwbwz5ostUWtuFuXPlHmSJodC5NI/88ZlHj4M1o= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20241209162323-e6fa225c2576/go.mod h1:qUsLYwbwz5ostUWtuFuXPlHmSJodC5NI/88ZlHj4M1o= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= From e5190b1800830aa62a9a255fac97bbf67cb28159 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 14:38:24 +0800 Subject: [PATCH 058/167] feat: dashboard timespan --- service/aiproxy/controller/dashboard.go | 35 +++++++++++++------------ service/aiproxy/model/log.go | 26 +++++++++--------- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 7c79f6d4261..c3d81f5d00c 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -9,29 +9,30 @@ import ( "github.com/labring/sealos/service/aiproxy/model" ) -func getDashboardStartEndTime(t string) (time.Time, time.Time) { +func getDashboardTime(t string) (time.Time, time.Time, time.Duration) { end := time.Now() var start time.Time + var timeSpan time.Duration switch t { case "month": start = 
end.AddDate(0, 0, -30) + timeSpan = time.Hour * 24 case "two_week": start = end.AddDate(0, 0, -15) + timeSpan = time.Hour * 12 case "week": start = end.AddDate(0, 0, -7) + timeSpan = time.Hour * 6 case "day": fallthrough default: start = end.AddDate(0, 0, -1) + timeSpan = time.Hour * 1 } - return start, end + return start, end, timeSpan } -const ( - fillGapsInterval = 3600 -) - -func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { +func fillGaps(data []*model.HourlyChartData, timeSpan time.Duration) []*model.HourlyChartData { if len(data) <= 1 { return data } @@ -42,7 +43,7 @@ func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { for i := 1; i < len(data); i++ { curr := data[i] prev := data[i-1] - hourDiff := (curr.Timestamp - prev.Timestamp) / fillGapsInterval + hourDiff := (curr.Timestamp - prev.Timestamp) / int64(timeSpan.Seconds()) // If gap is 1 hour or less, continue if hourDiff <= 1 { @@ -54,18 +55,18 @@ func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { if hourDiff > 3 { // Add point for hour after prev result = append(result, &model.HourlyChartData{ - Timestamp: prev.Timestamp + fillGapsInterval, + Timestamp: prev.Timestamp + int64(timeSpan.Seconds()), }) // Add point for hour before curr result = append(result, &model.HourlyChartData{ - Timestamp: curr.Timestamp - fillGapsInterval, + Timestamp: curr.Timestamp - int64(timeSpan.Seconds()), }) result = append(result, curr) continue } // Fill gaps of 2-3 hours with zero points - for j := prev.Timestamp + fillGapsInterval; j < curr.Timestamp; j += fillGapsInterval { + for j := prev.Timestamp + int64(timeSpan.Seconds()); j < curr.Timestamp; j += int64(timeSpan.Seconds()) { result = append(result, &model.HourlyChartData{ Timestamp: j, }) @@ -77,15 +78,15 @@ func fillGaps(data []*model.HourlyChartData) []*model.HourlyChartData { } func GetDashboard(c *gin.Context) { - start, end := getDashboardStartEndTime(c.Query("type")) + start, end, timeSpan := 
getDashboardTime(c.Query("type")) modelName := c.Query("model") - dashboards, err := model.GetDashboardData(start, end, modelName) + dashboards, err := model.GetDashboardData(start, end, modelName, timeSpan) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return } - dashboards.ChartData = fillGaps(dashboards.ChartData) + dashboards.ChartData = fillGaps(dashboards.ChartData, timeSpan) middleware.SuccessResponse(c, dashboards) } @@ -96,16 +97,16 @@ func GetGroupDashboard(c *gin.Context) { return } - start, end := getDashboardStartEndTime(c.Query("type")) + start, end, timeSpan := getDashboardTime(c.Query("type")) tokenName := c.Query("token_name") modelName := c.Query("model") - dashboards, err := model.GetGroupDashboardData(group, start, end, tokenName, modelName) + dashboards, err := model.GetGroupDashboardData(group, start, end, tokenName, modelName, timeSpan) if err != nil { middleware.ErrorResponse(c, http.StatusOK, "failed to get statistics") return } - dashboards.ChartData = fillGaps(dashboards.ChartData) + dashboards.ChartData = fillGaps(dashboards.ChartData, timeSpan) middleware.SuccessResponse(c, dashboards) } diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 80490ee9505..e2bbba7b9ba 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -627,29 +627,29 @@ type GroupDashboardResponse struct { TokenNames []string `json:"token_names"` } -func getHourTimestamp() string { +func getTimeSpanFormat(timeSpan time.Duration) string { switch { case common.UsingMySQL: - return "UNIX_TIMESTAMP(DATE_FORMAT(request_at, '%Y-%m-%d %H:00:00'))" + return fmt.Sprintf("UNIX_TIMESTAMP(DATE_FORMAT(request_at, '%%Y-%%m-%%d %%H:%%i:00')) DIV %d * %d", int64(timeSpan.Seconds()), int64(timeSpan.Seconds())) case common.UsingPostgreSQL: - return "FLOOR(EXTRACT(EPOCH FROM date_trunc('hour', request_at)))" + return fmt.Sprintf("FLOOR(EXTRACT(EPOCH FROM date_trunc('minute', request_at)) / %d) * %d", 
int64(timeSpan.Seconds()), int64(timeSpan.Seconds())) case common.UsingSQLite: - return "STRFTIME('%s', STRFTIME('%Y-%m-%d %H:00:00', request_at))" + return fmt.Sprintf("CAST(STRFTIME('%%s', STRFTIME('%%Y-%%m-%%d %%H:%%M:00', request_at)) AS INTEGER) / %d * %d", int64(timeSpan.Seconds()), int64(timeSpan.Seconds())) default: return "" } } -func getChartData(group string, start, end time.Time, tokenName, modelName string) ([]*HourlyChartData, error) { +func getChartData(group string, start, end time.Time, tokenName, modelName string, timeSpan time.Duration) ([]*HourlyChartData, error) { var chartData []*HourlyChartData - hourTimestamp := getHourTimestamp() - if hourTimestamp == "" { - return nil, errors.New("unsupported hour format") + timeSpanFormat := getTimeSpanFormat(timeSpan) + if timeSpanFormat == "" { + return nil, errors.New("unsupported time format") } query := LogDB.Table("logs"). - Select(hourTimestamp + " as timestamp, count(*) as request_count, sum(used_amount) as used_amount, sum(case when code != 200 then 1 else 0 end) as exception_count"). + Select(timeSpanFormat + " as timestamp, count(*) as request_count, sum(used_amount) as used_amount, sum(case when code != 200 then 1 else 0 end) as exception_count"). Group("timestamp"). 
Order("timestamp ASC") @@ -758,14 +758,14 @@ func getTPM(group string, end time.Time, tokenName, modelName string) (int64, er return tpm, err } -func GetDashboardData(start, end time.Time, modelName string) (*DashboardResponse, error) { +func GetDashboardData(start, end time.Time, modelName string, timeSpan time.Duration) (*DashboardResponse, error) { if end.IsZero() { end = time.Now() } else if end.Before(start) { return nil, errors.New("end time is before start time") } - chartData, err := getChartData("", start, end, "", modelName) + chartData, err := getChartData("", start, end, "", modelName, timeSpan) if err != nil { return nil, err } @@ -800,7 +800,7 @@ func GetDashboardData(start, end time.Time, modelName string) (*DashboardRespons }, nil } -func GetGroupDashboardData(group string, start, end time.Time, tokenName string, modelName string) (*GroupDashboardResponse, error) { +func GetGroupDashboardData(group string, start, end time.Time, tokenName string, modelName string, timeSpan time.Duration) (*GroupDashboardResponse, error) { if group == "" { return nil, errors.New("group is required") } @@ -811,7 +811,7 @@ func GetGroupDashboardData(group string, start, end time.Time, tokenName string, return nil, errors.New("end time is before start time") } - chartData, err := getChartData(group, start, end, tokenName, modelName) + chartData, err := getChartData(group, start, end, tokenName, modelName, timeSpan) if err != nil { return nil, err } From fad3bdaac6944595f440ebfd8860dbb505bd6d40 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 14:56:30 +0800 Subject: [PATCH 059/167] feat: dashboard timespan from query --- service/aiproxy/controller/dashboard.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index c3d81f5d00c..6f3472a3f36 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -2,6 +2,7 @@ 
package controller import ( "net/http" + "strconv" "time" "github.com/gin-gonic/gin" @@ -77,9 +78,26 @@ func fillGaps(data []*model.HourlyChartData, timeSpan time.Duration) []*model.Ho return result } +func getTimeSpanWithDefault(c *gin.Context, defaultTimeSpan time.Duration) time.Duration { + spanStr := c.Query("span") + if spanStr == "" { + return defaultTimeSpan + } + span, err := strconv.Atoi(spanStr) + if err != nil { + return defaultTimeSpan + } + if span < 1 || span > 48 { + return defaultTimeSpan + } + return time.Duration(span) * time.Hour +} + func GetDashboard(c *gin.Context) { start, end, timeSpan := getDashboardTime(c.Query("type")) modelName := c.Query("model") + timeSpan = getTimeSpanWithDefault(c, timeSpan) + dashboards, err := model.GetDashboardData(start, end, modelName, timeSpan) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -100,6 +118,7 @@ func GetGroupDashboard(c *gin.Context) { start, end, timeSpan := getDashboardTime(c.Query("type")) tokenName := c.Query("token_name") modelName := c.Query("model") + timeSpan = getTimeSpanWithDefault(c, timeSpan) dashboards, err := model.GetGroupDashboardData(group, start, end, tokenName, modelName, timeSpan) if err != nil { From cbc620a6608c1f8c1be5bf5327773d175c88d1dd Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 16:50:25 +0800 Subject: [PATCH 060/167] feat: decouple request paths --- service/aiproxy/controller/channel-test.go | 16 ++++++------ service/aiproxy/controller/relay.go | 3 ++- service/aiproxy/middleware/distributor.go | 16 +++++++++--- service/aiproxy/model/modelconfig.go | 10 +++---- service/aiproxy/relay/adaptor/ali/adaptor.go | 4 +-- .../aiproxy/relay/adaptor/ali/embeddings.go | 10 +++---- service/aiproxy/relay/adaptor/ali/image.go | 8 +++--- service/aiproxy/relay/adaptor/ali/rerank.go | 8 +++--- .../aiproxy/relay/adaptor/ali/stt-realtime.go | 16 ++++++------ service/aiproxy/relay/adaptor/ali/tts.go | 10 +++---- 
.../relay/adaptor/anthropic/adaptor.go | 8 +++--- service/aiproxy/relay/adaptor/aws/adaptor.go | 4 +-- .../relay/adaptor/aws/claude/adapter.go | 6 ++--- .../relay/adaptor/aws/llama3/adapter.go | 6 ++--- .../relay/adaptor/aws/utils/adaptor.go | 2 +- .../aiproxy/relay/adaptor/baidu/adaptor.go | 2 +- service/aiproxy/relay/adaptor/baidu/main.go | 8 +++--- .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 4 +-- .../aiproxy/relay/adaptor/cohere/adaptor.go | 10 +++---- service/aiproxy/relay/adaptor/coze/adaptor.go | 12 ++++----- .../aiproxy/relay/adaptor/doubaoaudio/main.go | 4 +-- .../aiproxy/relay/adaptor/doubaoaudio/tts.go | 14 +++++----- .../aiproxy/relay/adaptor/gemini/adaptor.go | 4 +-- .../relay/adaptor/gemini/embeddings.go | 8 +++--- service/aiproxy/relay/adaptor/gemini/main.go | 12 ++++----- service/aiproxy/relay/adaptor/interface.go | 2 +- .../aiproxy/relay/adaptor/minimax/adaptor.go | 2 +- service/aiproxy/relay/adaptor/minimax/tts.go | 8 +++--- .../aiproxy/relay/adaptor/ollama/adaptor.go | 6 ++--- service/aiproxy/relay/adaptor/ollama/main.go | 18 ++++++------- .../aiproxy/relay/adaptor/openai/adaptor.go | 18 ++++++------- .../relay/adaptor/openai/embeddings.go | 8 +++--- service/aiproxy/relay/adaptor/openai/image.go | 8 +++--- .../aiproxy/relay/adaptor/openai/rerank.go | 8 +++--- service/aiproxy/relay/adaptor/openai/stt.go | 16 ++++++------ service/aiproxy/relay/adaptor/openai/tts.go | 12 ++++----- .../relay/adaptor/siliconflow/adaptor.go | 2 +- .../aiproxy/relay/adaptor/vertexai/adaptor.go | 4 +-- .../relay/adaptor/vertexai/claude/adapter.go | 10 +++---- .../relay/adaptor/vertexai/gemini/adapter.go | 2 +- .../relay/adaptor/vertexai/registry.go | 2 +- .../aiproxy/relay/adaptor/xunfei/adaptor.go | 10 +++---- service/aiproxy/relay/controller/helper.go | 4 +-- service/aiproxy/relay/utils/testreq.go | 26 ------------------- 44 files changed, 177 insertions(+), 194 deletions(-) diff --git a/service/aiproxy/controller/channel-test.go 
b/service/aiproxy/controller/channel-test.go index 4f7ba500db3..fe94fad8cdf 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -38,8 +38,7 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe w := httptest.NewRecorder() newc, _ := gin.CreateTestContext(w) newc.Request = &http.Request{ - Method: http.MethodPost, - URL: &url.URL{Path: utils.BuildModeDefaultPath(mode)}, + URL: &url.URL{}, Body: io.NopCloser(body), Header: make(http.Header), } @@ -59,11 +58,6 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe if success { respStr = w.Body.String() code = w.Code - log.Infof("model %s(%d) test success, unban it", modelName, channel.ID) - err := monitor.ClearChannelModelErrors(context.Background(), modelName, channel.ID) - if err != nil { - log.Errorf("clear channel errors failed: %+v", err) - } } else { respStr = bizErr.String() code = bizErr.StatusCode @@ -382,7 +376,13 @@ func AutoTestBannedModels() { if err != nil { log.Errorf("failed to test channel %s(%d) model %s: %s", channel.Name, channel.ID, modelName, err.Error()) } - if !result.Success { + if result.Success { + log.Infof("model %s(%d) test success, unban it", modelName, channel.ID) + err = monitor.ClearChannelModelErrors(context.Background(), modelName, channel.ID) + if err != nil { + log.Errorf("clear channel errors failed: %+v", err) + } + } else { log.Infof("model %s(%d) test failed", modelName, channel.ID) } } diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 1fb569f6ff7..a6e20554f73 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -108,7 +108,8 @@ func Relay(c *gin.Context) { return } - meta := middleware.NewMetaByContext(c, channel) + mode := relaymode.GetByPath(c.Request.URL.Path) + meta := middleware.NewMetaByContext(c, channel, requestModel, mode) bizErr, retry := RelayHelper(meta, c) if bizErr 
== nil { return diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 0b98c913ee1..eae4e8009f5 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -97,6 +97,15 @@ func Distribute(c *gin.Context) { SetLogModelFields(log.Data, requestModel) + mode := relaymode.GetByPath(c.Request.URL.Path) + if mode == relaymode.Unknown { + abortWithMessage(c, + http.StatusServiceUnavailable, + fmt.Sprintf("%s api not implemented", c.Request.URL.Path), + ) + return + } + token := c.MustGet(ctxkey.Token).(*model.TokenCache) if len(token.Models) == 0 || !slices.Contains(token.Models, requestModel) { abortWithMessage(c, @@ -123,15 +132,14 @@ func Distribute(c *gin.Context) { c.Next() } -func NewMetaByContext(c *gin.Context, channel *model.Channel) *meta.Meta { - originalModel := c.MustGet(ctxkey.OriginalModel).(string) +func NewMetaByContext(c *gin.Context, channel *model.Channel, modelName string, mode int) *meta.Meta { requestID := c.GetString(ctxkey.RequestID) group := c.MustGet(ctxkey.Group).(*model.GroupCache) token := c.MustGet(ctxkey.Token).(*model.TokenCache) return meta.NewMeta( channel, - relaymode.GetByPath(c.Request.URL.Path), - originalModel, + mode, + modelName, meta.WithRequestID(requestID), meta.WithGroup(group), meta.WithToken(token), diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index 1fb5c0aa43e..a1b51f886be 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -19,11 +19,11 @@ type ModelConfig struct { Model string `gorm:"primaryKey" json:"model"` Owner ModelOwner `gorm:"type:varchar(255);index" json:"owner"` ImageMaxBatchSize int `json:"image_batch_size,omitempty"` - // relaymode/define.go - Type int `json:"type"` - InputPrice float64 `json:"input_price,omitempty"` - OutputPrice float64 `json:"output_price,omitempty"` - RPM int64 `json:"rpm"` + Type int `json:"type"` // 
relaymode/define.go + InputPrice float64 `json:"input_price,omitempty"` + OutputPrice float64 `json:"output_price,omitempty"` + RPM int64 `json:"rpm,omitempty"` + TPM int64 `json:"tpm,omitempty"` } func (c *ModelConfig) MarshalJSON() ([]byte, error) { diff --git a/service/aiproxy/relay/adaptor/ali/adaptor.go b/service/aiproxy/relay/adaptor/ali/adaptor.go index 8eb1f723119..a8837295c9c 100644 --- a/service/aiproxy/relay/adaptor/ali/adaptor.go +++ b/service/aiproxy/relay/adaptor/ali/adaptor.go @@ -50,7 +50,7 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.ImagesGenerations: return ConvertImageRequest(meta, req) @@ -65,7 +65,7 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade case relaymode.AudioTranscription: return ConvertSTTRequest(meta, req) default: - return nil, nil, errors.New("unsupported convert request mode") + return "", nil, nil, errors.New("unsupported convert request mode") } } diff --git a/service/aiproxy/relay/adaptor/ali/embeddings.go b/service/aiproxy/relay/adaptor/ali/embeddings.go index d460b87bc25..c6863d536b4 100644 --- a/service/aiproxy/relay/adaptor/ali/embeddings.go +++ b/service/aiproxy/relay/adaptor/ali/embeddings.go @@ -15,16 +15,16 @@ import ( relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" ) -func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { var reqMap map[string]any err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap["model"] = meta.ActualModelName input, ok := 
reqMap["input"] if !ok { - return nil, nil, errors.New("input is required") + return "", nil, nil, errors.New("input is required") } switch v := input.(type) { case string: @@ -47,9 +47,9 @@ func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (http.Header, reqMap["parameters"] = parameters jsonData, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return http.MethodPost, nil, bytes.NewReader(jsonData), nil } func embeddingResponse2OpenAI(meta *meta.Meta, response *EmbeddingResponse) *openai.EmbeddingResponse { diff --git a/service/aiproxy/relay/adaptor/ali/image.go b/service/aiproxy/relay/adaptor/ali/image.go index 55e86bad655..21d1574851c 100644 --- a/service/aiproxy/relay/adaptor/ali/image.go +++ b/service/aiproxy/relay/adaptor/ali/image.go @@ -22,10 +22,10 @@ import ( const MetaResponseFormat = "response_format" -func ConvertImageRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalImageRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName @@ -40,9 +40,9 @@ func ConvertImageRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Re data, err := json.Marshal(&imageRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return http.Header{ + return http.MethodPost, http.Header{ "X-Dashscope-Async": {"enable"}, }, bytes.NewReader(data), nil } diff --git a/service/aiproxy/relay/adaptor/ali/rerank.go b/service/aiproxy/relay/adaptor/ali/rerank.go index 4fb4412af00..a33413d8500 100644 --- a/service/aiproxy/relay/adaptor/ali/rerank.go +++ b/service/aiproxy/relay/adaptor/ali/rerank.go @@ -26,11 +26,11 @@ type RerankUsage struct { TotalTokens int `json:"total_tokens"` } -func ConvertRerankRequest(meta *meta.Meta, req 
*http.Request) (http.Header, io.Reader, error) { +func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { reqMap := make(map[string]any) err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap["model"] = meta.ActualModelName reqMap["input"] = map[string]any{ @@ -50,9 +50,9 @@ func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (http.Header, io.R reqMap["parameters"] = parameters jsonData, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return http.MethodPost, nil, bytes.NewReader(jsonData), nil } func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/ali/stt-realtime.go b/service/aiproxy/relay/adaptor/ali/stt-realtime.go index b8bc38e0950..1d45920f7b5 100644 --- a/service/aiproxy/relay/adaptor/ali/stt-realtime.go +++ b/service/aiproxy/relay/adaptor/ali/stt-realtime.go @@ -59,27 +59,27 @@ type STTUsage struct { Characters int `json:"characters"` } -func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { +func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { err := request.ParseMultipartForm(1024 * 1024 * 4) if err != nil { - return nil, nil, err + return "", nil, nil, err } var audioData []byte if files, ok := request.MultipartForm.File["file"]; !ok { - return nil, nil, errors.New("audio file is required") + return "", nil, nil, errors.New("audio file is required") } else if len(files) == 1 { file, err := files[0].Open() if err != nil { - return nil, nil, err + return "", nil, nil, err } audioData, err = io.ReadAll(file) file.Close() if err != nil { - return nil, nil, err + return "", nil, nil, err } } else { - return nil, nil, 
errors.New("audio file is required") + return "", nil, nil, errors.New("audio file is required") } sttRequest := STTMessage{ @@ -103,11 +103,11 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io. data, err := json.Marshal(sttRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } meta.Set("audio_data", audioData) meta.Set("task_id", sttRequest.Header.TaskID) - return http.Header{ + return http.MethodPost, http.Header{ "X-DashScope-DataInspection": {"enable"}, }, bytes.NewReader(data), nil } diff --git a/service/aiproxy/relay/adaptor/ali/tts.go b/service/aiproxy/relay/adaptor/ali/tts.go index b1ecf4d31fe..3c685a081c6 100644 --- a/service/aiproxy/relay/adaptor/ali/tts.go +++ b/service/aiproxy/relay/adaptor/ali/tts.go @@ -93,14 +93,14 @@ var ttsSupportedFormat = map[string]struct{}{ "mp3": {}, } -func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalTTSRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap, err := utils.UnmarshalMap(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } var sampleRate int sampleRateI, ok := reqMap["sample_rate"].(float64) @@ -156,9 +156,9 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Read data, err := json.Marshal(ttsRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return http.Header{ + return http.MethodPost, http.Header{ "X-DashScope-DataInspection": {"enable"}, }, bytes.NewReader(data), nil } diff --git a/service/aiproxy/relay/adaptor/anthropic/adaptor.go b/service/aiproxy/relay/adaptor/anthropic/adaptor.go index b265b039ccb..73601212833 100644 --- a/service/aiproxy/relay/adaptor/anthropic/adaptor.go +++ b/service/aiproxy/relay/adaptor/anthropic/adaptor.go @@ -44,17 +44,17 @@ func (a *Adaptor) 
SetupRequestHeader(meta *meta.Meta, c *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { data, err := ConvertRequest(meta, req) if err != nil { - return nil, nil, err + return "", nil, nil, err } data2, err := json.Marshal(data) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data2), nil + return http.MethodPost, nil, bytes.NewReader(data2), nil } func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error) { diff --git a/service/aiproxy/relay/adaptor/aws/adaptor.go b/service/aiproxy/relay/adaptor/aws/adaptor.go index 3295c7dd9d7..cb79e41cab1 100644 --- a/service/aiproxy/relay/adaptor/aws/adaptor.go +++ b/service/aiproxy/relay/adaptor/aws/adaptor.go @@ -17,10 +17,10 @@ var _ adaptor.Adaptor = new(Adaptor) type Adaptor struct{} -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { adaptor := GetAdaptor(meta.ActualModelName) if adaptor == nil { - return nil, nil, errors.New("adaptor not found") + return "", nil, nil, errors.New("adaptor not found") } meta.Set("awsAdapter", adaptor) return adaptor.ConvertRequest(meta, req) diff --git a/service/aiproxy/relay/adaptor/aws/claude/adapter.go b/service/aiproxy/relay/adaptor/aws/claude/adapter.go index 8b0126f93df..1cf0813a2e6 100644 --- a/service/aiproxy/relay/adaptor/aws/claude/adapter.go +++ b/service/aiproxy/relay/adaptor/aws/claude/adapter.go @@ -19,13 +19,13 @@ var _ utils.AwsAdapter = new(Adaptor) type Adaptor struct{} -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, 
req *http.Request) (string, http.Header, io.Reader, error) { r, err := anthropic.ConvertRequest(meta, req) if err != nil { - return nil, nil, err + return "", nil, nil, err } meta.Set(ConvertedRequest, r) - return nil, nil, nil + return "", nil, nil, nil } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context) (usage *model.Usage, err *model.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/aws/llama3/adapter.go b/service/aiproxy/relay/adaptor/aws/llama3/adapter.go index 1524a92e7b9..bca6e0d8561 100644 --- a/service/aiproxy/relay/adaptor/aws/llama3/adapter.go +++ b/service/aiproxy/relay/adaptor/aws/llama3/adapter.go @@ -19,16 +19,16 @@ var _ utils.AwsAdapter = new(Adaptor) type Adaptor struct{} -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := relayutils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName meta.Set("stream", request.Stream) llamaReq := ConvertRequest(request) meta.Set(ConvertedRequest, llamaReq) - return nil, nil, nil + return "", nil, nil, nil } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context) (usage *model.Usage, err *model.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/aws/utils/adaptor.go b/service/aiproxy/relay/adaptor/aws/utils/adaptor.go index 585d0acdc36..cee82c555b7 100644 --- a/service/aiproxy/relay/adaptor/aws/utils/adaptor.go +++ b/service/aiproxy/relay/adaptor/aws/utils/adaptor.go @@ -15,7 +15,7 @@ import ( ) type AwsAdapter interface { - ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) + ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) DoResponse(meta *meta.Meta, c *gin.Context) (usage *model.Usage, err *model.ErrorWithStatusCode) } diff 
--git a/service/aiproxy/relay/adaptor/baidu/adaptor.go b/service/aiproxy/relay/adaptor/baidu/adaptor.go index da6ed65d75b..a68633c6363 100644 --- a/service/aiproxy/relay/adaptor/baidu/adaptor.go +++ b/service/aiproxy/relay/adaptor/baidu/adaptor.go @@ -86,7 +86,7 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.Embeddings: meta.Set(openai.MetaEmbeddingsPatchInputToSlices, true) diff --git a/service/aiproxy/relay/adaptor/baidu/main.go b/service/aiproxy/relay/adaptor/baidu/main.go index c608803dfa6..46d691f9298 100644 --- a/service/aiproxy/relay/adaptor/baidu/main.go +++ b/service/aiproxy/relay/adaptor/baidu/main.go @@ -41,10 +41,10 @@ type ChatRequest struct { EnableCitation bool `json:"enable_citation,omitempty"` } -func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName baiduRequest := ChatRequest{ @@ -81,9 +81,9 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, data, err := json.Marshal(baiduRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func responseBaidu2OpenAI(response *ChatResponse) *openai.TextResponse { diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index 805de27b268..9f844488d9d 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ 
b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -58,7 +58,7 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.ChatCompletions: actModel := meta.ActualModelName @@ -69,7 +69,7 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade } return openai.ConvertRequest(meta, req) default: - return nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) + return "", nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) } } diff --git a/service/aiproxy/relay/adaptor/cohere/adaptor.go b/service/aiproxy/relay/adaptor/cohere/adaptor.go index 93882964de4..896bded7615 100644 --- a/service/aiproxy/relay/adaptor/cohere/adaptor.go +++ b/service/aiproxy/relay/adaptor/cohere/adaptor.go @@ -33,21 +33,21 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. 
return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName requestBody := ConvertRequest(request) if requestBody == nil { - return nil, nil, errors.New("request body is nil") + return "", nil, nil, errors.New("request body is nil") } data, err := json.Marshal(requestBody) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error) { diff --git a/service/aiproxy/relay/adaptor/coze/adaptor.go b/service/aiproxy/relay/adaptor/coze/adaptor.go index 5e988b0eaeb..cdc3b7d45b2 100644 --- a/service/aiproxy/relay/adaptor/coze/adaptor.go +++ b/service/aiproxy/relay/adaptor/coze/adaptor.go @@ -38,17 +38,17 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. 
return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { if meta.Mode != relaymode.ChatCompletions { - return nil, nil, errors.New("coze only support chat completions") + return "", nil, nil, errors.New("coze only support chat completions") } request, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } _, userID, err := getTokenAndUserID(meta.Channel.Key) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.User = userID request.Model = meta.ActualModelName @@ -70,9 +70,9 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Heade } data, err := json.Marshal(cozeRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error) { diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/main.go b/service/aiproxy/relay/adaptor/doubaoaudio/main.go index 42aac95d93d..8c76d3db4da 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/main.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/main.go @@ -38,12 +38,12 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return GetRequestURL(meta) } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.AudioSpeech: return ConvertTTSRequest(meta, req) default: - return nil, nil, fmt.Errorf("unsupported relay mode %d for doubao", meta.Mode) + return "", nil, nil, fmt.Errorf("unsupported relay mode %d for doubao", meta.Mode) } } diff 
--git a/service/aiproxy/relay/adaptor/doubaoaudio/tts.go b/service/aiproxy/relay/adaptor/doubaoaudio/tts.go index 85267704de3..1f6ff2e81d2 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/tts.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/tts.go @@ -62,20 +62,20 @@ type RequestConfig struct { var defaultHeader = []byte{0x11, 0x10, 0x11, 0x00} //nolint:gosec -func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalTTSRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap, err := utils.UnmarshalMap(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } appID, token, err := getAppIDAndToken(meta.Channel.Key) if err != nil { - return nil, nil, err + return "", nil, nil, err } doubaoRequest := DoubaoTTSRequest{ @@ -119,12 +119,12 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Read data, err := json.Marshal(doubaoRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } compressedData, err := gzipCompress(data) if err != nil { - return nil, nil, err + return "", nil, nil, err } payloadArr := make([]byte, 4) @@ -134,7 +134,7 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Read clientRequest = append(clientRequest, payloadArr...) clientRequest = append(clientRequest, compressedData...) 
- return nil, bytes.NewReader(clientRequest), nil + return http.MethodPost, nil, bytes.NewReader(clientRequest), nil } func TTSDoRequest(meta *meta.Meta, req *http.Request) (*http.Response, error) { diff --git a/service/aiproxy/relay/adaptor/gemini/adaptor.go b/service/aiproxy/relay/adaptor/gemini/adaptor.go index 969be992d82..556eaf28dac 100644 --- a/service/aiproxy/relay/adaptor/gemini/adaptor.go +++ b/service/aiproxy/relay/adaptor/gemini/adaptor.go @@ -53,14 +53,14 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.Embeddings: return ConvertEmbeddingRequest(meta, req) case relaymode.ChatCompletions: return ConvertRequest(meta, req) default: - return nil, nil, errors.New("unsupported mode") + return "", nil, nil, errors.New("unsupported mode") } } diff --git a/service/aiproxy/relay/adaptor/gemini/embeddings.go b/service/aiproxy/relay/adaptor/gemini/embeddings.go index 1f8f13909d6..54ff391e4bd 100644 --- a/service/aiproxy/relay/adaptor/gemini/embeddings.go +++ b/service/aiproxy/relay/adaptor/gemini/embeddings.go @@ -13,10 +13,10 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/utils" ) -func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName @@ -41,9 +41,9 @@ func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (http.Header, i Requests: requests, }) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, 
bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 43ee9234e17..6416523dbcd 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -184,10 +184,10 @@ func buildContents(ctx context.Context, textRequest *model.GeneralOpenAIRequest) } // Setting safety to the lowest possible values since Gemini is already powerless enough -func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { textRequest, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } textRequest.Model = meta.ActualModelName @@ -195,12 +195,12 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, systemContent, contents, err := buildContents(req.Context(), textRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } tokenCount, err := CountTokens(req.Context(), meta, contents) if err != nil { - return nil, nil, err + return "", nil, nil, err } meta.PromptTokens = tokenCount @@ -216,10 +216,10 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, data, err := json.Marshal(geminiRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func CountTokens(ctx context.Context, meta *meta.Meta, chat []*ChatContent) (int, error) { diff --git a/service/aiproxy/relay/adaptor/interface.go b/service/aiproxy/relay/adaptor/interface.go index 373a527767d..8fa9b5e992d 100644 --- a/service/aiproxy/relay/adaptor/interface.go +++ 
b/service/aiproxy/relay/adaptor/interface.go @@ -14,7 +14,7 @@ type Adaptor interface { GetChannelName() string GetRequestURL(meta *meta.Meta) (string, error) SetupRequestHeader(meta *meta.Meta, c *gin.Context, req *http.Request) error - ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) + ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) DoRequest(meta *meta.Meta, c *gin.Context, req *http.Request) (*http.Response, error) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) GetModelList() []*model.ModelConfig diff --git a/service/aiproxy/relay/adaptor/minimax/adaptor.go b/service/aiproxy/relay/adaptor/minimax/adaptor.go index 86bb91562f1..40050cc101c 100644 --- a/service/aiproxy/relay/adaptor/minimax/adaptor.go +++ b/service/aiproxy/relay/adaptor/minimax/adaptor.go @@ -52,7 +52,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { } } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.ChatCompletions: meta.Set(openai.DoNotPatchStreamOptionsIncludeUsageMetaKey, true) diff --git a/service/aiproxy/relay/adaptor/minimax/tts.go b/service/aiproxy/relay/adaptor/minimax/tts.go index 87ec58fa3f7..0ab53ef78a7 100644 --- a/service/aiproxy/relay/adaptor/minimax/tts.go +++ b/service/aiproxy/relay/adaptor/minimax/tts.go @@ -19,10 +19,10 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/utils" ) -func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { reqMap, err := utils.UnmarshalMap(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap["model"] = 
meta.ActualModelName @@ -79,10 +79,10 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Read body, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(body), nil + return http.MethodPost, nil, bytes.NewReader(body), nil } type TTSExtraInfo struct { diff --git a/service/aiproxy/relay/adaptor/ollama/adaptor.go b/service/aiproxy/relay/adaptor/ollama/adaptor.go index d281190f726..8e3761a5821 100644 --- a/service/aiproxy/relay/adaptor/ollama/adaptor.go +++ b/service/aiproxy/relay/adaptor/ollama/adaptor.go @@ -40,9 +40,9 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { if request == nil { - return nil, nil, errors.New("request is nil") + return "", nil, nil, errors.New("request is nil") } switch meta.Mode { case relaymode.Embeddings: @@ -50,7 +50,7 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.H case relaymode.ChatCompletions: return ConvertRequest(meta, request) default: - return nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) + return "", nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) } } diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index 008a7fb32c1..0b16a837d11 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -23,11 +23,11 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/utils" ) -func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { var request relaymodel.GeneralOpenAIRequest err 
:= common.UnmarshalBodyReusable(req, &request) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName @@ -55,7 +55,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, case relaymodel.ContentTypeImageURL: _, data, err := image.GetImageFromURL(req.Context(), part.ImageURL.URL) if err != nil { - return nil, nil, err + return "", nil, nil, err } imageUrls = append(imageUrls, data) } @@ -69,10 +69,10 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, data, err := json.Marshal(ollamaRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse { @@ -175,10 +175,10 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithSt return nil, &usage } -func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { request, err := utils.UnmarshalGeneralOpenAIRequest(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } request.Model = meta.ActualModelName data, err := json.Marshal(&EmbeddingRequest{ @@ -193,9 +193,9 @@ func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (http.Header, i }, }) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func EmbeddingHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index e4d3a5bd370..cb2fbee34b7 100644 --- a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ 
b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -71,13 +71,13 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. return nil } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { return ConvertRequest(meta, req) } -func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { if req == nil { - return nil, nil, errors.New("request is nil") + return "", nil, nil, errors.New("request is nil") } switch meta.Mode { case relaymode.Moderations: @@ -96,7 +96,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, case relaymode.Rerank: return ConvertRerankRequest(meta, req) default: - return nil, nil, errors.New("unsupported convert request mode") + return "", nil, nil, errors.New("unsupported convert request mode") } } @@ -128,25 +128,25 @@ func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *re const DoNotPatchStreamOptionsIncludeUsageMetaKey = "do_not_patch_stream_options_include_usage" -func ConvertTextRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertTextRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { reqMap := make(map[string]any) err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } if !meta.GetBool(DoNotPatchStreamOptionsIncludeUsageMetaKey) { if err := patchStreamOptions(reqMap); err != nil { - return nil, nil, err + return "", nil, nil, err } } reqMap["model"] = meta.ActualModelName jsonData, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return 
http.MethodPost, nil, bytes.NewReader(jsonData), nil } func patchStreamOptions(reqMap map[string]any) error { diff --git a/service/aiproxy/relay/adaptor/openai/embeddings.go b/service/aiproxy/relay/adaptor/openai/embeddings.go index d0c2997682e..b368fe87597 100644 --- a/service/aiproxy/relay/adaptor/openai/embeddings.go +++ b/service/aiproxy/relay/adaptor/openai/embeddings.go @@ -13,11 +13,11 @@ import ( const MetaEmbeddingsPatchInputToSlices = "embeddings_input_to_slices" //nolint:gocritic -func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { reqMap := make(map[string]any) err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap["model"] = meta.ActualModelName @@ -31,7 +31,7 @@ func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (http.Header, jsonData, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return http.MethodPost, nil, bytes.NewReader(jsonData), nil } diff --git a/service/aiproxy/relay/adaptor/openai/image.go b/service/aiproxy/relay/adaptor/openai/image.go index 9c2ea8d8f97..1614421987f 100644 --- a/service/aiproxy/relay/adaptor/openai/image.go +++ b/service/aiproxy/relay/adaptor/openai/image.go @@ -14,20 +14,20 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/model" ) -func ConvertImageRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { reqMap := make(map[string]any) err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } meta.Set(MetaResponseFormat, reqMap["response_format"]) reqMap["model"] = meta.ActualModelName jsonData, 
err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return http.MethodPost, nil, bytes.NewReader(jsonData), nil } func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/openai/rerank.go b/service/aiproxy/relay/adaptor/openai/rerank.go index 18f2cd04e64..b92e31beb2b 100644 --- a/service/aiproxy/relay/adaptor/openai/rerank.go +++ b/service/aiproxy/relay/adaptor/openai/rerank.go @@ -13,18 +13,18 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/model" ) -func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { reqMap := make(map[string]any) err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap["model"] = meta.ActualModelName jsonData, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return http.MethodPost, nil, bytes.NewReader(jsonData), nil } func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/openai/stt.go b/service/aiproxy/relay/adaptor/openai/stt.go index a4f8fec6673..c8f070bd18f 100644 --- a/service/aiproxy/relay/adaptor/openai/stt.go +++ b/service/aiproxy/relay/adaptor/openai/stt.go @@ -17,10 +17,10 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/model" ) -func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { +func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { err := request.ParseMultipartForm(1024 * 1024 * 4) if err != nil { - return nil, 
nil, err + return "", nil, nil, err } multipartBody := &bytes.Buffer{} @@ -34,7 +34,7 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io. if key == "model" { err = multipartWriter.WriteField(key, meta.ActualModelName) if err != nil { - return nil, nil, err + return "", nil, nil, err } continue } @@ -44,7 +44,7 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io. } err = multipartWriter.WriteField(key, value) if err != nil { - return nil, nil, err + return "", nil, nil, err } } @@ -55,23 +55,23 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io. fileHeader := files[0] file, err := fileHeader.Open() if err != nil { - return nil, nil, err + return "", nil, nil, err } w, err := multipartWriter.CreateFormFile(key, fileHeader.Filename) if err != nil { file.Close() - return nil, nil, err + return "", nil, nil, err } _, err = io.Copy(w, file) file.Close() if err != nil { - return nil, nil, err + return "", nil, nil, err } } multipartWriter.Close() ContentType := multipartWriter.FormDataContentType() - return http.Header{ + return http.MethodPost, http.Header{ "Content-Type": {ContentType}, }, multipartBody, nil } diff --git a/service/aiproxy/relay/adaptor/openai/tts.go b/service/aiproxy/relay/adaptor/openai/tts.go index 60fe18094e0..df7f3ee472f 100644 --- a/service/aiproxy/relay/adaptor/openai/tts.go +++ b/service/aiproxy/relay/adaptor/openai/tts.go @@ -14,26 +14,26 @@ import ( relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" ) -func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { textRequest := relaymodel.TextToSpeechRequest{} err := common.UnmarshalBodyReusable(req, &textRequest) if err != nil { - return nil, nil, err + return "", nil, nil, err } if len(textRequest.Input) > 4096 { - return nil, nil, errors.New("input is 
too long (over 4096 characters)") + return "", nil, nil, errors.New("input is too long (over 4096 characters)") } reqMap := make(map[string]any) err = common.UnmarshalBodyReusable(req, &reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } reqMap["model"] = meta.ActualModelName jsonData, err := json.Marshal(reqMap) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(jsonData), nil + return http.MethodPost, nil, bytes.NewReader(jsonData), nil } func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/siliconflow/adaptor.go b/service/aiproxy/relay/adaptor/siliconflow/adaptor.go index f2816ef5a0f..002aa9fb7d8 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/adaptor.go +++ b/service/aiproxy/relay/adaptor/siliconflow/adaptor.go @@ -36,7 +36,7 @@ func (a *Adaptor) GetChannelName() string { return "siliconflow" } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { return a.Adaptor.ConvertRequest(meta, req) } diff --git a/service/aiproxy/relay/adaptor/vertexai/adaptor.go b/service/aiproxy/relay/adaptor/vertexai/adaptor.go index f3516ac79ae..737c330a643 100644 --- a/service/aiproxy/relay/adaptor/vertexai/adaptor.go +++ b/service/aiproxy/relay/adaptor/vertexai/adaptor.go @@ -29,10 +29,10 @@ type Config struct { ADCJSON string } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { adaptor := GetAdaptor(meta.ActualModelName) if adaptor == nil { - return nil, nil, errors.New("adaptor not found") + return "", nil, nil, errors.New("adaptor not found") } return 
adaptor.ConvertRequest(meta, request) diff --git a/service/aiproxy/relay/adaptor/vertexai/claude/adapter.go b/service/aiproxy/relay/adaptor/vertexai/claude/adapter.go index 8337d7e6ee8..58599896804 100644 --- a/service/aiproxy/relay/adaptor/vertexai/claude/adapter.go +++ b/service/aiproxy/relay/adaptor/vertexai/claude/adapter.go @@ -55,14 +55,14 @@ const anthropicVersion = "vertex-2023-10-16" type Adaptor struct{} -func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { if request == nil { - return nil, nil, errors.New("request is nil") + return "", nil, nil, errors.New("request is nil") } claudeReq, err := anthropic.ConvertRequest(meta, request) if err != nil { - return nil, nil, err + return "", nil, nil, err } meta.Set("stream", claudeReq.Stream) req := Request{ @@ -79,9 +79,9 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.H } data, err := json.Marshal(req) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return nil, bytes.NewReader(data), nil + return http.MethodPost, nil, bytes.NewReader(data), nil } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/adaptor/vertexai/gemini/adapter.go b/service/aiproxy/relay/adaptor/vertexai/gemini/adapter.go index 8d50a3d4f94..e2e6ccf4940 100644 --- a/service/aiproxy/relay/adaptor/vertexai/gemini/adapter.go +++ b/service/aiproxy/relay/adaptor/vertexai/gemini/adapter.go @@ -39,7 +39,7 @@ var ModelList = []*model.ModelConfig{ type Adaptor struct{} -func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { return 
gemini.ConvertRequest(meta, request) } diff --git a/service/aiproxy/relay/adaptor/vertexai/registry.go b/service/aiproxy/relay/adaptor/vertexai/registry.go index 4173bf8f803..7253a16e08e 100644 --- a/service/aiproxy/relay/adaptor/vertexai/registry.go +++ b/service/aiproxy/relay/adaptor/vertexai/registry.go @@ -37,7 +37,7 @@ func init() { } type innerAIAdapter interface { - ConvertRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error) + ConvertRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) } diff --git a/service/aiproxy/relay/adaptor/xunfei/adaptor.go b/service/aiproxy/relay/adaptor/xunfei/adaptor.go index 46d966df711..faa51f5d4ca 100644 --- a/service/aiproxy/relay/adaptor/xunfei/adaptor.go +++ b/service/aiproxy/relay/adaptor/xunfei/adaptor.go @@ -22,21 +22,21 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return a.Adaptor.GetRequestURL(meta) } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error) { +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { domain, err := getXunfeiDomain(meta.ActualModelName) if err != nil { - return nil, nil, err + return "", nil, nil, err } model := meta.ActualModelName meta.ActualModelName = domain defer func() { meta.ActualModelName = model }() - h, body, err := a.Adaptor.ConvertRequest(meta, req) + method, h, body, err := a.Adaptor.ConvertRequest(meta, req) if err != nil { - return nil, nil, err + return "", nil, nil, err } - return h, body, nil + return method, h, body, nil } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index ffbc50c44ae..a1d2bae499c 100644 --- a/service/aiproxy/relay/controller/helper.go 
+++ b/service/aiproxy/relay/controller/helper.go @@ -233,7 +233,7 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U detail.RequestBody = conv.BytesToString(reqBody) } - header, body, err := a.ConvertRequest(meta, c.Request) + method, header, body, err := a.ConvertRequest(meta, c.Request) if err != nil { return nil, &detail, openai.ErrorWrapperWithMessage("convert request failed: "+err.Error(), "convert_request_failed", http.StatusBadRequest) } @@ -252,7 +252,7 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U defer func() { c.Request = rawRequest }() } - req, err := http.NewRequestWithContext(c.Request.Context(), c.Request.Method, fullRequestURL, body) + req, err := http.NewRequestWithContext(c.Request.Context(), method, fullRequestURL, body) if err != nil { return nil, &detail, openai.ErrorWrapperWithMessage("new request failed: "+err.Error(), "new_request_failed", http.StatusBadRequest) } diff --git a/service/aiproxy/relay/utils/testreq.go b/service/aiproxy/relay/utils/testreq.go index 23ff2dfaac9..9fc92d5fb04 100644 --- a/service/aiproxy/relay/utils/testreq.go +++ b/service/aiproxy/relay/utils/testreq.go @@ -163,29 +163,3 @@ func BuildRerankRequest(model string) (io.Reader, error) { } return bytes.NewReader(jsonBytes), nil } - -func BuildModeDefaultPath(mode int) string { - switch mode { - case relaymode.ChatCompletions: - return "/v1/chat/completions" - case relaymode.Completions: - return "/v1/completions" - case relaymode.Embeddings: - return "/v1/embeddings" - case relaymode.Moderations: - return "/v1/moderations" - case relaymode.ImagesGenerations: - return "/v1/images/generations" - case relaymode.Edits: - return "/v1/edits" - case relaymode.AudioSpeech: - return "/v1/audio/speech" - case relaymode.AudioTranscription: - return "/v1/audio/transcriptions" - case relaymode.AudioTranslation: - return "/v1/audio/translations" - case relaymode.Rerank: - return "/v1/rerank" - } - return "" -} 
From 12d60b4b716ba11f8fc03dbb7b25d1fdba3e0e6d Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 31 Dec 2024 17:40:56 +0800 Subject: [PATCH 061/167] feat: group model tmp limit --- service/aiproxy/middleware/distributor.go | 23 ++++++++++++-- service/aiproxy/model/cache.go | 38 +++++++++++++++++++++-- service/aiproxy/model/log.go | 12 +++++++ 3 files changed, 67 insertions(+), 6 deletions(-) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index eae4e8009f5..8de7318daff 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -12,6 +12,7 @@ import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/relaymode" + log "github.com/sirupsen/logrus" ) const ( @@ -49,7 +50,7 @@ func getGroupRPMRatio(group *model.GroupCache) float64 { return groupRPMRatio } -func checkModelRPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64) bool { +func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64, modelTPM int64) bool { if modelRPM <= 0 { return true } @@ -72,6 +73,22 @@ func checkModelRPM(c *gin.Context, group *model.GroupCache, requestModel string, ) return false } + + if modelTPM > 0 { + tpm, err := model.CacheGetGroupModelTPM(group.ID, requestModel) + if err != nil { + log.Errorf("get group model tpm (%s:%s) error: %s", group.ID, requestModel, err.Error()) + // ignore error + return true + } + + if tpm >= modelTPM { + abortWithMessage(c, http.StatusTooManyRequests, + group.ID+" tpm is too high", + ) + return false + } + } return true } @@ -101,7 +118,7 @@ func Distribute(c *gin.Context) { if mode == relaymode.Unknown { abortWithMessage(c, http.StatusServiceUnavailable, - fmt.Sprintf("%s api not implemented", c.Request.URL.Path), + c.Request.URL.Path+" api not implemented", ) return } @@ -123,7 
+140,7 @@ func Distribute(c *gin.Context) { return } - if !checkModelRPM(c, group, requestModel, mc.RPM) { + if !checkGroupModelRPMAndTPM(c, group, requestModel, mc.RPM, mc.TPM) { return } diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 159250ac0c8..235acff60d5 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -22,9 +22,10 @@ import ( ) const ( - SyncFrequency = time.Minute * 3 - TokenCacheKey = "token:%s" - GroupCacheKey = "group:%s" + SyncFrequency = time.Minute * 3 + TokenCacheKey = "token:%s" + GroupCacheKey = "group:%s" + GroupModelTPMKey = "group:%s:model_tpm" ) var ( @@ -294,6 +295,37 @@ func CacheUpdateGroupUsedAmountOnlyIncrease(id string, amount float64) error { return updateGroupUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, amount).Err() } +//nolint:gosec +func CacheGetGroupModelTPM(id string, model string) (int64, error) { + if !common.RedisEnabled { + return GetGroupModelTPM(id, model) + } + + cacheKey := fmt.Sprintf(GroupModelTPMKey, id) + tpm, err := common.RDB.HGet(context.Background(), cacheKey, model).Int64() + if err == nil { + return tpm, nil + } else if !errors.Is(err, redis.Nil) { + log.Errorf("get group model tpm (%s:%s) from redis error: %s", id, model, err.Error()) + } + + tpm, err = GetGroupModelTPM(id, model) + if err != nil { + return 0, err + } + + pipe := common.RDB.Pipeline() + pipe.HSet(context.Background(), cacheKey, model, tpm) + // 2-5 seconds + pipe.Expire(context.Background(), cacheKey, 2*time.Second+time.Duration(rand.Int64N(3))*time.Second) + _, err = pipe.Exec(context.Background()) + if err != nil { + log.Errorf("set group model tpm (%s:%s) to redis error: %s", id, model, err.Error()) + } + + return tpm, nil +} + var ( enabledModel2channels map[string][]*Channel enabledModels []string diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index e2bbba7b9ba..4e2cfc8d005 100644 
--- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -869,3 +869,15 @@ func GetTokenLastRequestTime(id int) (time.Time, error) { err := tx.Where("token_id = ?", id).Order("request_at desc").First(&log).Error return log.RequestAt, err } + +func GetGroupModelTPM(group string, model string) (int64, error) { + end := time.Now() + start := end.Add(-time.Minute) + var tpm int64 + err := LogDB. + Model(&Log{}). + Where("group_id = ? AND request_at >= ? AND request_at <= ? AND model = ?", group, start, end, model). + Select("COALESCE(SUM(prompt_tokens + completion_tokens), 0)"). + Scan(&tpm).Error + return tpm, err +} From 6df69dac8e2390311d9b6b21ada19b6db45e5fdb Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 13:59:37 +0800 Subject: [PATCH 062/167] feat: decoupling url paths --- service/aiproxy/controller/channel-test.go | 6 +- service/aiproxy/controller/relay.go | 64 ++++++++++++++-------- service/aiproxy/middleware/distributor.go | 12 +--- service/aiproxy/relay/controller/helper.go | 15 +---- service/aiproxy/relay/controller/image.go | 16 +++++- service/aiproxy/relay/controller/rerank.go | 2 - service/aiproxy/relay/controller/stt.go | 2 - service/aiproxy/relay/controller/text.go | 14 ++++- service/aiproxy/relay/controller/tts.go | 2 - service/aiproxy/relay/meta/meta.go | 7 +++ service/aiproxy/relay/model/misc.go | 19 ++++++- service/aiproxy/relay/relaymode/helper.go | 54 +++++++++--------- service/aiproxy/router/relay.go | 23 ++++---- 13 files changed, 137 insertions(+), 99 deletions(-) diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index fe94fad8cdf..4e4a6cfe176 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -51,7 +51,11 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe meta.WithRequestID(channelTestRequestID), meta.WithChannelTest(true), ) - bizErr := relayHelper(meta, newc) + 
relayController, ok := relayController(mode) + if !ok { + return nil, fmt.Errorf("relay mode %d not implemented", mode) + } + bizErr := relayController(meta, newc) success := bizErr == nil var respStr string var code int diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index a6e20554f73..3edac7b44a8 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -22,27 +22,38 @@ import ( // https://platform.openai.com/docs/api-reference/chat -func relayHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { - log := middleware.GetLogger(c) - middleware.SetLogFieldsFromMeta(meta, log.Data) - switch meta.Mode { - case relaymode.ImagesGenerations: - return controller.RelayImageHelper(meta, c) +type RelayController func(*meta.Meta, *gin.Context) *model.ErrorWithStatusCode + +func relayController(mode int) (RelayController, bool) { + var relayController RelayController + switch mode { + case relaymode.ImagesGenerations, + relaymode.Edits: + relayController = controller.RelayImageHelper case relaymode.AudioSpeech: - return controller.RelayTTSHelper(meta, c) - case relaymode.AudioTranslation: - return controller.RelaySTTHelper(meta, c) - case relaymode.AudioTranscription: - return controller.RelaySTTHelper(meta, c) + relayController = controller.RelayTTSHelper + case relaymode.AudioTranslation, + relaymode.AudioTranscription: + relayController = controller.RelaySTTHelper case relaymode.Rerank: - return controller.RerankHelper(meta, c) + relayController = controller.RerankHelper + case relaymode.ChatCompletions, + relaymode.Embeddings, + relaymode.Completions, + relaymode.Moderations: + relayController = controller.RelayTextHelper default: - return controller.RelayTextHelper(meta, c) + return nil, false } + return func(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { + log := middleware.GetLogger(c) + middleware.SetLogFieldsFromMeta(meta, log.Data) + return relayController(meta, 
c) + }, true } -func RelayHelper(meta *meta.Meta, c *gin.Context) (*model.ErrorWithStatusCode, bool) { - err := relayHelper(meta, c) +func RelayHelper(meta *meta.Meta, c *gin.Context, relayController RelayController) (*model.ErrorWithStatusCode, bool) { + err := relayController(meta, c) if err == nil { if err := monitor.AddRequest( c.Request.Context(), @@ -79,7 +90,17 @@ func getChannelWithFallback(model string, failedChannelIDs ...int) (*dbmodel.Cha return dbmodel.CacheGetRandomSatisfiedChannel(model) } -func Relay(c *gin.Context) { +func NewRelay(mode int) func(c *gin.Context) { + relayController, ok := relayController(mode) + if !ok { + log.Fatalf("relay mode %d not implemented", mode) + } + return func(c *gin.Context) { + relay(c, mode, relayController) + } +} + +func relay(c *gin.Context, mode int, relayController RelayController) { log := middleware.GetLogger(c) requestModel := c.MustGet(string(ctxkey.OriginalModel)).(string) @@ -108,9 +129,8 @@ func Relay(c *gin.Context) { return } - mode := relaymode.GetByPath(c.Request.URL.Path) meta := middleware.NewMetaByContext(c, channel, requestModel, mode) - bizErr, retry := RelayHelper(meta, c) + bizErr, retry := RelayHelper(meta, c, relayController) if bizErr == nil { return } @@ -139,7 +159,7 @@ func Relay(c *gin.Context) { } c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) meta.Reset(newChannel) - bizErr, retry = RelayHelper(meta, c) + bizErr, retry = RelayHelper(meta, c, relayController) if bizErr == nil { return } @@ -149,10 +169,8 @@ func Relay(c *gin.Context) { failedChannelIDs = append(failedChannelIDs, newChannel.ID) } if bizErr != nil { - bizErr.Message = middleware.MessageWithRequestID(bizErr.Message, requestID) - c.JSON(bizErr.StatusCode, gin.H{ - "error": bizErr, - }) + bizErr.Error.Message = middleware.MessageWithRequestID(bizErr.Error.Message, requestID) + c.JSON(bizErr.StatusCode, bizErr) } } diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go 
index 8de7318daff..ac2ce9b8219 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -11,7 +11,6 @@ import ( "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" - "github.com/labring/sealos/service/aiproxy/relay/relaymode" log "github.com/sirupsen/logrus" ) @@ -114,15 +113,6 @@ func Distribute(c *gin.Context) { SetLogModelFields(log.Data, requestModel) - mode := relaymode.GetByPath(c.Request.URL.Path) - if mode == relaymode.Unknown { - abortWithMessage(c, - http.StatusServiceUnavailable, - c.Request.URL.Path+" api not implemented", - ) - return - } - token := c.MustGet(ctxkey.Token).(*model.TokenCache) if len(token.Models) == 0 || !slices.Contains(token.Models, requestModel) { abortWithMessage(c, @@ -153,6 +143,7 @@ func NewMetaByContext(c *gin.Context, channel *model.Channel, modelName string, requestID := c.GetString(ctxkey.RequestID) group := c.MustGet(ctxkey.Group).(*model.GroupCache) token := c.MustGet(ctxkey.Token).(*model.TokenCache) + return meta.NewMeta( channel, mode, @@ -160,5 +151,6 @@ func NewMetaByContext(c *gin.Context, channel *model.Channel, modelName string, meta.WithRequestID(requestID), meta.WithGroup(group), meta.WithToken(token), + meta.WithEndpoint(c.Request.URL.Path), ) } diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index a1d2bae499c..53bb69387ff 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -3,7 +3,6 @@ package controller import ( "bytes" "context" - "encoding/json" "errors" "io" "net/http" @@ -72,7 +71,6 @@ func postConsumeAmount( consumeWaitGroup *sync.WaitGroup, postGroupConsumer balance.PostGroupConsumer, code int, - endpoint string, usage *relaymodel.Usage, meta *meta.Meta, price, @@ -101,7 +99,7 @@ func postConsumeAmount( 0, price, completionPrice, - 
endpoint, + meta.Endpoint, content, meta.Mode, requestDetail, @@ -161,7 +159,7 @@ func postConsumeAmount( amount, price, completionPrice, - endpoint, + meta.Endpoint, content, meta.Mode, requestDetail, @@ -309,14 +307,7 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U detail.ResponseBody = rw.body.String() if relayErr != nil { if detail.ResponseBody == "" { - respData, err := json.Marshal(gin.H{ - "error": relayErr.Error, - }) - if err != nil { - detail.ResponseBody = relayErr.Error.String() - } else { - detail.ResponseBody = conv.BytesToString(respData) - } + detail.ResponseBody = relayErr.JSON() } return nil, &detail, relayErr } diff --git a/service/aiproxy/relay/controller/image.go b/service/aiproxy/relay/controller/image.go index a983c569556..86f4ff4a91e 100644 --- a/service/aiproxy/relay/controller/image.go +++ b/service/aiproxy/relay/controller/image.go @@ -58,7 +58,19 @@ func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStat imageRequest, err := getImageRequest(c) if err != nil { - log.Errorf("getImageRequest failed: %s", err.Error()) + log.Errorf("get image request failed: %s", err.Error()) + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + nil, + http.StatusOK, + nil, + meta, + 0, + 0, + imageRequest.Size, + nil, + ) return openai.ErrorWrapper(err, "invalid_image_request", http.StatusBadRequest) } @@ -108,7 +120,6 @@ func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStat &ConsumeWaitGroup, postGroupConsumer, respErr.StatusCode, - c.Request.URL.Path, usage, meta, imageCostPrice, @@ -124,7 +135,6 @@ func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStat &ConsumeWaitGroup, postGroupConsumer, http.StatusOK, - c.Request.URL.Path, usage, meta, imageCostPrice, diff --git a/service/aiproxy/relay/controller/rerank.go b/service/aiproxy/relay/controller/rerank.go index fbffcc772a2..b46a48f439a 100644 --- 
a/service/aiproxy/relay/controller/rerank.go +++ b/service/aiproxy/relay/controller/rerank.go @@ -68,7 +68,6 @@ func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCo &ConsumeWaitGroup, postGroupConsumer, http.StatusInternalServerError, - c.Request.URL.Path, usage, meta, price, @@ -84,7 +83,6 @@ func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCo &ConsumeWaitGroup, postGroupConsumer, http.StatusOK, - c.Request.URL.Path, usage, meta, price, diff --git a/service/aiproxy/relay/controller/stt.go b/service/aiproxy/relay/controller/stt.go index f351f8d6278..07890a885d3 100644 --- a/service/aiproxy/relay/controller/stt.go +++ b/service/aiproxy/relay/controller/stt.go @@ -58,7 +58,6 @@ func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus &ConsumeWaitGroup, postGroupConsumer, respErr.StatusCode, - c.Request.URL.Path, usage, meta, price, @@ -74,7 +73,6 @@ func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus &ConsumeWaitGroup, postGroupConsumer, http.StatusOK, - c.Request.URL.Path, usage, meta, price, diff --git a/service/aiproxy/relay/controller/text.go b/service/aiproxy/relay/controller/text.go index 26426fc113b..7f7e47b7d9a 100644 --- a/service/aiproxy/relay/controller/text.go +++ b/service/aiproxy/relay/controller/text.go @@ -24,6 +24,18 @@ func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode textRequest, err := utils.UnmarshalGeneralOpenAIRequest(c.Request) if err != nil { log.Errorf("get and validate text request failed: %s", err.Error()) + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + nil, + http.StatusBadRequest, + nil, + meta, + 0, + 0, + err.Error(), + nil, + ) return openai.ErrorWrapper(err, "invalid_text_request", http.StatusBadRequest) } @@ -70,7 +82,6 @@ func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode &ConsumeWaitGroup, postGroupConsumer, 
respErr.StatusCode, - c.Request.URL.Path, usage, meta, price, @@ -86,7 +97,6 @@ func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode &ConsumeWaitGroup, postGroupConsumer, http.StatusOK, - c.Request.URL.Path, usage, meta, price, diff --git a/service/aiproxy/relay/controller/tts.go b/service/aiproxy/relay/controller/tts.go index 41571b95fce..3d8838c2afa 100644 --- a/service/aiproxy/relay/controller/tts.go +++ b/service/aiproxy/relay/controller/tts.go @@ -65,7 +65,6 @@ func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus &ConsumeWaitGroup, postGroupConsumer, respErr.StatusCode, - c.Request.URL.Path, usage, meta, price, @@ -81,7 +80,6 @@ func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus &ConsumeWaitGroup, postGroupConsumer, http.StatusOK, - c.Request.URL.Path, usage, meta, price, diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index 17e893bca4c..d077a471a35 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -21,6 +21,7 @@ type Meta struct { Group *model.GroupCache Token *model.TokenCache + Endpoint string RequestAt time.Time RequestID string OriginModelName string @@ -32,6 +33,12 @@ type Meta struct { type Option func(meta *Meta) +func WithEndpoint(endpoint string) Option { + return func(meta *Meta) { + meta.Endpoint = endpoint + } +} + func WithChannelTest(isChannelTest bool) Option { return func(meta *Meta) { meta.IsChannelTest = isChannelTest diff --git a/service/aiproxy/relay/model/misc.go b/service/aiproxy/relay/model/misc.go index 21252028680..15793353362 100644 --- a/service/aiproxy/relay/model/misc.go +++ b/service/aiproxy/relay/model/misc.go @@ -1,6 +1,11 @@ package model -import "fmt" +import ( + "fmt" + + json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common/conv" +) type Usage struct { PromptTokens int `json:"prompt_tokens"` @@ -24,10 +29,18 @@ func (e *Error) 
Error() string { } type ErrorWithStatusCode struct { - Error - StatusCode int `json:"status_code"` + Error Error `json:"error"` + StatusCode int `json:"-"` } func (e *ErrorWithStatusCode) String() string { return fmt.Sprintf("%s, status_code: %d", e.Error.String(), e.StatusCode) } + +func (e *ErrorWithStatusCode) JSON() string { + json, err := json.Marshal(e) + if err != nil { + return "" + } + return conv.BytesToString(json) +} diff --git a/service/aiproxy/relay/relaymode/helper.go b/service/aiproxy/relay/relaymode/helper.go index 7a83ec53f73..20aa30a33e8 100644 --- a/service/aiproxy/relay/relaymode/helper.go +++ b/service/aiproxy/relay/relaymode/helper.go @@ -1,30 +1,28 @@ package relaymode -import "strings" - -func GetByPath(path string) int { - switch { - case strings.HasPrefix(path, "/v1/chat/completions"): - return ChatCompletions - case strings.HasPrefix(path, "/v1/completions"): - return Completions - case strings.HasSuffix(path, "embeddings"): - return Embeddings - case strings.HasPrefix(path, "/v1/moderations"): - return Moderations - case strings.HasPrefix(path, "/v1/images/generations"): - return ImagesGenerations - case strings.HasPrefix(path, "/v1/edits"): - return Edits - case strings.HasPrefix(path, "/v1/audio/speech"): - return AudioSpeech - case strings.HasPrefix(path, "/v1/audio/transcriptions"): - return AudioTranscription - case strings.HasPrefix(path, "/v1/audio/translations"): - return AudioTranslation - case strings.HasPrefix(path, "/v1/rerank"): - return Rerank - default: - return Unknown - } -} +// func GetByPath(path string) int { +// switch { +// case strings.HasPrefix(path, "/v1/chat/completions"): +// return ChatCompletions +// case strings.HasPrefix(path, "/v1/completions"): +// return Completions +// case strings.HasSuffix(path, "embeddings"): +// return Embeddings +// case strings.HasPrefix(path, "/v1/moderations"): +// return Moderations +// case strings.HasPrefix(path, "/v1/images/generations"): +// return ImagesGenerations +// 
case strings.HasPrefix(path, "/v1/edits"): +// return Edits +// case strings.HasPrefix(path, "/v1/audio/speech"): +// return AudioSpeech +// case strings.HasPrefix(path, "/v1/audio/transcriptions"): +// return AudioTranscription +// case strings.HasPrefix(path, "/v1/audio/translations"): +// return AudioTranslation +// case strings.HasPrefix(path, "/v1/rerank"): +// return Rerank +// default: +// return Unknown +// } +// } diff --git a/service/aiproxy/router/relay.go b/service/aiproxy/router/relay.go index bbeef8dd275..32f54850c77 100644 --- a/service/aiproxy/router/relay.go +++ b/service/aiproxy/router/relay.go @@ -3,6 +3,7 @@ package router import ( "github.com/labring/sealos/service/aiproxy/controller" "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/gin-gonic/gin" ) @@ -25,18 +26,18 @@ func SetRelayRouter(router *gin.Engine) { relayV1Router := router.Group("/v1") relayV1Router.Use(middleware.TokenAuth, middleware.Distribute) { - relayV1Router.POST("/completions", controller.Relay) - relayV1Router.POST("/chat/completions", controller.Relay) - relayV1Router.POST("/edits", controller.Relay) - relayV1Router.POST("/images/generations", controller.Relay) + relayV1Router.POST("/completions", controller.NewRelay(relaymode.Completions)) + relayV1Router.POST("/chat/completions", controller.NewRelay(relaymode.ChatCompletions)) + relayV1Router.POST("/edits", controller.NewRelay(relaymode.Edits)) + relayV1Router.POST("/images/generations", controller.NewRelay(relaymode.ImagesGenerations)) relayV1Router.POST("/images/edits", controller.RelayNotImplemented) relayV1Router.POST("/images/variations", controller.RelayNotImplemented) - relayV1Router.POST("/embeddings", controller.Relay) - relayV1Router.POST("/engines/:model/embeddings", controller.Relay) - relayV1Router.POST("/audio/transcriptions", controller.Relay) - relayV1Router.POST("/audio/translations", controller.Relay) - 
relayV1Router.POST("/audio/speech", controller.Relay) - relayV1Router.POST("/rerank", controller.Relay) + relayV1Router.POST("/embeddings", controller.NewRelay(relaymode.Embeddings)) + relayV1Router.POST("/engines/:model/embeddings", controller.NewRelay(relaymode.Embeddings)) + relayV1Router.POST("/audio/transcriptions", controller.NewRelay(relaymode.AudioTranscription)) + relayV1Router.POST("/audio/translations", controller.NewRelay(relaymode.AudioTranslation)) + relayV1Router.POST("/audio/speech", controller.NewRelay(relaymode.AudioSpeech)) + relayV1Router.POST("/rerank", controller.NewRelay(relaymode.Rerank)) relayV1Router.GET("/files", controller.RelayNotImplemented) relayV1Router.POST("/files", controller.RelayNotImplemented) relayV1Router.DELETE("/files/:id", controller.RelayNotImplemented) @@ -48,7 +49,7 @@ func SetRelayRouter(router *gin.Engine) { relayV1Router.POST("/fine_tuning/jobs/:id/cancel", controller.RelayNotImplemented) relayV1Router.GET("/fine_tuning/jobs/:id/events", controller.RelayNotImplemented) relayV1Router.DELETE("/models/:model", controller.RelayNotImplemented) - relayV1Router.POST("/moderations", controller.Relay) + relayV1Router.POST("/moderations", controller.NewRelay(relaymode.Moderations)) relayV1Router.POST("/assistants", controller.RelayNotImplemented) relayV1Router.GET("/assistants/:id", controller.RelayNotImplemented) relayV1Router.POST("/assistants/:id", controller.RelayNotImplemented) From 96094f43ebc9e621cfba8f83bfab1c2d4fa7ac21 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 15:09:23 +0800 Subject: [PATCH 063/167] fix: check balance --- service/aiproxy/relay/adaptor/ali/rerank.go | 4 +- .../aiproxy/relay/adaptor/cohere/adaptor.go | 2 +- service/aiproxy/relay/adaptor/coze/adaptor.go | 6 +- .../aiproxy/relay/adaptor/doubaoaudio/tts.go | 4 +- service/aiproxy/relay/adaptor/gemini/main.go | 6 +- service/aiproxy/relay/adaptor/minimax/tts.go | 4 +- service/aiproxy/relay/adaptor/openai/main.go | 8 +- 
.../relay/adaptor/openai/moderations.go | 4 +- .../aiproxy/relay/adaptor/openai/rerank.go | 6 +- service/aiproxy/relay/adaptor/openai/tts.go | 4 +- service/aiproxy/relay/controller/helper.go | 36 ++++----- service/aiproxy/relay/controller/image.go | 81 ++++++++++--------- service/aiproxy/relay/controller/rerank.go | 59 ++++++++++---- service/aiproxy/relay/controller/stt.go | 23 +++--- service/aiproxy/relay/controller/text.go | 75 +++++++++++------ service/aiproxy/relay/controller/tts.go | 57 +++++++++---- service/aiproxy/relay/meta/meta.go | 2 +- service/aiproxy/relay/model/misc.go | 11 --- 18 files changed, 230 insertions(+), 162 deletions(-) diff --git a/service/aiproxy/relay/adaptor/ali/rerank.go b/service/aiproxy/relay/adaptor/ali/rerank.go index a33413d8500..ef6d5fb6fab 100644 --- a/service/aiproxy/relay/adaptor/ali/rerank.go +++ b/service/aiproxy/relay/adaptor/ali/rerank.go @@ -86,9 +86,9 @@ func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relay var usage *relaymodel.Usage if rerankResponse.Usage == nil { usage = &relaymodel.Usage{ - PromptTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, CompletionTokens: 0, - TotalTokens: meta.PromptTokens, + TotalTokens: meta.InputTokens, } } else { usage = &relaymodel.Usage{ diff --git a/service/aiproxy/relay/adaptor/cohere/adaptor.go b/service/aiproxy/relay/adaptor/cohere/adaptor.go index 896bded7615..7b1ddd3f967 100644 --- a/service/aiproxy/relay/adaptor/cohere/adaptor.go +++ b/service/aiproxy/relay/adaptor/cohere/adaptor.go @@ -62,7 +62,7 @@ func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Respons if utils.IsStreamResponse(resp) { err, usage = StreamHandler(c, resp) } else { - err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName) + err, usage = Handler(c, resp, meta.InputTokens, meta.ActualModelName) } } return diff --git a/service/aiproxy/relay/adaptor/coze/adaptor.go b/service/aiproxy/relay/adaptor/coze/adaptor.go index 
cdc3b7d45b2..b1bf41a138d 100644 --- a/service/aiproxy/relay/adaptor/coze/adaptor.go +++ b/service/aiproxy/relay/adaptor/coze/adaptor.go @@ -84,14 +84,14 @@ func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Respons if utils.IsStreamResponse(resp) { err, responseText = StreamHandler(c, resp) } else { - err, responseText = Handler(c, resp, meta.PromptTokens, meta.ActualModelName) + err, responseText = Handler(c, resp, meta.InputTokens, meta.ActualModelName) } if responseText != nil { - usage = openai.ResponseText2Usage(*responseText, meta.ActualModelName, meta.PromptTokens) + usage = openai.ResponseText2Usage(*responseText, meta.ActualModelName, meta.InputTokens) } else { usage = &relaymodel.Usage{} } - usage.PromptTokens = meta.PromptTokens + usage.PromptTokens = meta.InputTokens usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens return } diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/tts.go b/service/aiproxy/relay/adaptor/doubaoaudio/tts.go index 1f6ff2e81d2..399c0c8e7e8 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/tts.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/tts.go @@ -171,8 +171,8 @@ func TTSDoResponse(meta *meta.Meta, c *gin.Context, _ *http.Response) (*relaymod defer conn.Close() usage := &relaymodel.Usage{ - PromptTokens: meta.PromptTokens, - TotalTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, + TotalTokens: meta.InputTokens, } for { diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 6416523dbcd..77d9968da39 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -202,7 +202,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io if err != nil { return "", nil, nil, err } - meta.PromptTokens = tokenCount + meta.InputTokens = tokenCount // Build actual request geminiRequest := ChatRequest{ @@ -438,7 +438,7 @@ func StreamHandler(meta 
*meta.Meta, c *gin.Context, resp *http.Response) (*model render.Done(c) usage := model.Usage{ - PromptTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, } tokenCount, err := CountTokens(c.Request.Context(), meta, respContent) @@ -473,7 +473,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage } usage := model.Usage{ - PromptTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, } tokenCount, err := CountTokens(c.Request.Context(), meta, respContent) if err != nil { diff --git a/service/aiproxy/relay/adaptor/minimax/tts.go b/service/aiproxy/relay/adaptor/minimax/tts.go index 0ab53ef78a7..2dd69d9ebb7 100644 --- a/service/aiproxy/relay/adaptor/minimax/tts.go +++ b/service/aiproxy/relay/adaptor/minimax/tts.go @@ -140,7 +140,7 @@ func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymod log.Error("write response body failed: " + err.Error()) } - usageCharacters := meta.PromptTokens + usageCharacters := meta.InputTokens if result.ExtraInfo.UsageCharacters > 0 { usageCharacters = result.ExtraInfo.UsageCharacters } @@ -161,7 +161,7 @@ func ttsStreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*re scanner := bufio.NewScanner(resp.Body) scanner.Split(bufio.ScanLines) - usageCharacters := meta.PromptTokens + usageCharacters := meta.InputTokens for scanner.Scan() { data := scanner.Text() diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index bb670ee6a76..e466a8aefb7 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -111,12 +111,12 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model render.Done(c) if usage == nil || (usage.TotalTokens == 0 && responseText != "") { - usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens) + usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.InputTokens) } if 
usage.TotalTokens != 0 && usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens - usage.PromptTokens = meta.PromptTokens - usage.CompletionTokens = usage.TotalTokens - meta.PromptTokens + usage.PromptTokens = meta.InputTokens + usage.CompletionTokens = usage.TotalTokens - meta.InputTokens } return usage, nil @@ -146,7 +146,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage completionTokens += CountTokenText(choice.Message.StringContent(), meta.ActualModelName) } textResponse.Usage = model.Usage{ - PromptTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, CompletionTokens: completionTokens, } } diff --git a/service/aiproxy/relay/adaptor/openai/moderations.go b/service/aiproxy/relay/adaptor/openai/moderations.go index 877d378f99c..b462a198193 100644 --- a/service/aiproxy/relay/adaptor/openai/moderations.go +++ b/service/aiproxy/relay/adaptor/openai/moderations.go @@ -41,8 +41,8 @@ func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (* } usage := &model.Usage{ - PromptTokens: meta.PromptTokens, - TotalTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, + TotalTokens: meta.InputTokens, } newData, err := stdjson.Marshal(respMap) diff --git a/service/aiproxy/relay/adaptor/openai/rerank.go b/service/aiproxy/relay/adaptor/openai/rerank.go index b92e31beb2b..f4774c7d847 100644 --- a/service/aiproxy/relay/adaptor/openai/rerank.go +++ b/service/aiproxy/relay/adaptor/openai/rerank.go @@ -51,13 +51,13 @@ func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model if rerankResponse.Meta.Tokens == nil { return &model.Usage{ - PromptTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, CompletionTokens: 0, - TotalTokens: meta.PromptTokens, + TotalTokens: meta.InputTokens, }, nil } if rerankResponse.Meta.Tokens.InputTokens <= 0 { - rerankResponse.Meta.Tokens.InputTokens = meta.PromptTokens + rerankResponse.Meta.Tokens.InputTokens = 
meta.InputTokens } return &model.Usage{ PromptTokens: rerankResponse.Meta.Tokens.InputTokens, diff --git a/service/aiproxy/relay/adaptor/openai/tts.go b/service/aiproxy/relay/adaptor/openai/tts.go index df7f3ee472f..08f3fa07c20 100644 --- a/service/aiproxy/relay/adaptor/openai/tts.go +++ b/service/aiproxy/relay/adaptor/openai/tts.go @@ -50,8 +50,8 @@ func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymod log.Error("write response body failed: " + err.Error()) } return &relaymodel.Usage{ - PromptTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, CompletionTokens: 0, - TotalTokens: meta.PromptTokens, + TotalTokens: meta.InputTokens, }, nil } diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 53bb69387ff..9c457a2defa 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -29,16 +29,16 @@ import ( var ConsumeWaitGroup sync.WaitGroup type PreCheckGroupBalanceReq struct { - PromptTokens int - MaxTokens int - Price float64 + InputTokens int + MaxTokens int + Price float64 } func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { - if req.Price == 0 || (req.PromptTokens == 0 && req.MaxTokens == 0) { + if req == nil || req.Price == 0 || (req.InputTokens == 0 && req.MaxTokens == 0) { return 0 } - preConsumedTokens := int64(req.PromptTokens) + preConsumedTokens := int64(req.InputTokens) if req.MaxTokens != 0 { preConsumedTokens += int64(req.MaxTokens) } @@ -49,21 +49,22 @@ func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { InexactFloat64() } -func preCheckGroupBalance(ctx context.Context, req *PreCheckGroupBalanceReq, meta *meta.Meta) (bool, balance.PostGroupConsumer, error) { +func preCheckGroupBalance(req *PreCheckGroupBalanceReq, meta *meta.Meta, groupRemainBalance float64) bool { if meta.IsChannelTest { - return true, nil, nil + return true } preConsumedAmount := getPreConsumedAmount(req) - 
groupRemainBalance, postGroupConsumer, err := balance.Default.GetGroupRemainBalance(ctx, meta.Group.ID) - if err != nil { - return false, nil, err - } - if groupRemainBalance < preConsumedAmount { - return false, nil, nil + return groupRemainBalance > preConsumedAmount +} + +func getGroupBalance(ctx context.Context, meta *meta.Meta) (float64, balance.PostGroupConsumer, error) { + if meta.IsChannelTest { + return 0, nil, nil } - return true, postGroupConsumer, nil + + return balance.Default.GetGroupRemainBalance(ctx, meta.Group.ID) } func postConsumeAmount( @@ -306,15 +307,12 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U // copy buf to detail.ResponseBody detail.ResponseBody = rw.body.String() if relayErr != nil { - if detail.ResponseBody == "" { - detail.ResponseBody = relayErr.JSON() - } return nil, &detail, relayErr } if usage == nil { usage = &relaymodel.Usage{ - PromptTokens: meta.PromptTokens, - TotalTokens: meta.PromptTokens, + PromptTokens: meta.InputTokens, + TotalTokens: meta.InputTokens, } } if usage.TotalTokens == 0 { diff --git a/service/aiproxy/relay/controller/image.go b/service/aiproxy/relay/controller/image.go index 86f4ff4a91e..d485adc3705 100644 --- a/service/aiproxy/relay/controller/image.go +++ b/service/aiproxy/relay/controller/image.go @@ -7,8 +7,11 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -22,26 +25,19 @@ func getImageRequest(c *gin.Context) (*relaymodel.ImageRequest, error) { if err != nil { return nil, err } - if imageRequest.N == 0 { - 
imageRequest.N = 1 + if imageRequest.Prompt == "" { + return nil, errors.New("prompt is required") } if imageRequest.Size == "" { return nil, errors.New("size is required") } - return imageRequest, nil -} - -func validateImageRequest(imageRequest *relaymodel.ImageRequest) *relaymodel.ErrorWithStatusCode { - // check prompt length - if imageRequest.Prompt == "" { - return openai.ErrorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest) + if imageRequest.N == 0 { + imageRequest.N = 1 } - - // Number of generated images validation if err := billingprice.ValidateImageMaxBatchSize(imageRequest.Model, imageRequest.N); err != nil { - return openai.ErrorWrapper(err, "n_not_within_range", http.StatusBadRequest) + return nil, err } - return nil + return imageRequest, nil } func getImageCostPrice(modelName string, reqModel string, size string) (float64, error) { @@ -56,53 +52,58 @@ func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStat log := middleware.GetLogger(c) ctx := c.Request.Context() + adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) + if !ok { + log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) + return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) + } + + groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) + if err != nil { + log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) + return openai.ErrorWrapper( + fmt.Errorf("get group (%s) balance failed", meta.Group.ID), + "get_group_quota_failed", + http.StatusInternalServerError, + ) + } + imageRequest, err := getImageRequest(c) if err != nil { - log.Errorf("get image request failed: %s", err.Error()) + log.Errorf("get request failed: %s", err.Error()) + var detail model.RequestDetail + reqDetail, err := common.GetRequestBody(c.Request) + if err != nil { + log.Errorf("get request body failed: %s", 
err.Error()) + } else { + detail.RequestBody = conv.BytesToString(reqDetail) + } ConsumeWaitGroup.Add(1) go postConsumeAmount(context.Background(), &ConsumeWaitGroup, nil, - http.StatusOK, + http.StatusBadRequest, nil, meta, 0, 0, - imageRequest.Size, - nil, + err.Error(), + &detail, ) return openai.ErrorWrapper(err, "invalid_image_request", http.StatusBadRequest) } - meta.PromptTokens = imageRequest.N - - bizErr := validateImageRequest(imageRequest) - if bizErr != nil { - return bizErr - } - imageCostPrice, err := getImageCostPrice(meta.OriginModelName, meta.ActualModelName, imageRequest.Size) if err != nil { return openai.ErrorWrapper(err, "get_image_cost_price_failed", http.StatusInternalServerError) } - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) - if !ok { - return openai.ErrorWrapper(fmt.Errorf("invalid channel type: %d", meta.Channel.Type), "invalid_channel_type", http.StatusBadRequest) - } + meta.InputTokens = imageRequest.N - ok, postGroupConsumer, err := preCheckGroupBalance(ctx, &PreCheckGroupBalanceReq{ - PromptTokens: meta.PromptTokens, - Price: imageCostPrice, - }, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } + ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ + InputTokens: meta.InputTokens, + Price: imageCostPrice, + }, meta, groupRemainBalance) if !ok { return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) } diff --git a/service/aiproxy/relay/controller/rerank.go b/service/aiproxy/relay/controller/rerank.go index b46a48f439a..8a9f5b783c7 100644 --- a/service/aiproxy/relay/controller/rerank.go +++ b/service/aiproxy/relay/controller/rerank.go @@ -8,8 +8,11 @@ import ( "strings" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common" 
"github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -22,23 +25,13 @@ func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCo log := middleware.GetLogger(c) ctx := c.Request.Context() - rerankRequest, err := getRerankRequest(c) - if err != nil { - log.Errorf("get rerank request failed: %s", err.Error()) - return openai.ErrorWrapper(err, "invalid_rerank_request", http.StatusBadRequest) - } - - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) + adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) if !ok { - return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) + log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) + return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) } - meta.PromptTokens = rerankPromptTokens(rerankRequest) - - ok, postGroupConsumer, err := preCheckGroupBalance(ctx, &PreCheckGroupBalanceReq{ - PromptTokens: meta.PromptTokens, - Price: price, - }, meta) + groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) if err != nil { log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) return openai.ErrorWrapper( @@ -47,13 +40,45 @@ func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCo http.StatusInternalServerError, ) } + + rerankRequest, err := getRerankRequest(c) + if err != nil { + log.Errorf("get request failed: %s", err.Error()) + var detail model.RequestDetail 
+ reqDetail, err := common.GetRequestBody(c.Request) + if err != nil { + log.Errorf("get request body failed: %s", err.Error()) + } else { + detail.RequestBody = conv.BytesToString(reqDetail) + } + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + nil, + http.StatusBadRequest, + nil, + meta, + 0, + 0, + err.Error(), + &detail, + ) + return openai.ErrorWrapper(err, "invalid_rerank_request", http.StatusBadRequest) + } + + price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) + return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) } - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) + meta.InputTokens = rerankPromptTokens(rerankRequest) + + ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ + InputTokens: meta.InputTokens, + Price: price, + }, meta, groupRemainBalance) if !ok { - return openai.ErrorWrapper(fmt.Errorf("invalid channel type: %d", meta.Channel.Type), "invalid_channel_type", http.StatusBadRequest) + return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) } usage, detail, respErr := DoHelper(adaptor, c, meta) diff --git a/service/aiproxy/relay/controller/stt.go b/service/aiproxy/relay/controller/stt.go index 07890a885d3..317df5ff755 100644 --- a/service/aiproxy/relay/controller/stt.go +++ b/service/aiproxy/relay/controller/stt.go @@ -22,18 +22,11 @@ func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) if !ok { - return openai.ErrorWrapper(fmt.Errorf("invalid channel type: %d", meta.Channel.Type), "invalid_channel_type", http.StatusBadRequest) + log.Errorf("invalid (%s[%d]) channel type: 
%d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) + return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) } - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) - if !ok { - return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) - } - - ok, postGroupConsumer, err := preCheckGroupBalance(ctx, &PreCheckGroupBalanceReq{ - PromptTokens: meta.PromptTokens, - Price: price, - }, meta) + groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) if err != nil { log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) return openai.ErrorWrapper( @@ -42,6 +35,16 @@ func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus http.StatusInternalServerError, ) } + + price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) + if !ok { + return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) + } + + ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ + InputTokens: meta.InputTokens, + Price: price, + }, meta, groupRemainBalance) if !ok { return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) } diff --git a/service/aiproxy/relay/controller/text.go b/service/aiproxy/relay/controller/text.go index 7f7e47b7d9a..8b6af6b9e6a 100644 --- a/service/aiproxy/relay/controller/text.go +++ b/service/aiproxy/relay/controller/text.go @@ -7,23 +7,49 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" + 
"github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" - "github.com/labring/sealos/service/aiproxy/relay/model" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" billingprice "github.com/labring/sealos/service/aiproxy/relay/price" "github.com/labring/sealos/service/aiproxy/relay/utils" ) -func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode { +func RelayTextHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { log := middleware.GetLogger(c) ctx := c.Request.Context() + adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) + if !ok { + log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) + return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) + } + + groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) + if err != nil { + log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) + return openai.ErrorWrapper( + fmt.Errorf("get group (%s) balance failed", meta.Group.ID), + "get_group_quota_failed", + http.StatusInternalServerError, + ) + } + textRequest, err := utils.UnmarshalGeneralOpenAIRequest(c.Request) if err != nil { - log.Errorf("get and validate text request failed: %s", err.Error()) + log.Errorf("get request failed: %s", err.Error()) + var detail model.RequestDetail + reqDetail, err := common.GetRequestBody(c.Request) + if err != nil { + log.Errorf("get request body failed: %s", err.Error()) + } else { + detail.RequestBody = conv.BytesToString(reqDetail) + } ConsumeWaitGroup.Add(1) go postConsumeAmount(context.Background(), &ConsumeWaitGroup, @@ -34,9 +60,9 @@ func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode 0, 0, err.Error(), - nil, + 
&detail, ) - return openai.ErrorWrapper(err, "invalid_text_request", http.StatusBadRequest) + return openai.ErrorWrapper(fmt.Errorf("get and validate text request failed: %s", err.Error()), "invalid_text_request", http.StatusBadRequest) } // get model price @@ -45,28 +71,27 @@ func RelayTextHelper(meta *meta.Meta, c *gin.Context) *model.ErrorWithStatusCode return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) } // pre-consume balance - promptTokens := openai.GetPromptTokens(meta, textRequest) - meta.PromptTokens = promptTokens - ok, postGroupConsumer, err := preCheckGroupBalance(ctx, &PreCheckGroupBalanceReq{ - PromptTokens: promptTokens, - MaxTokens: textRequest.MaxTokens, - Price: price, - }, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } - if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) - } + meta.InputTokens = openai.GetPromptTokens(meta, textRequest) - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) + ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ + InputTokens: meta.InputTokens, + MaxTokens: textRequest.MaxTokens, + Price: price, + }, meta, groupRemainBalance) if !ok { - return openai.ErrorWrapper(fmt.Errorf("invalid channel type: %d", meta.Channel.Type), "invalid_channel_type", http.StatusBadRequest) + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + postGroupConsumer, + http.StatusForbidden, + nil, + meta, + 0, + 0, + "group balance is not enough", + nil, + ) + return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) } // do response diff --git 
a/service/aiproxy/relay/controller/tts.go b/service/aiproxy/relay/controller/tts.go index 3d8838c2afa..0d7a9c03837 100644 --- a/service/aiproxy/relay/controller/tts.go +++ b/service/aiproxy/relay/controller/tts.go @@ -7,8 +7,11 @@ import ( "net/http" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" @@ -23,7 +26,18 @@ func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) if !ok { - return openai.ErrorWrapper(fmt.Errorf("invalid channel type: %d", meta.Channel.Type), "invalid_channel_type", http.StatusBadRequest) + log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) + return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) + } + + groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) + if err != nil { + log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) + return openai.ErrorWrapper( + fmt.Errorf("get group (%s) balance failed", meta.Group.ID), + "get_group_quota_failed", + http.StatusInternalServerError, + ) } price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) @@ -33,22 +47,35 @@ func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus ttsRequest, err := utils.UnmarshalTTSRequest(c.Request) if err != nil { - return openai.ErrorWrapper(err, "invalid_json", http.StatusBadRequest) - } - meta.PromptTokens = 
openai.CountTokenText(ttsRequest.Input, meta.ActualModelName) - - ok, postGroupConsumer, err := preCheckGroupBalance(ctx, &PreCheckGroupBalanceReq{ - PromptTokens: meta.PromptTokens, - Price: price, - }, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, + log.Errorf("get request failed: %s", err.Error()) + var detail model.RequestDetail + reqDetail, err := common.GetRequestBody(c.Request) + if err != nil { + log.Errorf("get request body failed: %s", err.Error()) + } else { + detail.RequestBody = conv.BytesToString(reqDetail) + } + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + nil, + http.StatusBadRequest, + nil, + meta, + 0, + 0, + err.Error(), + &detail, ) + return openai.ErrorWrapper(err, "invalid_tts_request", http.StatusBadRequest) } + + meta.InputTokens = openai.CountTokenText(ttsRequest.Input, meta.ActualModelName) + + ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ + InputTokens: meta.InputTokens, + Price: price, + }, meta, groupRemainBalance) if !ok { return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) } diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index d077a471a35..63cc4de07cb 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -27,7 +27,7 @@ type Meta struct { OriginModelName string ActualModelName string Mode int - PromptTokens int + InputTokens int IsChannelTest bool } diff --git a/service/aiproxy/relay/model/misc.go b/service/aiproxy/relay/model/misc.go index 15793353362..ff4029eeb73 100644 --- a/service/aiproxy/relay/model/misc.go +++ b/service/aiproxy/relay/model/misc.go @@ -2,9 +2,6 @@ package model import ( "fmt" - - json "github.com/json-iterator/go" - 
"github.com/labring/sealos/service/aiproxy/common/conv" ) type Usage struct { @@ -36,11 +33,3 @@ type ErrorWithStatusCode struct { func (e *ErrorWithStatusCode) String() string { return fmt.Sprintf("%s, status_code: %d", e.Error.String(), e.StatusCode) } - -func (e *ErrorWithStatusCode) JSON() string { - json, err := json.Marshal(e) - if err != nil { - return "" - } - return conv.BytesToString(json) -} From c75d4dac235e938e4fccf6cd92c041124175ab8f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 15:46:04 +0800 Subject: [PATCH 064/167] refactor: relay handler --- service/aiproxy/common/gin.go | 7 + service/aiproxy/controller/channel-test.go | 4 +- service/aiproxy/controller/relay.go | 4 +- service/aiproxy/middleware/auth.go | 4 +- service/aiproxy/relay/adaptor/ali/adaptor.go | 4 +- .../aiproxy/relay/adaptor/ali/embeddings.go | 4 +- service/aiproxy/relay/adaptor/ali/image.go | 2 +- service/aiproxy/relay/adaptor/ali/rerank.go | 2 +- .../aiproxy/relay/adaptor/ali/stt-realtime.go | 2 +- service/aiproxy/relay/adaptor/ali/tts.go | 2 +- .../relay/adaptor/anthropic/adaptor.go | 2 +- .../aiproxy/relay/adaptor/anthropic/main.go | 4 +- service/aiproxy/relay/adaptor/aws/adaptor.go | 2 +- .../aiproxy/relay/adaptor/aws/claude/main.go | 8 +- .../relay/adaptor/aws/llama3/adapter.go | 2 +- .../aiproxy/relay/adaptor/aws/llama3/main.go | 8 +- .../aiproxy/relay/adaptor/azure/constants.go | 2 +- .../aiproxy/relay/adaptor/baidu/adaptor.go | 4 +- .../aiproxy/relay/adaptor/baidu/embeddings.go | 2 +- service/aiproxy/relay/adaptor/baidu/main.go | 6 +- .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 6 +- .../relay/adaptor/cloudflare/adaptor.go | 4 +- .../aiproxy/relay/adaptor/cohere/adaptor.go | 4 +- service/aiproxy/relay/adaptor/coze/adaptor.go | 8 +- service/aiproxy/relay/adaptor/doubao/main.go | 2 +- .../aiproxy/relay/adaptor/gemini/adaptor.go | 4 +- .../relay/adaptor/gemini/embeddings.go | 2 +- service/aiproxy/relay/adaptor/gemini/main.go | 12 +- 
service/aiproxy/relay/adaptor/minimax/tts.go | 2 +- service/aiproxy/relay/adaptor/ollama/main.go | 4 +- .../aiproxy/relay/adaptor/openai/adaptor.go | 2 +- .../relay/adaptor/openai/embeddings.go | 2 +- service/aiproxy/relay/adaptor/openai/image.go | 2 +- service/aiproxy/relay/adaptor/openai/main.go | 12 +- .../relay/adaptor/openai/moderations.go | 4 +- .../aiproxy/relay/adaptor/openai/rerank.go | 2 +- service/aiproxy/relay/adaptor/openai/stt.go | 4 +- service/aiproxy/relay/adaptor/openai/tts.go | 2 +- .../relay/adaptor/siliconflow/image.go | 2 +- .../aiproxy/relay/adaptor/vertexai/adaptor.go | 12 +- .../aiproxy/relay/adaptor/xunfei/adaptor.go | 8 +- service/aiproxy/relay/controller/handle.go | 111 +++++++++++++++ service/aiproxy/relay/controller/helper.go | 27 ++-- service/aiproxy/relay/controller/image.go | 115 ++-------------- service/aiproxy/relay/controller/rerank.go | 113 ++-------------- service/aiproxy/relay/controller/stt.go | 82 ++--------- service/aiproxy/relay/controller/text.go | 128 ++---------------- service/aiproxy/relay/controller/tts.go | 113 ++-------------- service/aiproxy/relay/meta/meta.go | 26 ++-- service/aiproxy/relay/price/image.go | 10 +- service/aiproxy/relay/price/model.go | 10 +- 51 files changed, 299 insertions(+), 611 deletions(-) create mode 100644 service/aiproxy/relay/controller/handle.go diff --git a/service/aiproxy/common/gin.go b/service/aiproxy/common/gin.go index eb2c95f0123..e62b4eed63c 100644 --- a/service/aiproxy/common/gin.go +++ b/service/aiproxy/common/gin.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net/http" + "strings" "github.com/gin-gonic/gin" json "github.com/json-iterator/go" @@ -40,6 +41,12 @@ func (l *LimitedReader) Read(p []byte) (n int, err error) { } func GetRequestBody(req *http.Request) ([]byte, error) { + contentType := req.Header.Get("Content-Type") + if contentType == "application/x-www-form-urlencoded" || + strings.HasPrefix(contentType, "multipart/form-data") { + return nil, nil + } + requestBody := 
req.Context().Value(RequestBodyKey{}) if requestBody != nil { return requestBody.([]byte), nil diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 4e4a6cfe176..450145a2cc9 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -69,8 +69,8 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe return channel.UpdateModelTest( meta.RequestAt, - meta.OriginModelName, - meta.ActualModelName, + meta.OriginModel, + meta.ActualModel, meta.Mode, time.Since(meta.RequestAt).Seconds(), success, diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index 3edac7b44a8..edc3bef017a 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -57,7 +57,7 @@ func RelayHelper(meta *meta.Meta, c *gin.Context, relayController RelayControlle if err == nil { if err := monitor.AddRequest( c.Request.Context(), - meta.OriginModelName, + meta.OriginModel, int64(meta.Channel.ID), false, ); err != nil { @@ -68,7 +68,7 @@ func RelayHelper(meta *meta.Meta, c *gin.Context, relayController RelayControlle if shouldRetry(c, err.StatusCode) { if err := monitor.AddRequest( c.Request.Context(), - meta.OriginModelName, + meta.OriginModel, int64(meta.Channel.ID), true, ); err != nil { diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 3bcde94e124..b52acd57ebe 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -102,8 +102,8 @@ func SetLogFieldsFromMeta(m *meta.Meta, fields logrus.Fields) { SetLogRequestIDField(fields, m.RequestID) SetLogModeField(fields, m.Mode) - SetLogModelFields(fields, m.OriginModelName) - SetLogActualModelFields(fields, m.ActualModelName) + SetLogModelFields(fields, m.OriginModel) + SetLogActualModelFields(fields, m.ActualModel) if m.IsChannelTest { SetLogIsChannelTestField(fields, true) diff --git 
a/service/aiproxy/relay/adaptor/ali/adaptor.go b/service/aiproxy/relay/adaptor/ali/adaptor.go index a8837295c9c..76ebfa8fa68 100644 --- a/service/aiproxy/relay/adaptor/ali/adaptor.go +++ b/service/aiproxy/relay/adaptor/ali/adaptor.go @@ -76,7 +76,7 @@ func (a *Adaptor) DoRequest(meta *meta.Meta, _ *gin.Context, req *http.Request) case relaymode.AudioTranscription: return STTDoRequest(meta, req) case relaymode.ChatCompletions: - if meta.IsChannelTest && strings.Contains(meta.ActualModelName, "-ocr") { + if meta.IsChannelTest && strings.Contains(meta.ActualModel, "-ocr") { return &http.Response{ StatusCode: http.StatusOK, Body: io.NopCloser(bytes.NewReader(nil)), @@ -95,7 +95,7 @@ func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Respons case relaymode.ImagesGenerations: usage, err = ImageHandler(meta, c, resp) case relaymode.ChatCompletions: - if meta.IsChannelTest && strings.Contains(meta.ActualModelName, "-ocr") { + if meta.IsChannelTest && strings.Contains(meta.ActualModel, "-ocr") { return nil, nil } usage, err = openai.DoResponse(meta, c, resp) diff --git a/service/aiproxy/relay/adaptor/ali/embeddings.go b/service/aiproxy/relay/adaptor/ali/embeddings.go index c6863d536b4..66f4569cc18 100644 --- a/service/aiproxy/relay/adaptor/ali/embeddings.go +++ b/service/aiproxy/relay/adaptor/ali/embeddings.go @@ -21,7 +21,7 @@ func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http. 
if err != nil { return "", nil, nil, err } - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel input, ok := reqMap["input"] if !ok { return "", nil, nil, errors.New("input is required") @@ -56,7 +56,7 @@ func embeddingResponse2OpenAI(meta *meta.Meta, response *EmbeddingResponse) *ope openAIEmbeddingResponse := openai.EmbeddingResponse{ Object: "list", Data: make([]*openai.EmbeddingResponseItem, 0, 1), - Model: meta.OriginModelName, + Model: meta.OriginModel, Usage: response.Usage, } diff --git a/service/aiproxy/relay/adaptor/ali/image.go b/service/aiproxy/relay/adaptor/ali/image.go index 21d1574851c..4a8c12e1da2 100644 --- a/service/aiproxy/relay/adaptor/ali/image.go +++ b/service/aiproxy/relay/adaptor/ali/image.go @@ -27,7 +27,7 @@ func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Heade if err != nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel var imageRequest ImageRequest imageRequest.Input.Prompt = request.Prompt diff --git a/service/aiproxy/relay/adaptor/ali/rerank.go b/service/aiproxy/relay/adaptor/ali/rerank.go index ef6d5fb6fab..ab739bc8ac9 100644 --- a/service/aiproxy/relay/adaptor/ali/rerank.go +++ b/service/aiproxy/relay/adaptor/ali/rerank.go @@ -32,7 +32,7 @@ func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Head if err != nil { return "", nil, nil, err } - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel reqMap["input"] = map[string]any{ "query": reqMap["query"], "documents": reqMap["documents"], diff --git a/service/aiproxy/relay/adaptor/ali/stt-realtime.go b/service/aiproxy/relay/adaptor/ali/stt-realtime.go index 1d45920f7b5..46ca83892ae 100644 --- a/service/aiproxy/relay/adaptor/ali/stt-realtime.go +++ b/service/aiproxy/relay/adaptor/ali/stt-realtime.go @@ -89,7 +89,7 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Hea TaskID: uuid.New().String(), }, Payload: 
STTPayload{ - Model: meta.ActualModelName, + Model: meta.ActualModel, Task: "asr", TaskGroup: "audio", Function: "recognition", diff --git a/service/aiproxy/relay/adaptor/ali/tts.go b/service/aiproxy/relay/adaptor/ali/tts.go index 3c685a081c6..642e76fe8ab 100644 --- a/service/aiproxy/relay/adaptor/ali/tts.go +++ b/service/aiproxy/relay/adaptor/ali/tts.go @@ -107,7 +107,7 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, if ok { sampleRate = int(sampleRateI) } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel if strings.HasPrefix(request.Model, "sambert-v") { voice := request.Voice diff --git a/service/aiproxy/relay/adaptor/anthropic/adaptor.go b/service/aiproxy/relay/adaptor/anthropic/adaptor.go index 73601212833..e4f035793a5 100644 --- a/service/aiproxy/relay/adaptor/anthropic/adaptor.go +++ b/service/aiproxy/relay/adaptor/anthropic/adaptor.go @@ -37,7 +37,7 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, c *gin.Context, req *http. 
// https://x.com/alexalbert__/status/1812921642143900036 // claude-3-5-sonnet can support 8k context - if strings.HasPrefix(meta.ActualModelName, "claude-3-5-sonnet") { + if strings.HasPrefix(meta.ActualModel, "claude-3-5-sonnet") { req.Header.Set("Anthropic-Beta", "max-tokens-3-5-sonnet-2024-07-15") } diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 169a6e0df0d..50fe177a9ab 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -44,7 +44,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (*Request, error) { if err != nil { return nil, err } - textRequest.Model = meta.ActualModelName + textRequest.Model = meta.ActualModel meta.Set("stream", textRequest.Stream) claudeTools := make([]Tool, 0, len(textRequest.Tools)) @@ -372,7 +372,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Error }, nil } fullTextResponse := ResponseClaude2OpenAI(&claudeResponse) - fullTextResponse.Model = meta.OriginModelName + fullTextResponse.Model = meta.OriginModel usage := model.Usage{ PromptTokens: claudeResponse.Usage.InputTokens, CompletionTokens: claudeResponse.Usage.OutputTokens, diff --git a/service/aiproxy/relay/adaptor/aws/adaptor.go b/service/aiproxy/relay/adaptor/aws/adaptor.go index cb79e41cab1..2c53495b039 100644 --- a/service/aiproxy/relay/adaptor/aws/adaptor.go +++ b/service/aiproxy/relay/adaptor/aws/adaptor.go @@ -18,7 +18,7 @@ var _ adaptor.Adaptor = new(Adaptor) type Adaptor struct{} func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { - adaptor := GetAdaptor(meta.ActualModelName) + adaptor := GetAdaptor(meta.ActualModel) if adaptor == nil { return "", nil, nil, errors.New("adaptor not found") } diff --git a/service/aiproxy/relay/adaptor/aws/claude/main.go b/service/aiproxy/relay/adaptor/aws/claude/main.go index 7c2106981df..7a9e19f1d61 100644 --- 
a/service/aiproxy/relay/adaptor/aws/claude/main.go +++ b/service/aiproxy/relay/adaptor/aws/claude/main.go @@ -93,7 +93,7 @@ func awsModelID(requestModel string) (string, error) { } func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { - awsModelID, err := awsModelID(meta.ActualModelName) + awsModelID, err := awsModelID(meta.ActualModel) if err != nil { return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil } @@ -138,7 +138,7 @@ func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, } openaiResp := anthropic.ResponseClaude2OpenAI(claudeResponse) - openaiResp.Model = meta.OriginModelName + openaiResp.Model = meta.OriginModel usage := relaymodel.Usage{ PromptTokens: claudeResponse.Usage.InputTokens, CompletionTokens: claudeResponse.Usage.OutputTokens, @@ -153,8 +153,8 @@ func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { log := middleware.GetLogger(c) createdTime := time.Now().Unix() - originModelName := meta.OriginModelName - awsModelID, err := awsModelID(meta.ActualModelName) + originModelName := meta.OriginModel + awsModelID, err := awsModelID(meta.ActualModel) if err != nil { return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil } diff --git a/service/aiproxy/relay/adaptor/aws/llama3/adapter.go b/service/aiproxy/relay/adaptor/aws/llama3/adapter.go index bca6e0d8561..ecfd2ee18cb 100644 --- a/service/aiproxy/relay/adaptor/aws/llama3/adapter.go +++ b/service/aiproxy/relay/adaptor/aws/llama3/adapter.go @@ -24,7 +24,7 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht if err != nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel meta.Set("stream", request.Stream) llamaReq := ConvertRequest(request) meta.Set(ConvertedRequest, llamaReq) diff --git 
a/service/aiproxy/relay/adaptor/aws/llama3/main.go b/service/aiproxy/relay/adaptor/aws/llama3/main.go index 436aac4fdfb..d353096178f 100644 --- a/service/aiproxy/relay/adaptor/aws/llama3/main.go +++ b/service/aiproxy/relay/adaptor/aws/llama3/main.go @@ -94,7 +94,7 @@ func ConvertRequest(textRequest *relaymodel.GeneralOpenAIRequest) *Request { } func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { - awsModelID, err := awsModelID(meta.ActualModelName) + awsModelID, err := awsModelID(meta.ActualModel) if err != nil { return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil } @@ -132,7 +132,7 @@ func Handler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatusCode, } openaiResp := ResponseLlama2OpenAI(&llamaResponse) - openaiResp.Model = meta.OriginModelName + openaiResp.Model = meta.OriginModel usage := relaymodel.Usage{ PromptTokens: llamaResponse.PromptTokenCount, CompletionTokens: llamaResponse.GenerationTokenCount, @@ -171,7 +171,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatus log := middleware.GetLogger(c) createdTime := time.Now().Unix() - awsModelID, err := awsModelID(meta.ActualModelName) + awsModelID, err := awsModelID(meta.ActualModel) if err != nil { return utils.WrapErr(errors.Wrap(err, "awsModelID")), nil } @@ -231,7 +231,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context) (*relaymodel.ErrorWithStatus } response := StreamResponseLlama2OpenAI(&llamaResp) response.ID = "chatcmpl-" + random.GetUUID() - response.Model = meta.OriginModelName + response.Model = meta.OriginModel response.Created = createdTime err = render.ObjectData(c, response) if err != nil { diff --git a/service/aiproxy/relay/adaptor/azure/constants.go b/service/aiproxy/relay/adaptor/azure/constants.go index 19bc3de87bf..257f94a9411 100644 --- a/service/aiproxy/relay/adaptor/azure/constants.go +++ b/service/aiproxy/relay/adaptor/azure/constants.go @@ -20,7 +20,7 @@ func (a *Adaptor) 
GetRequestURL(meta *meta.Meta) (string, error) { if err != nil { return "", err } - model := strings.ReplaceAll(meta.ActualModelName, ".", "") + model := strings.ReplaceAll(meta.ActualModel, ".", "") switch meta.Mode { case relaymode.ImagesGenerations: // https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api diff --git a/service/aiproxy/relay/adaptor/baidu/adaptor.go b/service/aiproxy/relay/adaptor/baidu/adaptor.go index a68633c6363..0605eef7cc7 100644 --- a/service/aiproxy/relay/adaptor/baidu/adaptor.go +++ b/service/aiproxy/relay/adaptor/baidu/adaptor.go @@ -64,9 +64,9 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { pathSuffix = "text2image" } - modelEndpoint, ok := modelEndpointMap[meta.ActualModelName] + modelEndpoint, ok := modelEndpointMap[meta.ActualModel] if !ok { - modelEndpoint = strings.ToLower(meta.ActualModelName) + modelEndpoint = strings.ToLower(meta.ActualModel) } // Construct full URL diff --git a/service/aiproxy/relay/adaptor/baidu/embeddings.go b/service/aiproxy/relay/adaptor/baidu/embeddings.go index 9071c871499..c07582d47ee 100644 --- a/service/aiproxy/relay/adaptor/baidu/embeddings.go +++ b/service/aiproxy/relay/adaptor/baidu/embeddings.go @@ -41,7 +41,7 @@ func EmbeddingsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*r if err != nil { return &baiduResponse.Usage, openai.ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError) } - respMap["model"] = meta.OriginModelName + respMap["model"] = meta.OriginModel respMap["object"] = "list" data, err := json.Marshal(respMap) diff --git a/service/aiproxy/relay/adaptor/baidu/main.go b/service/aiproxy/relay/adaptor/baidu/main.go index 46d691f9298..34257469170 100644 --- a/service/aiproxy/relay/adaptor/baidu/main.go +++ b/service/aiproxy/relay/adaptor/baidu/main.go @@ -46,7 +46,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io if err != 
nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel baiduRequest := ChatRequest{ Messages: request.Messages, Temperature: request.Temperature, @@ -117,7 +117,7 @@ func streamResponseBaidu2OpenAI(meta *meta.Meta, baiduResponse *ChatStreamRespon ID: baiduResponse.ID, Object: "chat.completion.chunk", Created: baiduResponse.Created, - Model: meta.OriginModelName, + Model: meta.OriginModel, Choices: []*openai.ChatCompletionsStreamResponseChoice{&choice}, Usage: baiduResponse.Usage, } @@ -185,7 +185,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage return nil, openai.ErrorWrapperWithMessage(baiduResponse.Error.ErrorMsg, "baidu_error_"+strconv.Itoa(baiduResponse.Error.ErrorCode), http.StatusInternalServerError) } fullTextResponse := responseBaidu2OpenAI(&baiduResponse) - fullTextResponse.Model = meta.OriginModelName + fullTextResponse.Model = meta.OriginModel jsonResponse, err := json.Marshal(fullTextResponse) if err != nil { return nil, openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError) diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index 9f844488d9d..d0fb3ba0006 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -61,11 +61,11 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. 
func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { switch meta.Mode { case relaymode.ChatCompletions: - actModel := meta.ActualModelName + actModel := meta.ActualModel v2Model := toV2ModelName(actModel) if v2Model != actModel { - meta.ActualModelName = v2Model - defer func() { meta.ActualModelName = actModel }() + meta.ActualModel = v2Model + defer func() { meta.ActualModel = actModel }() } return openai.ConvertRequest(meta, req) default: diff --git a/service/aiproxy/relay/adaptor/cloudflare/adaptor.go b/service/aiproxy/relay/adaptor/cloudflare/adaptor.go index cf7e84b0704..7680155fa76 100644 --- a/service/aiproxy/relay/adaptor/cloudflare/adaptor.go +++ b/service/aiproxy/relay/adaptor/cloudflare/adaptor.go @@ -43,9 +43,9 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return urlPrefix + "/v1/embeddings", nil default: if isAIGateWay { - return fmt.Sprintf("%s/%s", urlPrefix, meta.ActualModelName), nil + return fmt.Sprintf("%s/%s", urlPrefix, meta.ActualModel), nil } - return fmt.Sprintf("%s/run/%s", urlPrefix, meta.ActualModelName), nil + return fmt.Sprintf("%s/run/%s", urlPrefix, meta.ActualModel), nil } } diff --git a/service/aiproxy/relay/adaptor/cohere/adaptor.go b/service/aiproxy/relay/adaptor/cohere/adaptor.go index 7b1ddd3f967..df545466358 100644 --- a/service/aiproxy/relay/adaptor/cohere/adaptor.go +++ b/service/aiproxy/relay/adaptor/cohere/adaptor.go @@ -38,7 +38,7 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht if err != nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel requestBody := ConvertRequest(request) if requestBody == nil { return "", nil, nil, errors.New("request body is nil") @@ -62,7 +62,7 @@ func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Respons if utils.IsStreamResponse(resp) { err, usage = StreamHandler(c, resp) } else { - err, usage = 
Handler(c, resp, meta.InputTokens, meta.ActualModelName) + err, usage = Handler(c, resp, meta.InputTokens, meta.ActualModel) } } return diff --git a/service/aiproxy/relay/adaptor/coze/adaptor.go b/service/aiproxy/relay/adaptor/coze/adaptor.go index b1bf41a138d..528c5e62522 100644 --- a/service/aiproxy/relay/adaptor/coze/adaptor.go +++ b/service/aiproxy/relay/adaptor/coze/adaptor.go @@ -51,11 +51,11 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht return "", nil, nil, err } request.User = userID - request.Model = meta.ActualModelName + request.Model = meta.ActualModel cozeRequest := Request{ Stream: request.Stream, User: request.User, - BotID: strings.TrimPrefix(meta.ActualModelName, "bot-"), + BotID: strings.TrimPrefix(meta.ActualModel, "bot-"), } for i, message := range request.Messages { if i == len(request.Messages)-1 { @@ -84,10 +84,10 @@ func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Respons if utils.IsStreamResponse(resp) { err, responseText = StreamHandler(c, resp) } else { - err, responseText = Handler(c, resp, meta.InputTokens, meta.ActualModelName) + err, responseText = Handler(c, resp, meta.InputTokens, meta.ActualModel) } if responseText != nil { - usage = openai.ResponseText2Usage(*responseText, meta.ActualModelName, meta.InputTokens) + usage = openai.ResponseText2Usage(*responseText, meta.ActualModel, meta.InputTokens) } else { usage = &relaymodel.Usage{} } diff --git a/service/aiproxy/relay/adaptor/doubao/main.go b/service/aiproxy/relay/adaptor/doubao/main.go index 9d4b7eb42da..fca376cd62f 100644 --- a/service/aiproxy/relay/adaptor/doubao/main.go +++ b/service/aiproxy/relay/adaptor/doubao/main.go @@ -17,7 +17,7 @@ func GetRequestURL(meta *meta.Meta) (string, error) { } switch meta.Mode { case relaymode.ChatCompletions: - if strings.HasPrefix(meta.ActualModelName, "bot-") { + if strings.HasPrefix(meta.ActualModel, "bot-") { return u + "/api/v3/bots/chat/completions", nil } return u + 
"/api/v3/chat/completions", nil diff --git a/service/aiproxy/relay/adaptor/gemini/adaptor.go b/service/aiproxy/relay/adaptor/gemini/adaptor.go index 556eaf28dac..d5cb42928f4 100644 --- a/service/aiproxy/relay/adaptor/gemini/adaptor.go +++ b/service/aiproxy/relay/adaptor/gemini/adaptor.go @@ -27,10 +27,10 @@ func getRequestURL(meta *meta.Meta, action string) string { u = baseURL } version := "v1beta" - if _, ok := v1ModelMap[meta.ActualModelName]; ok { + if _, ok := v1ModelMap[meta.ActualModel]; ok { version = "v1" } - return fmt.Sprintf("%s/%s/models/%s:%s", u, version, meta.ActualModelName, action) + return fmt.Sprintf("%s/%s/models/%s:%s", u, version, meta.ActualModel, action) } func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { diff --git a/service/aiproxy/relay/adaptor/gemini/embeddings.go b/service/aiproxy/relay/adaptor/gemini/embeddings.go index 54ff391e4bd..403284e1db3 100644 --- a/service/aiproxy/relay/adaptor/gemini/embeddings.go +++ b/service/aiproxy/relay/adaptor/gemini/embeddings.go @@ -18,7 +18,7 @@ func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.H if err != nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel inputs := request.ParseInput() requests := make([]EmbeddingRequest, len(inputs)) diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 77d9968da39..6922660c7f2 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -190,7 +190,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io return "", nil, nil, err } - textRequest.Model = meta.ActualModelName + textRequest.Model = meta.ActualModel meta.Set("stream", textRequest.Stream) systemContent, contents, err := buildContents(req.Context(), textRequest) @@ -317,7 +317,7 @@ func getToolCalls(candidate *ChatCandidate) []*model.Tool { func 
responseGeminiChat2OpenAI(meta *meta.Meta, response *ChatResponse) *openai.TextResponse { fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + random.GetUUID(), - Model: meta.OriginModelName, + Model: meta.OriginModel, Object: "chat.completion", Created: time.Now().Unix(), Choices: make([]*openai.TextResponseChoice, 0, len(response.Candidates)), @@ -356,7 +356,7 @@ func streamResponseGeminiChat2OpenAI(meta *meta.Meta, geminiResponse *ChatRespon response := &openai.ChatCompletionsStreamResponse{ ID: "chatcmpl-" + random.GetUUID(), Created: time.Now().Unix(), - Model: meta.OriginModelName, + Model: meta.OriginModel, Object: "chat.completion.chunk", Choices: make([]*openai.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates)), } @@ -444,7 +444,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model tokenCount, err := CountTokens(c.Request.Context(), meta, respContent) if err != nil { log.Error("count tokens failed: " + err.Error()) - usage.CompletionTokens = openai.CountTokenText(responseText.String(), meta.ActualModelName) + usage.CompletionTokens = openai.CountTokenText(responseText.String(), meta.ActualModel) } else { usage.CompletionTokens = tokenCount } @@ -466,7 +466,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage return nil, openai.ErrorWrapperWithMessage("No candidates returned", "gemini_error", resp.StatusCode) } fullTextResponse := responseGeminiChat2OpenAI(meta, &geminiResponse) - fullTextResponse.Model = meta.OriginModelName + fullTextResponse.Model = meta.OriginModel respContent := []*ChatContent{} for _, candidate := range geminiResponse.Candidates { respContent = append(respContent, &candidate.Content) @@ -478,7 +478,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage tokenCount, err := CountTokens(c.Request.Context(), meta, respContent) if err != nil { log.Error("count tokens failed: " + err.Error()) - usage.CompletionTokens = 
openai.CountTokenText(geminiResponse.GetResponseText(), meta.ActualModelName) + usage.CompletionTokens = openai.CountTokenText(geminiResponse.GetResponseText(), meta.ActualModel) } else { usage.CompletionTokens = tokenCount } diff --git a/service/aiproxy/relay/adaptor/minimax/tts.go b/service/aiproxy/relay/adaptor/minimax/tts.go index 2dd69d9ebb7..8663758b387 100644 --- a/service/aiproxy/relay/adaptor/minimax/tts.go +++ b/service/aiproxy/relay/adaptor/minimax/tts.go @@ -25,7 +25,7 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, return "", nil, nil, err } - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel reqMap["text"] = reqMap["input"] delete(reqMap, "input") diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index 0b16a837d11..f49c868d3f7 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -29,7 +29,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io if err != nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel ollamaRequest := ChatRequest{ Model: request.Model, @@ -180,7 +180,7 @@ func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.H if err != nil { return "", nil, nil, err } - request.Model = meta.ActualModelName + request.Model = meta.ActualModel data, err := json.Marshal(&EmbeddingRequest{ Model: request.Model, Input: request.ParseInput(), diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index cb2fbee34b7..1d0155ab393 100644 --- a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -141,7 +141,7 @@ func ConvertTextRequest(meta *meta.Meta, req *http.Request) (string, http.Header } } - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel 
jsonData, err := json.Marshal(reqMap) if err != nil { return "", nil, nil, err diff --git a/service/aiproxy/relay/adaptor/openai/embeddings.go b/service/aiproxy/relay/adaptor/openai/embeddings.go index b368fe87597..f1851aa4a8c 100644 --- a/service/aiproxy/relay/adaptor/openai/embeddings.go +++ b/service/aiproxy/relay/adaptor/openai/embeddings.go @@ -20,7 +20,7 @@ func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http. return "", nil, nil, err } - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel if meta.GetBool(MetaEmbeddingsPatchInputToSlices) { switch v := reqMap["input"].(type) { diff --git a/service/aiproxy/relay/adaptor/openai/image.go b/service/aiproxy/relay/adaptor/openai/image.go index 1614421987f..fa09364cded 100644 --- a/service/aiproxy/relay/adaptor/openai/image.go +++ b/service/aiproxy/relay/adaptor/openai/image.go @@ -22,7 +22,7 @@ func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Heade } meta.Set(MetaResponseFormat, reqMap["response_format"]) - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel jsonData, err := json.Marshal(reqMap) if err != nil { return "", nil, nil, err diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index e466a8aefb7..0c91b314f4c 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -82,8 +82,8 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model log.Error("error unmarshalling stream response: " + err.Error()) continue } - if _, ok := respMap["model"]; ok && meta.OriginModelName != "" { - respMap["model"] = meta.OriginModelName + if _, ok := respMap["model"]; ok && meta.OriginModel != "" { + respMap["model"] = meta.OriginModel } err = render.ObjectData(c, respMap) if err != nil { @@ -111,7 +111,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model 
render.Done(c) if usage == nil || (usage.TotalTokens == 0 && responseText != "") { - usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.InputTokens) + usage = ResponseText2Usage(responseText, meta.ActualModel, meta.InputTokens) } if usage.TotalTokens != 0 && usage.PromptTokens == 0 { // some channels don't return prompt tokens & completion tokens @@ -143,7 +143,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage if textResponse.Usage.TotalTokens == 0 || (textResponse.Usage.PromptTokens == 0 && textResponse.Usage.CompletionTokens == 0) { completionTokens := 0 for _, choice := range textResponse.Choices { - completionTokens += CountTokenText(choice.Message.StringContent(), meta.ActualModelName) + completionTokens += CountTokenText(choice.Message.StringContent(), meta.ActualModel) } textResponse.Usage = model.Usage{ PromptTokens: meta.InputTokens, @@ -158,8 +158,8 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage return &textResponse.Usage, ErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError) } - if _, ok := respMap["model"]; ok && meta.OriginModelName != "" { - respMap["model"] = meta.OriginModelName + if _, ok := respMap["model"]; ok && meta.OriginModel != "" { + respMap["model"] = meta.OriginModel } newData, err := stdjson.Marshal(respMap) diff --git a/service/aiproxy/relay/adaptor/openai/moderations.go b/service/aiproxy/relay/adaptor/openai/moderations.go index b462a198193..14d2c4919ef 100644 --- a/service/aiproxy/relay/adaptor/openai/moderations.go +++ b/service/aiproxy/relay/adaptor/openai/moderations.go @@ -36,8 +36,8 @@ func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (* return nil, ErrorWrapperWithMessage(errorResp.Error.Message, errorResp.Error.Code, http.StatusBadRequest) } - if _, ok := respMap["model"]; ok && meta.OriginModelName != "" { - respMap["model"] = meta.OriginModelName + if _, ok := respMap["model"]; ok 
&& meta.OriginModel != "" { + respMap["model"] = meta.OriginModel } usage := &model.Usage{ diff --git a/service/aiproxy/relay/adaptor/openai/rerank.go b/service/aiproxy/relay/adaptor/openai/rerank.go index f4774c7d847..653415a68f0 100644 --- a/service/aiproxy/relay/adaptor/openai/rerank.go +++ b/service/aiproxy/relay/adaptor/openai/rerank.go @@ -19,7 +19,7 @@ func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Head if err != nil { return "", nil, nil, err } - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel jsonData, err := json.Marshal(reqMap) if err != nil { return "", nil, nil, err diff --git a/service/aiproxy/relay/adaptor/openai/stt.go b/service/aiproxy/relay/adaptor/openai/stt.go index c8f070bd18f..ab32ced6c8e 100644 --- a/service/aiproxy/relay/adaptor/openai/stt.go +++ b/service/aiproxy/relay/adaptor/openai/stt.go @@ -32,7 +32,7 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Hea } value := values[0] if key == "model" { - err = multipartWriter.WriteField(key, meta.ActualModelName) + err = multipartWriter.WriteField(key, meta.ActualModel) if err != nil { return "", nil, nil, err } @@ -113,7 +113,7 @@ func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Us if err != nil { return nil, ErrorWrapper(err, "get_text_from_body_err", http.StatusInternalServerError) } - completionTokens := CountTokenText(text, meta.ActualModelName) + completionTokens := CountTokenText(text, meta.ActualModel) for k, v := range resp.Header { c.Writer.Header().Set(k, v[0]) diff --git a/service/aiproxy/relay/adaptor/openai/tts.go b/service/aiproxy/relay/adaptor/openai/tts.go index 08f3fa07c20..bdce9258938 100644 --- a/service/aiproxy/relay/adaptor/openai/tts.go +++ b/service/aiproxy/relay/adaptor/openai/tts.go @@ -28,7 +28,7 @@ func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (string, http.Header, if err != nil { return "", nil, nil, err } - reqMap["model"] = 
meta.ActualModelName + reqMap["model"] = meta.ActualModel jsonData, err := json.Marshal(reqMap) if err != nil { return "", nil, nil, err diff --git a/service/aiproxy/relay/adaptor/siliconflow/image.go b/service/aiproxy/relay/adaptor/siliconflow/image.go index 08853c733a4..1992fccc6ba 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/image.go +++ b/service/aiproxy/relay/adaptor/siliconflow/image.go @@ -33,7 +33,7 @@ func ConvertImageRequest(meta *meta.Meta, request *http.Request) (http.Header, i meta.Set(openai.MetaResponseFormat, reqMap["response_format"]) - reqMap["model"] = meta.ActualModelName + reqMap["model"] = meta.ActualModel reqMap["batch_size"] = reqMap["n"] delete(reqMap, "n") if _, ok := reqMap["steps"]; ok { diff --git a/service/aiproxy/relay/adaptor/vertexai/adaptor.go b/service/aiproxy/relay/adaptor/vertexai/adaptor.go index 737c330a643..075d532cc18 100644 --- a/service/aiproxy/relay/adaptor/vertexai/adaptor.go +++ b/service/aiproxy/relay/adaptor/vertexai/adaptor.go @@ -30,7 +30,7 @@ type Config struct { } func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error) { - adaptor := GetAdaptor(meta.ActualModelName) + adaptor := GetAdaptor(meta.ActualModel) if adaptor == nil { return "", nil, nil, errors.New("adaptor not found") } @@ -39,9 +39,9 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, request *http.Request) (string } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { - adaptor := GetAdaptor(meta.ActualModelName) + adaptor := GetAdaptor(meta.ActualModel) if adaptor == nil { - return nil, openai.ErrorWrapperWithMessage(meta.ActualModelName+" adaptor not found", "adaptor_not_found", http.StatusInternalServerError) + return nil, openai.ErrorWrapperWithMessage(meta.ActualModel+" adaptor not found", "adaptor_not_found", http.StatusInternalServerError) } return adaptor.DoResponse(meta, c, resp) } 
@@ -56,7 +56,7 @@ func (a *Adaptor) GetChannelName() string { func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { var suffix string - if strings.HasPrefix(meta.ActualModelName, "gemini") { + if strings.HasPrefix(meta.ActualModel, "gemini") { if meta.GetBool("stream") { suffix = "streamGenerateContent?alt=sse" } else { @@ -81,7 +81,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { meta.Channel.BaseURL, config.ProjectID, config.Region, - meta.ActualModelName, + meta.ActualModel, suffix, ), nil } @@ -90,7 +90,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { config.Region, config.ProjectID, config.Region, - meta.ActualModelName, + meta.ActualModel, suffix, ), nil } diff --git a/service/aiproxy/relay/adaptor/xunfei/adaptor.go b/service/aiproxy/relay/adaptor/xunfei/adaptor.go index faa51f5d4ca..0309a92f258 100644 --- a/service/aiproxy/relay/adaptor/xunfei/adaptor.go +++ b/service/aiproxy/relay/adaptor/xunfei/adaptor.go @@ -23,14 +23,14 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { } func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { - domain, err := getXunfeiDomain(meta.ActualModelName) + domain, err := getXunfeiDomain(meta.ActualModel) if err != nil { return "", nil, nil, err } - model := meta.ActualModelName - meta.ActualModelName = domain + model := meta.ActualModel + meta.ActualModel = domain defer func() { - meta.ActualModelName = model + meta.ActualModel = model }() method, h, body, err := a.Adaptor.ConvertRequest(meta, req) if err != nil { diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go new file mode 100644 index 00000000000..e46e7c8b991 --- /dev/null +++ b/service/aiproxy/relay/controller/handle.go @@ -0,0 +1,111 @@ +package controller + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/gin-gonic/gin" + 
"github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/conv" + "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/channeltype" + "github.com/labring/sealos/service/aiproxy/relay/meta" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" +) + +func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBalanceReq, error)) *relaymodel.ErrorWithStatusCode { + log := middleware.GetLogger(c) + ctx := c.Request.Context() + + // 1. Get adaptor + adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) + if !ok { + log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) + return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) + } + + // 2. Get group balance + groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) + if err != nil { + log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) + return openai.ErrorWrapper( + fmt.Errorf("get group (%s) balance failed", meta.Group.ID), + "get_group_quota_failed", + http.StatusInternalServerError, + ) + } + + // 3. 
Pre-process request + preCheckReq, err := preProcess() + if err != nil { + log.Errorf("pre-process request failed: %s", err.Error()) + var detail *model.RequestDetail + body, bodyErr := common.GetRequestBody(c.Request) + if bodyErr != nil { + log.Errorf("get request body failed: %s", bodyErr.Error()) + } else { + detail = &model.RequestDetail{ + RequestBody: conv.BytesToString(body), + } + } + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + nil, + http.StatusBadRequest, + nil, + meta, + 0, + 0, + err.Error(), + detail, + ) + return openai.ErrorWrapper(err, "invalid_request", http.StatusBadRequest) + } + + // 4. Pre-check balance + ok = checkGroupBalance(preCheckReq, meta, groupRemainBalance) + if !ok { + return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) + } + + meta.InputTokens = preCheckReq.InputTokens + + // 5. Do request + usage, detail, respErr := DoHelper(adaptor, c, meta) + if respErr != nil { + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + postGroupConsumer, + respErr.StatusCode, + usage, + meta, + preCheckReq.InputPrice, + preCheckReq.OutputPrice, + respErr.String(), + detail, + ) + return respErr + } + + // 6. 
Post consume + ConsumeWaitGroup.Add(1) + go postConsumeAmount(context.Background(), + &ConsumeWaitGroup, + postGroupConsumer, + http.StatusOK, + usage, + meta, + preCheckReq.InputPrice, + preCheckReq.OutputPrice, + "", + nil, + ) + + return nil +} diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 9c457a2defa..eb156d5d7c5 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -24,6 +24,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/labring/sealos/service/aiproxy/relay/utils" "github.com/shopspring/decimal" + log "github.com/sirupsen/logrus" ) var ConsumeWaitGroup sync.WaitGroup @@ -31,11 +32,12 @@ var ConsumeWaitGroup sync.WaitGroup type PreCheckGroupBalanceReq struct { InputTokens int MaxTokens int - Price float64 + InputPrice float64 + OutputPrice float64 } func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { - if req == nil || req.Price == 0 || (req.InputTokens == 0 && req.MaxTokens == 0) { + if req == nil || req.InputPrice == 0 || (req.InputTokens == 0 && req.MaxTokens == 0) { return 0 } preConsumedTokens := int64(req.InputTokens) @@ -44,15 +46,18 @@ func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { } return decimal. NewFromInt(preConsumedTokens). - Mul(decimal.NewFromFloat(req.Price)). + Mul(decimal.NewFromFloat(req.InputPrice)). Div(decimal.NewFromInt(billingprice.PriceUnit)). 
InexactFloat64() } -func preCheckGroupBalance(req *PreCheckGroupBalanceReq, meta *meta.Meta, groupRemainBalance float64) bool { +func checkGroupBalance(req *PreCheckGroupBalanceReq, meta *meta.Meta, groupRemainBalance float64) bool { if meta.IsChannelTest { return true } + if groupRemainBalance <= 0 { + return false + } preConsumedAmount := getPreConsumedAmount(req) @@ -79,7 +84,13 @@ func postConsumeAmount( content string, requestDetail *model.RequestDetail, ) { - defer consumeWaitGroup.Done() + defer func() { + consumeWaitGroup.Done() + if r := recover(); r != nil { + log.Errorf("panic in post consume amount: %v", r) + } + }() + if meta.IsChannelTest { return } @@ -94,7 +105,7 @@ func postConsumeAmount( meta.Channel.ID, 0, 0, - meta.OriginModelName, + meta.OriginModel, meta.Token.ID, meta.Token.Name, 0, @@ -133,7 +144,7 @@ func postConsumeAmount( meta.RequestAt, meta.Group.ID, meta.Token.Name, - meta.OriginModelName, + meta.OriginModel, err.Error(), amount, meta.Token.ID, @@ -154,7 +165,7 @@ func postConsumeAmount( meta.Channel.ID, promptTokens, completionTokens, - meta.OriginModelName, + meta.OriginModel, meta.Token.ID, meta.Token.Name, amount, diff --git a/service/aiproxy/relay/controller/image.go b/service/aiproxy/relay/controller/image.go index d485adc3705..2ff31c2b730 100644 --- a/service/aiproxy/relay/controller/image.go +++ b/service/aiproxy/relay/controller/image.go @@ -1,19 +1,10 @@ package controller import ( - "context" "errors" "fmt" - "net/http" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/conv" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/model" - "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel 
"github.com/labring/sealos/service/aiproxy/relay/model" billingprice "github.com/labring/sealos/service/aiproxy/relay/price" @@ -40,8 +31,8 @@ func getImageRequest(c *gin.Context) (*relaymodel.ImageRequest, error) { return imageRequest, nil } -func getImageCostPrice(modelName string, reqModel string, size string) (float64, error) { - imageCostPrice, ok := billingprice.GetImageSizePrice(modelName, reqModel, size) +func getImageCostPrice(model string, size string) (float64, error) { + imageCostPrice, ok := billingprice.GetImageSizePrice(model, size) if !ok { return 0, fmt.Errorf("invalid image size: %s", size) } @@ -49,100 +40,20 @@ func getImageCostPrice(modelName string, reqModel string, size string) (float64, } func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { - log := middleware.GetLogger(c) - ctx := c.Request.Context() - - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) - if !ok { - log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) - return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) - } - - groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } - - imageRequest, err := getImageRequest(c) - if err != nil { - log.Errorf("get request failed: %s", err.Error()) - var detail model.RequestDetail - reqDetail, err := common.GetRequestBody(c.Request) + return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { + imageRequest, err := getImageRequest(c) if err != nil { - log.Errorf("get request body failed: %s", err.Error()) - } else { - detail.RequestBody = conv.BytesToString(reqDetail) + return nil, err } - ConsumeWaitGroup.Add(1) - go 
postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - nil, - http.StatusBadRequest, - nil, - meta, - 0, - 0, - err.Error(), - &detail, - ) - return openai.ErrorWrapper(err, "invalid_image_request", http.StatusBadRequest) - } - imageCostPrice, err := getImageCostPrice(meta.OriginModelName, meta.ActualModelName, imageRequest.Size) - if err != nil { - return openai.ErrorWrapper(err, "get_image_cost_price_failed", http.StatusInternalServerError) - } - - meta.InputTokens = imageRequest.N - - ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ - InputTokens: meta.InputTokens, - Price: imageCostPrice, - }, meta, groupRemainBalance) - if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) - } - - // do response - usage, detail, respErr := DoHelper(adaptor, c, meta) - if respErr != nil { - if detail != nil && config.DebugEnabled { - log.Errorf("do image failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) - } else { - log.Errorf("do image failed: %s", respErr) + imageCostPrice, err := getImageCostPrice(meta.OriginModel, imageRequest.Size) + if err != nil { + return nil, err } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - respErr.StatusCode, - usage, - meta, - imageCostPrice, - 0, - respErr.String(), - detail, - ) - return respErr - } - - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - http.StatusOK, - usage, - meta, - imageCostPrice, - 0, - imageRequest.Size, - nil, - ) - return nil + return &PreCheckGroupBalanceReq{ + InputTokens: imageRequest.N, + InputPrice: imageCostPrice, + }, nil + }) } diff --git a/service/aiproxy/relay/controller/rerank.go b/service/aiproxy/relay/controller/rerank.go index 8a9f5b783c7..737e2bc2d2e 100644 --- a/service/aiproxy/relay/controller/rerank.go +++ 
b/service/aiproxy/relay/controller/rerank.go @@ -1,20 +1,11 @@ package controller import ( - "context" "errors" "fmt" - "net/http" "strings" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/conv" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/model" - "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" billingprice "github.com/labring/sealos/service/aiproxy/relay/price" @@ -22,101 +13,23 @@ import ( ) func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { - log := middleware.GetLogger(c) - ctx := c.Request.Context() - - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) - if !ok { - log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) - return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) - } - - groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } - - rerankRequest, err := getRerankRequest(c) - if err != nil { - log.Errorf("get request failed: %s", err.Error()) - var detail model.RequestDetail - reqDetail, err := common.GetRequestBody(c.Request) - if err != nil { - log.Errorf("get request body failed: %s", err.Error()) - } else { - detail.RequestBody = conv.BytesToString(reqDetail) + return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { + price, 
completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + if !ok { + return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - nil, - http.StatusBadRequest, - nil, - meta, - 0, - 0, - err.Error(), - &detail, - ) - return openai.ErrorWrapper(err, "invalid_rerank_request", http.StatusBadRequest) - } - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) - if !ok { - return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) - } - - meta.InputTokens = rerankPromptTokens(rerankRequest) - - ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ - InputTokens: meta.InputTokens, - Price: price, - }, meta, groupRemainBalance) - if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) - } - - usage, detail, respErr := DoHelper(adaptor, c, meta) - if respErr != nil { - if detail != nil && config.DebugEnabled { - log.Errorf("do rerank failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) - } else { - log.Errorf("do rerank failed: %s", respErr) + rerankRequest, err := getRerankRequest(c) + if err != nil { + return nil, err } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - http.StatusInternalServerError, - usage, - meta, - price, - completionPrice, - respErr.String(), - detail, - ) - return respErr - } - - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - http.StatusOK, - usage, - meta, - price, - completionPrice, - "", - nil, - ) - return nil + return &PreCheckGroupBalanceReq{ + InputTokens: rerankPromptTokens(rerankRequest), + InputPrice: price, + OutputPrice: 
completionPrice, + }, nil + }) } func getRerankRequest(c *gin.Context) (*relaymodel.RerankRequest, error) { diff --git a/service/aiproxy/relay/controller/stt.go b/service/aiproxy/relay/controller/stt.go index 317df5ff755..e12d130c4e1 100644 --- a/service/aiproxy/relay/controller/stt.go +++ b/service/aiproxy/relay/controller/stt.go @@ -1,88 +1,24 @@ package controller import ( - "context" - "errors" "fmt" - "net/http" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" billingprice "github.com/labring/sealos/service/aiproxy/relay/price" ) func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { - log := middleware.GetLogger(c) - ctx := c.Request.Context() - - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) - if !ok { - log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) - return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) - } - - groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } - - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) - if !ok { - return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) - } - - ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ - 
InputTokens: meta.InputTokens, - Price: price, - }, meta, groupRemainBalance) - if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) - } - - usage, detail, respErr := DoHelper(adaptor, c, meta) - if respErr != nil { - if detail != nil && config.DebugEnabled { - log.Errorf("do stt failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) - } else { - log.Errorf("do stt failed: %s", respErr) + return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { + price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + if !ok { + return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - respErr.StatusCode, - usage, - meta, - price, - completionPrice, - respErr.String(), - detail, - ) - return respErr - } - - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - http.StatusOK, - usage, - meta, - price, - completionPrice, - "", - nil, - ) - return nil + return &PreCheckGroupBalanceReq{ + InputPrice: price, + OutputPrice: completionPrice, + }, nil + }) } diff --git a/service/aiproxy/relay/controller/text.go b/service/aiproxy/relay/controller/text.go index 8b6af6b9e6a..da3ee8d40e1 100644 --- a/service/aiproxy/relay/controller/text.go +++ b/service/aiproxy/relay/controller/text.go @@ -1,19 +1,10 @@ package controller import ( - "context" - "errors" "fmt" - "net/http" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/conv" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - 
"github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" billingprice "github.com/labring/sealos/service/aiproxy/relay/price" @@ -21,113 +12,22 @@ import ( ) func RelayTextHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { - log := middleware.GetLogger(c) - ctx := c.Request.Context() - - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) - if !ok { - log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) - return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) - } - - groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } + return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { + price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + if !ok { + return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) + } - textRequest, err := utils.UnmarshalGeneralOpenAIRequest(c.Request) - if err != nil { - log.Errorf("get request failed: %s", err.Error()) - var detail model.RequestDetail - reqDetail, err := common.GetRequestBody(c.Request) + textRequest, err := utils.UnmarshalGeneralOpenAIRequest(c.Request) if err != nil { - log.Errorf("get request body failed: %s", err.Error()) - } else { - detail.RequestBody = conv.BytesToString(reqDetail) + return nil, err } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - nil, - http.StatusBadRequest, - nil, - meta, - 0, - 0, - err.Error(), - &detail, - ) - return openai.ErrorWrapper(fmt.Errorf("get and validate text request 
failed: %s", err.Error()), "invalid_text_request", http.StatusBadRequest) - } - // get model price - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) - if !ok { - return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", http.StatusInternalServerError) - } - // pre-consume balance - meta.InputTokens = openai.GetPromptTokens(meta, textRequest) - - ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ - InputTokens: meta.InputTokens, - MaxTokens: textRequest.MaxTokens, - Price: price, - }, meta, groupRemainBalance) - if !ok { - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - http.StatusForbidden, - nil, - meta, - 0, - 0, - "group balance is not enough", - nil, - ) - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) - } - - // do response - usage, detail, respErr := DoHelper(adaptor, c, meta) - if respErr != nil { - if detail != nil && config.DebugEnabled { - log.Errorf("do text failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) - } else { - log.Errorf("do text failed: %s", respErr) - } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - respErr.StatusCode, - usage, - meta, - price, - completionPrice, - respErr.String(), - detail, - ) - return respErr - } - // post-consume amount - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - http.StatusOK, - usage, - meta, - price, - completionPrice, - "", - nil, - ) - return nil + return &PreCheckGroupBalanceReq{ + InputTokens: openai.GetPromptTokens(meta, textRequest), + MaxTokens: textRequest.MaxTokens, + InputPrice: price, + OutputPrice: completionPrice, + }, nil + }) } diff --git 
a/service/aiproxy/relay/controller/tts.go b/service/aiproxy/relay/controller/tts.go index 0d7a9c03837..56e0298c9cb 100644 --- a/service/aiproxy/relay/controller/tts.go +++ b/service/aiproxy/relay/controller/tts.go @@ -1,19 +1,10 @@ package controller import ( - "context" - "errors" "fmt" - "net/http" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/conv" - "github.com/labring/sealos/service/aiproxy/middleware" - "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/channeltype" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" billingprice "github.com/labring/sealos/service/aiproxy/relay/price" @@ -21,99 +12,21 @@ import ( ) func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { - log := middleware.GetLogger(c) - ctx := c.Request.Context() - - adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) - if !ok { - log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) - return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) - } - - groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) - if err != nil { - log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), - "get_group_quota_failed", - http.StatusInternalServerError, - ) - } - - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModelName, meta.ActualModelName) - if !ok { - return openai.ErrorWrapper(fmt.Errorf("model price not found: %s", meta.OriginModelName), "model_price_not_found", 
http.StatusInternalServerError) - } - - ttsRequest, err := utils.UnmarshalTTSRequest(c.Request) - if err != nil { - log.Errorf("get request failed: %s", err.Error()) - var detail model.RequestDetail - reqDetail, err := common.GetRequestBody(c.Request) - if err != nil { - log.Errorf("get request body failed: %s", err.Error()) - } else { - detail.RequestBody = conv.BytesToString(reqDetail) + return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { + price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + if !ok { + return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - nil, - http.StatusBadRequest, - nil, - meta, - 0, - 0, - err.Error(), - &detail, - ) - return openai.ErrorWrapper(err, "invalid_tts_request", http.StatusBadRequest) - } - meta.InputTokens = openai.CountTokenText(ttsRequest.Input, meta.ActualModelName) - - ok = preCheckGroupBalance(&PreCheckGroupBalanceReq{ - InputTokens: meta.InputTokens, - Price: price, - }, meta, groupRemainBalance) - if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) - } - - usage, detail, respErr := DoHelper(adaptor, c, meta) - if respErr != nil { - if detail != nil && config.DebugEnabled { - log.Errorf("do tts failed: %s\nrequest detail:\n%s\nresponse detail:\n%s", respErr, detail.RequestBody, detail.ResponseBody) - } else { - log.Errorf("do tts failed: %s", respErr) + ttsRequest, err := utils.UnmarshalTTSRequest(c.Request) + if err != nil { + return nil, err } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - respErr.StatusCode, - usage, - meta, - price, - completionPrice, - respErr.String(), - detail, - ) - return respErr - } - - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, - postGroupConsumer, - 
http.StatusOK, - usage, - meta, - price, - completionPrice, - "", - nil, - ) - return nil + return &PreCheckGroupBalanceReq{ + InputTokens: openai.CountTokenText(ttsRequest.Input, meta.ActualModel), + InputPrice: price, + OutputPrice: completionPrice, + }, nil + }) } diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index 63cc4de07cb..7519d507eec 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -21,14 +21,14 @@ type Meta struct { Group *model.GroupCache Token *model.TokenCache - Endpoint string - RequestAt time.Time - RequestID string - OriginModelName string - ActualModelName string - Mode int - InputTokens int - IsChannelTest bool + Endpoint string + RequestAt time.Time + RequestID string + OriginModel string + ActualModel string + Mode int + InputTokens int + IsChannelTest bool } type Option func(meta *Meta) @@ -71,10 +71,10 @@ func WithToken(token *model.TokenCache) Option { func NewMeta(channel *model.Channel, mode int, modelName string, opts ...Option) *Meta { meta := Meta{ - values: make(map[string]any), - Mode: mode, - OriginModelName: modelName, - RequestAt: time.Now(), + values: make(map[string]any), + Mode: mode, + OriginModel: modelName, + RequestAt: time.Now(), } for _, opt := range opts { @@ -94,7 +94,7 @@ func (m *Meta) Reset(channel *model.Channel) { ID: channel.ID, Type: channel.Type, } - m.ActualModelName, _ = GetMappedModelName(m.OriginModelName, channel.ModelMapping) + m.ActualModel, _ = GetMappedModelName(m.OriginModel, channel.ModelMapping) m.ClearValues() } diff --git a/service/aiproxy/relay/price/image.go b/service/aiproxy/relay/price/image.go index 19bcde3f7b2..e5171f1a5e0 100644 --- a/service/aiproxy/relay/price/image.go +++ b/service/aiproxy/relay/price/image.go @@ -8,17 +8,11 @@ import ( "github.com/labring/sealos/service/aiproxy/model" ) -func GetImageSizePrice(model string, reqModel string, size string) (float64, bool) { +func GetImageSizePrice(model string, 
size string) (float64, bool) { if !config.GetBillingEnabled() { return 0, false } - if price, ok := getImageSizePrice(model, size); ok { - return price, true - } - if price, ok := getImageSizePrice(reqModel, size); ok { - return price, true - } - return 0, false + return getImageSizePrice(model, size) } func getImageSizePrice(modelName string, size string) (float64, bool) { diff --git a/service/aiproxy/relay/price/model.go b/service/aiproxy/relay/price/model.go index 09cda1bd708..48da1919591 100644 --- a/service/aiproxy/relay/price/model.go +++ b/service/aiproxy/relay/price/model.go @@ -16,18 +16,10 @@ const ( // https://openai.com/pricing // 价格单位:人民币/1K tokens -func GetModelPrice(mapedName string, reqModel string) (float64, float64, bool) { +func GetModelPrice(modelName string) (float64, float64, bool) { if !config.GetBillingEnabled() { return 0, 0, true } - price, completionPrice, ok := getModelPrice(mapedName) - if !ok && reqModel != "" { - price, completionPrice, ok = getModelPrice(reqModel) - } - return price, completionPrice, ok -} - -func getModelPrice(modelName string) (float64, float64, bool) { modelConfig, ok := model.CacheGetModelConfig(modelName) if !ok { return 0, 0, false From 8da1e38d72998b3ab20df9132f84961c0e235b17 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 16:38:50 +0800 Subject: [PATCH 065/167] refactor: post relay --- service/aiproxy/relay/controller/helper.go | 225 +++++++++++++-------- 1 file changed, 137 insertions(+), 88 deletions(-) diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index eb156d5d7c5..95839b50881 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -79,8 +79,8 @@ func postConsumeAmount( code int, usage *relaymodel.Usage, meta *meta.Meta, - price, - completionPrice float64, + inputPrice, + outputPrice float64, content string, requestDetail *model.RequestDetail, ) { @@ -94,70 +94,76 @@ func 
postConsumeAmount( if meta.IsChannelTest { return } + log := middleware.NewLogger() middleware.SetLogFieldsFromMeta(meta, log.Data) + + amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta, log) + + err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, requestDetail, amount) + if err != nil { + log.Error("error batch record consume: " + err.Error()) + } +} + +func calculateAmount(ctx context.Context, usage *relaymodel.Usage, inputPrice, outputPrice float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { if usage == nil { - err := model.BatchRecordConsume( + return 0 + } + + promptTokens := usage.PromptTokens + completionTokens := usage.CompletionTokens + totalTokens := promptTokens + completionTokens + + if totalTokens == 0 { + return 0 + } + + promptAmount := decimal.NewFromInt(int64(promptTokens)). + Mul(decimal.NewFromFloat(inputPrice)). + Div(decimal.NewFromInt(billingprice.PriceUnit)) + completionAmount := decimal.NewFromInt(int64(completionTokens)). + Mul(decimal.NewFromFloat(outputPrice)). 
+ Div(decimal.NewFromInt(billingprice.PriceUnit)) + amount := promptAmount.Add(completionAmount).InexactFloat64() + + if amount > 0 { + return processGroupConsume(ctx, amount, postGroupConsumer, meta, log) + } + + return 0 +} + +func processGroupConsume(ctx context.Context, amount float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { + consumedAmount, err := postGroupConsumer.PostGroupConsume(ctx, meta.Token.Name, amount) + if err != nil { + log.Error("error consuming token remain amount: " + err.Error()) + if err := model.CreateConsumeError( meta.RequestID, meta.RequestAt, meta.Group.ID, - code, - meta.Channel.ID, - 0, - 0, + meta.Token.Name, meta.OriginModel, + err.Error(), + amount, meta.Token.ID, - meta.Token.Name, - 0, - price, - completionPrice, - meta.Endpoint, - content, - meta.Mode, - requestDetail, - ) - if err != nil { - log.Error("error batch record consume: " + err.Error()) + ); err != nil { + log.Error("failed to create consume error: " + err.Error()) } - return + return amount } - promptTokens := usage.PromptTokens - completionTokens := usage.CompletionTokens - var amount float64 - totalTokens := promptTokens + completionTokens - if totalTokens != 0 { - promptAmount := decimal. - NewFromInt(int64(promptTokens)). - Mul(decimal.NewFromFloat(price)). - Div(decimal.NewFromInt(billingprice.PriceUnit)) - completionAmount := decimal. - NewFromInt(int64(completionTokens)). - Mul(decimal.NewFromFloat(completionPrice)). 
- Div(decimal.NewFromInt(billingprice.PriceUnit)) - amount = promptAmount.Add(completionAmount).InexactFloat64() - if amount > 0 { - _amount, err := postGroupConsumer.PostGroupConsume(ctx, meta.Token.Name, amount) - if err != nil { - log.Error("error consuming token remain amount: " + err.Error()) - err = model.CreateConsumeError( - meta.RequestID, - meta.RequestAt, - meta.Group.ID, - meta.Token.Name, - meta.OriginModel, - err.Error(), - amount, - meta.Token.ID, - ) - if err != nil { - log.Error("failed to create consume error: " + err.Error()) - } - } else { - amount = _amount - } - } + return consumedAmount +} + +func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, requestDetail *model.RequestDetail, amount float64) error { + promptTokens := 0 + completionTokens := 0 + if usage != nil { + promptTokens = usage.PromptTokens + completionTokens = usage.CompletionTokens } - err := model.BatchRecordConsume( + + return model.BatchRecordConsume( meta.RequestID, meta.RequestAt, meta.Group.ID, @@ -169,16 +175,13 @@ func postConsumeAmount( meta.Token.ID, meta.Token.Name, amount, - price, - completionPrice, + inputPrice, + outputPrice, meta.Endpoint, content, meta.Mode, requestDetail, ) - if err != nil { - log.Error("error batch record consume: " + err.Error()) - } } func isErrorHappened(resp *http.Response) bool { @@ -230,31 +233,65 @@ func putBuffer(buf *bytes.Buffer) { func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.Usage, *model.RequestDetail, *relaymodel.ErrorWithStatusCode) { log := middleware.GetLogger(c) - detail := model.RequestDetail{} + + // 1. Get request body + if err := getRequestBody(meta, c, &detail); err != nil { + return nil, nil, err + } + + // 2. Convert and prepare request + resp, err := prepareAndDoRequest(a, c, meta) + if err != nil { + return nil, &detail, err + } + + // 3. 
Handle error response + if isErrorHappened(resp) { + if err := handleErrorResponse(resp, &detail); err != nil { + return nil, &detail, err + } + return nil, &detail, utils.RelayErrorHandler(meta, resp) + } + + // 4. Handle success response + usage, relayErr := handleSuccessResponse(a, c, meta, resp, &detail) + if relayErr != nil { + return nil, &detail, relayErr + } + + // 5. Update usage metrics + updateUsageMetrics(usage, meta, log) + + return usage, &detail, nil +} + +func getRequestBody(meta *meta.Meta, c *gin.Context, detail *model.RequestDetail) *relaymodel.ErrorWithStatusCode { switch meta.Mode { case relaymode.AudioTranscription, relaymode.AudioTranslation: - break + return nil default: reqBody, err := common.GetRequestBody(c.Request) if err != nil { - return nil, nil, openai.ErrorWrapperWithMessage("get request body failed: "+err.Error(), "get_request_body_failed", http.StatusBadRequest) + return openai.ErrorWrapperWithMessage("get request body failed: "+err.Error(), "get_request_body_failed", http.StatusBadRequest) } detail.RequestBody = conv.BytesToString(reqBody) + return nil } +} +func prepareAndDoRequest(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*http.Response, *relaymodel.ErrorWithStatusCode) { method, header, body, err := a.ConvertRequest(meta, c.Request) if err != nil { - return nil, &detail, openai.ErrorWrapperWithMessage("convert request failed: "+err.Error(), "convert_request_failed", http.StatusBadRequest) + return nil, openai.ErrorWrapperWithMessage("convert request failed: "+err.Error(), "convert_request_failed", http.StatusBadRequest) } fullRequestURL, err := a.GetRequestURL(meta) if err != nil { - return nil, &detail, openai.ErrorWrapperWithMessage("get request url failed: "+err.Error(), "get_request_url_failed", http.StatusBadRequest) + return nil, openai.ErrorWrapperWithMessage("get request url failed: "+err.Error(), "get_request_url_failed", http.StatusBadRequest) } - timeout := config.GetTimeoutWithModelType()[meta.Mode] - if 
timeout > 0 { + if timeout := config.GetTimeoutWithModelType()[meta.Mode]; timeout > 0 { rawRequest := c.Request ctx, cancel := context.WithTimeout(rawRequest.Context(), time.Duration(timeout)*time.Second) defer cancel() @@ -264,10 +301,17 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U req, err := http.NewRequestWithContext(c.Request.Context(), method, fullRequestURL, body) if err != nil { - return nil, &detail, openai.ErrorWrapperWithMessage("new request failed: "+err.Error(), "new_request_failed", http.StatusBadRequest) + return nil, openai.ErrorWrapperWithMessage("new request failed: "+err.Error(), "new_request_failed", http.StatusBadRequest) } - log.Debugf("request url: %s", fullRequestURL) + if err := setupRequestHeader(a, c, meta, req, header); err != nil { + return nil, err + } + + return doRequest(a, c, meta, req) +} + +func setupRequestHeader(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, req *http.Request, header http.Header) *relaymodel.ErrorWithStatusCode { contentType := req.Header.Get("Content-Type") if contentType == "" { contentType = "application/json; charset=utf-8" @@ -276,32 +320,37 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U for key, value := range header { req.Header[key] = value } - err = a.SetupRequestHeader(meta, c, req) - if err != nil { - return nil, &detail, openai.ErrorWrapperWithMessage("setup request header failed: "+err.Error(), "setup_request_header_failed", http.StatusBadRequest) + if err := a.SetupRequestHeader(meta, c, req); err != nil { + return openai.ErrorWrapperWithMessage("setup request header failed: "+err.Error(), "setup_request_header_failed", http.StatusBadRequest) } + return nil +} +func doRequest(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, req *http.Request) (*http.Response, *relaymodel.ErrorWithStatusCode) { resp, err := a.DoRequest(meta, c, req) if err != nil { if errors.Is(err, context.Canceled) { - return nil, &detail, 
openai.ErrorWrapperWithMessage("do request failed: request canceled by client", "request_canceled", http.StatusBadRequest) + return nil, openai.ErrorWrapperWithMessage("do request failed: request canceled by client", "request_canceled", http.StatusBadRequest) } if errors.Is(err, context.DeadlineExceeded) { - return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: request timeout", "request_timeout", http.StatusGatewayTimeout) + return nil, openai.ErrorWrapperWithMessage("do request failed: request timeout", "request_timeout", http.StatusGatewayTimeout) } - return nil, &detail, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "request_failed", http.StatusBadRequest) + return nil, openai.ErrorWrapperWithMessage("do request failed: "+err.Error(), "request_failed", http.StatusBadRequest) } + return resp, nil +} - if isErrorHappened(resp) { - respBody, err := io.ReadAll(resp.Body) - if err != nil { - return nil, &detail, openai.ErrorWrapperWithMessage("read response body failed: "+err.Error(), "read_response_body_failed", http.StatusBadRequest) - } - detail.ResponseBody = conv.BytesToString(respBody) - resp.Body = io.NopCloser(bytes.NewReader(respBody)) - return nil, &detail, utils.RelayErrorHandler(meta, resp) +func handleErrorResponse(resp *http.Response, detail *model.RequestDetail) *relaymodel.ErrorWithStatusCode { + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return openai.ErrorWrapperWithMessage("read response body failed: "+err.Error(), "read_response_body_failed", http.StatusBadRequest) } + detail.ResponseBody = conv.BytesToString(respBody) + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + return nil +} +func handleSuccessResponse(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, resp *http.Response, detail *model.RequestDetail) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) { buf := getBuffer() defer putBuffer(buf) @@ -315,11 +364,12 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta 
*meta.Meta) (*relaymodel.U c.Header("Content-Type", resp.Header.Get("Content-Type")) usage, relayErr := a.DoResponse(meta, c, resp) - // copy buf to detail.ResponseBody detail.ResponseBody = rw.body.String() - if relayErr != nil { - return nil, &detail, relayErr - } + + return usage, relayErr +} + +func updateUsageMetrics(usage *relaymodel.Usage, meta *meta.Meta, log *log.Entry) { if usage == nil { usage = &relaymodel.Usage{ PromptTokens: meta.InputTokens, @@ -332,5 +382,4 @@ func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.U log.Data["t_input"] = usage.PromptTokens log.Data["t_output"] = usage.CompletionTokens log.Data["t_total"] = usage.TotalTokens - return usage, &detail, nil } From f56ea86bbed232838a19838be3c570f8881e4ed1 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 16:59:36 +0800 Subject: [PATCH 066/167] feat: fill gaps before and after point --- service/aiproxy/controller/dashboard.go | 27 ++++++++++++++++++---- service/aiproxy/relay/relaymode/helper.go | 28 ----------------------- 2 files changed, 22 insertions(+), 33 deletions(-) delete mode 100644 service/aiproxy/relay/relaymode/helper.go diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 6f3472a3f36..3bb54f1f11b 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -33,12 +33,21 @@ func getDashboardTime(t string) (time.Time, time.Time, time.Duration) { return start, end, timeSpan } -func fillGaps(data []*model.HourlyChartData, timeSpan time.Duration) []*model.HourlyChartData { - if len(data) <= 1 { +func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time.Duration) []*model.HourlyChartData { + if len(data) == 0 { return data } - result := make([]*model.HourlyChartData, 0, len(data)) + result := make([]*model.HourlyChartData, 0, len(data)+2) + + // Add zero point before first data point if within range + firstPoint := 
time.Unix(data[0].Timestamp, 0) + if firstPointPrev := firstPoint.Add(-timeSpan); !firstPointPrev.Before(start) { + result = append(result, &model.HourlyChartData{ + Timestamp: firstPointPrev.Unix(), + }) + } + result = append(result, data[0]) for i := 1; i < len(data); i++ { @@ -75,6 +84,14 @@ func fillGaps(data []*model.HourlyChartData, timeSpan time.Duration) []*model.Ho result = append(result, curr) } + // Add zero point after last data point if within range + lastPoint := time.Unix(data[len(data)-1].Timestamp, 0) + if lastPointNext := lastPoint.Add(timeSpan); !lastPointNext.After(end) { + result = append(result, &model.HourlyChartData{ + Timestamp: lastPointNext.Unix(), + }) + } + return result } @@ -104,7 +121,7 @@ func GetDashboard(c *gin.Context) { return } - dashboards.ChartData = fillGaps(dashboards.ChartData, timeSpan) + dashboards.ChartData = fillGaps(dashboards.ChartData, start, end, timeSpan) middleware.SuccessResponse(c, dashboards) } @@ -126,6 +143,6 @@ func GetGroupDashboard(c *gin.Context) { return } - dashboards.ChartData = fillGaps(dashboards.ChartData, timeSpan) + dashboards.ChartData = fillGaps(dashboards.ChartData, start, end, timeSpan) middleware.SuccessResponse(c, dashboards) } diff --git a/service/aiproxy/relay/relaymode/helper.go b/service/aiproxy/relay/relaymode/helper.go deleted file mode 100644 index 20aa30a33e8..00000000000 --- a/service/aiproxy/relay/relaymode/helper.go +++ /dev/null @@ -1,28 +0,0 @@ -package relaymode - -// func GetByPath(path string) int { -// switch { -// case strings.HasPrefix(path, "/v1/chat/completions"): -// return ChatCompletions -// case strings.HasPrefix(path, "/v1/completions"): -// return Completions -// case strings.HasSuffix(path, "embeddings"): -// return Embeddings -// case strings.HasPrefix(path, "/v1/moderations"): -// return Moderations -// case strings.HasPrefix(path, "/v1/images/generations"): -// return ImagesGenerations -// case strings.HasPrefix(path, "/v1/edits"): -// return Edits -// case 
strings.HasPrefix(path, "/v1/audio/speech"): -// return AudioSpeech -// case strings.HasPrefix(path, "/v1/audio/transcriptions"): -// return AudioTranscription -// case strings.HasPrefix(path, "/v1/audio/translations"): -// return AudioTranslation -// case strings.HasPrefix(path, "/v1/rerank"): -// return Rerank -// default: -// return Unknown -// } -// } From 92d48c13a21ed197088d2fe51c1c55a26645d38e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 17:12:23 +0800 Subject: [PATCH 067/167] fix: qwen long tokens --- service/aiproxy/relay/adaptor/ali/constants.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index 09833eed78f..61e0f7490d4 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -107,8 +107,8 @@ var ModelList = []*model.ModelConfig{ OutputPrice: 0.002, RPM: 1200, Config: model.NewModelConfig( - model.WithModelConfigMaxContextTokens(10000000), - model.WithModelConfigMaxInputTokens(10000000), + model.WithModelConfigMaxContextTokens(1000000), + model.WithModelConfigMaxInputTokens(1000000), model.WithModelConfigMaxOutputTokens(6000), model.WithModelConfigToolChoice(true), ), From 9158726df4d5f32444bcd6dda225b6a9d7fe7ff4 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 17:40:26 +0800 Subject: [PATCH 068/167] feat: get rpm from redis --- .../common/{rate-limit.go => rpmlimit/mem.go} | 2 +- service/aiproxy/common/rpmlimit/rate-limit.go | 137 ++++++++++++++++++ service/aiproxy/controller/dashboard.go | 26 ++++ service/aiproxy/middleware/distributor.go | 10 +- service/aiproxy/middleware/rate-limit.go | 84 ----------- 5 files changed, 168 insertions(+), 91 deletions(-) rename service/aiproxy/common/{rate-limit.go => rpmlimit/mem.go} (99%) create mode 100644 service/aiproxy/common/rpmlimit/rate-limit.go delete mode 100644 
service/aiproxy/middleware/rate-limit.go diff --git a/service/aiproxy/common/rate-limit.go b/service/aiproxy/common/rpmlimit/mem.go similarity index 99% rename from service/aiproxy/common/rate-limit.go rename to service/aiproxy/common/rpmlimit/mem.go index a94b6496fc2..5463d164b45 100644 --- a/service/aiproxy/common/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/mem.go @@ -1,4 +1,4 @@ -package common +package rpmlimit import ( "sync" diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go new file mode 100644 index 00000000000..0c6c30da93e --- /dev/null +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -0,0 +1,137 @@ +package rpmlimit + +import ( + "context" + "fmt" + "time" + + "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/config" + log "github.com/sirupsen/logrus" +) + +var inMemoryRateLimiter InMemoryRateLimiter + +const ( + groupModelRPMKey = "group_model_rpm:%s:%s" +) + +// 1. 使用Redis列表存储请求时间戳 +// 2. 列表长度代表当前窗口内的请求数 +// 3. 如果请求数未达到限制,直接添加新请求并返回成功 +// 4. 如果达到限制,则检查最老的请求是否已经过期 +// 5. 如果最老的请求已过期,移除它并添加新请求,否则拒绝新请求 +// 6. 
通过EXPIRE命令设置键的过期时间,自动清理过期数据 +var luaScript = ` +local key = KEYS[1] +local max_requests = tonumber(ARGV[1]) +local window = tonumber(ARGV[2]) +local current_time = tonumber(ARGV[3]) + +local count = redis.call('LLEN', key) + +if count < max_requests then + redis.call('LPUSH', key, current_time) + redis.call('PEXPIRE', key, window) + return 1 +else + local oldest = redis.call('LINDEX', key, -1) + if current_time - tonumber(oldest) >= window then + redis.call('LPUSH', key, current_time) + redis.call('LTRIM', key, 0, max_requests - 1) + redis.call('PEXPIRE', key, window) + return 1 + else + return 0 + end +end +` + +var getRPMSumLuaScript = ` +local pattern = ARGV[1] +local window = tonumber(ARGV[2]) +local current_time = tonumber(ARGV[3]) + +local keys = redis.call('KEYS', pattern) +local total = 0 + +for _, key in ipairs(keys) do + local timestamps = redis.call('LRANGE', key, 0, -1) + for _, ts in ipairs(timestamps) do + if current_time - tonumber(ts) < window then + total = total + 1 + end + end +end + +return total +` + +func GetRPM(ctx context.Context, group, model string) (int64, error) { + if !common.RedisEnabled { + return 0, nil + } + + var pattern string + if group == "" && model == "" { + pattern = "group_model_rpm:*:*" + } else if group == "" { + pattern = "group_model_rpm:*:" + model + } else if model == "" { + pattern = fmt.Sprintf("group_model_rpm:%s:*", group) + } else { + pattern = fmt.Sprintf("group_model_rpm:%s:%s", group, model) + } + + rdb := common.RDB + currentTime := time.Now().UnixMilli() + result, err := rdb.Eval(ctx, getRPMSumLuaScript, []string{}, pattern, time.Minute.Milliseconds(), currentTime).Int64() + if err != nil { + return 0, err + } + + return result, nil +} + +func redisRateLimitRequest(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) (bool, error) { + rdb := common.RDB + currentTime := time.Now().UnixMilli() + result, err := rdb.Eval(ctx, luaScript, []string{ + fmt.Sprintf(groupModelRPMKey, 
group, model), + }, maxRequestNum, duration.Milliseconds(), currentTime).Int64() + if err != nil { + return false, err + } + return result == 1, nil +} + +func RateLimit(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) (bool, error) { + if maxRequestNum == 0 { + return true, nil + } + if common.RedisEnabled { + return redisRateLimitRequest(ctx, group, model, maxRequestNum, duration) + } + return MemoryRateLimit(ctx, group, model, maxRequestNum, duration), nil +} + +// ignore redis error +func ForceRateLimit(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) bool { + if maxRequestNum == 0 { + return true + } + if common.RedisEnabled { + ok, err := redisRateLimitRequest(ctx, group, model, maxRequestNum, duration) + if err == nil { + return ok + } + log.Error("rate limit error: " + err.Error()) + } + return MemoryRateLimit(ctx, group, model, maxRequestNum, duration) +} + +func MemoryRateLimit(_ context.Context, group, model string, maxRequestNum int64, duration time.Duration) bool { + // It's safe to call multi times. + inMemoryRateLimiter.Init(config.RateLimitKeyExpirationDuration) + return inMemoryRateLimiter.Request(fmt.Sprintf(groupModelRPMKey, group, model), int(maxRequestNum), duration) +} diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 3bb54f1f11b..0104aaefe7c 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -6,6 +6,8 @@ import ( "time" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/rpmlimit" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" ) @@ -111,6 +113,8 @@ func getTimeSpanWithDefault(c *gin.Context, defaultTimeSpan time.Duration) time. 
} func GetDashboard(c *gin.Context) { + log := middleware.GetLogger(c) + start, end, timeSpan := getDashboardTime(c.Query("type")) modelName := c.Query("model") timeSpan = getTimeSpanWithDefault(c, timeSpan) @@ -122,10 +126,22 @@ func GetDashboard(c *gin.Context) { } dashboards.ChartData = fillGaps(dashboards.ChartData, start, end, timeSpan) + + if common.RedisEnabled { + rpm, err := rpmlimit.GetRPM(c.Request.Context(), "", modelName) + if err != nil { + log.Errorf("failed to get rpm: %v", err) + } else { + dashboards.RPM = rpm + } + } + middleware.SuccessResponse(c, dashboards) } func GetGroupDashboard(c *gin.Context) { + log := middleware.GetLogger(c) + group := c.Param("group") if group == "" { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") @@ -144,5 +160,15 @@ func GetGroupDashboard(c *gin.Context) { } dashboards.ChartData = fillGaps(dashboards.ChartData, start, end, timeSpan) + + if common.RedisEnabled && tokenName == "" { + rpm, err := rpmlimit.GetRPM(c.Request.Context(), group, modelName) + if err != nil { + log.Errorf("failed to get rpm: %v", err) + } else { + dashboards.RPM = rpm + } + } + middleware.SuccessResponse(c, dashboards) } diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index ac2ce9b8219..911b4df007c 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -9,15 +9,12 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/ctxkey" + "github.com/labring/sealos/service/aiproxy/common/rpmlimit" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" log "github.com/sirupsen/logrus" ) -const ( - groupModelRPMKey = "group_model_rpm:%s:%s" -) - type ModelRequest struct { Model string `form:"model" json:"model"` } @@ -59,9 +56,10 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group 
*model.GroupCache, requestMo adjustedModelRPM := int64(float64(modelRPM) * groupRPMRatio * groupConsumeLevelRpmRatio) - ok := ForceRateLimit( + ok := rpmlimit.ForceRateLimit( c.Request.Context(), - fmt.Sprintf(groupModelRPMKey, group.ID, requestModel), + group.ID, + requestModel, adjustedModelRPM, time.Minute, ) diff --git a/service/aiproxy/middleware/rate-limit.go b/service/aiproxy/middleware/rate-limit.go deleted file mode 100644 index 103138e41c0..00000000000 --- a/service/aiproxy/middleware/rate-limit.go +++ /dev/null @@ -1,84 +0,0 @@ -package middleware - -import ( - "context" - "time" - - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/config" - log "github.com/sirupsen/logrus" -) - -var inMemoryRateLimiter common.InMemoryRateLimiter - -// 1. 使用Redis列表存储请求时间戳 -// 2. 列表长度代表当前窗口内的请求数 -// 3. 如果请求数未达到限制,直接添加新请求并返回成功 -// 4. 如果达到限制,则检查最老的请求是否已经过期 -// 5. 如果最老的请求已过期,移除它并添加新请求,否则拒绝新请求 -// 6. 通过EXPIRE命令设置键的过期时间,自动清理过期数据 -var luaScript = ` -local key = KEYS[1] -local max_requests = tonumber(ARGV[1]) -local window = tonumber(ARGV[2]) -local current_time = tonumber(ARGV[3]) - -local count = redis.call('LLEN', key) - -if count < max_requests then - redis.call('LPUSH', key, current_time) - redis.call('PEXPIRE', key, window) - return 1 -else - local oldest = redis.call('LINDEX', key, -1) - if current_time - tonumber(oldest) >= window then - redis.call('LPUSH', key, current_time) - redis.call('LTRIM', key, 0, max_requests - 1) - redis.call('PEXPIRE', key, window) - return 1 - else - return 0 - end -end -` - -func redisRateLimitRequest(ctx context.Context, key string, maxRequestNum int64, duration time.Duration) (bool, error) { - rdb := common.RDB - currentTime := time.Now().UnixMilli() - result, err := rdb.Eval(ctx, luaScript, []string{key}, maxRequestNum, duration.Milliseconds(), currentTime).Int64() - if err != nil { - return false, err - } - return result == 1, nil -} - -func RateLimit(ctx context.Context, key string, 
maxRequestNum int64, duration time.Duration) (bool, error) { - if maxRequestNum == 0 { - return true, nil - } - if common.RedisEnabled { - return redisRateLimitRequest(ctx, key, maxRequestNum, duration) - } - return MemoryRateLimit(ctx, key, maxRequestNum, duration), nil -} - -// ignore redis error -func ForceRateLimit(ctx context.Context, key string, maxRequestNum int64, duration time.Duration) bool { - if maxRequestNum == 0 { - return true - } - if common.RedisEnabled { - ok, err := redisRateLimitRequest(ctx, key, maxRequestNum, duration) - if err == nil { - return ok - } - log.Error("rate limit error: " + err.Error()) - } - return MemoryRateLimit(ctx, key, maxRequestNum, duration) -} - -func MemoryRateLimit(_ context.Context, key string, maxRequestNum int64, duration time.Duration) bool { - // It's safe to call multi times. - inMemoryRateLimiter.Init(config.RateLimitKeyExpirationDuration) - return inMemoryRateLimiter.Request(key, int(maxRequestNum), duration) -} From 92099d64dac224ea6777c00f5283a7743ce18b45 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 21:07:05 +0800 Subject: [PATCH 069/167] fix: fill gaps --- service/aiproxy/controller/dashboard.go | 57 ++++++++++++++++--------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 0104aaefe7c..c9f8c92ef89 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -40,16 +40,37 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time return data } - result := make([]*model.HourlyChartData, 0, len(data)+2) - - // Add zero point before first data point if within range + // Handle first point firstPoint := time.Unix(data[0].Timestamp, 0) - if firstPointPrev := firstPoint.Add(-timeSpan); !firstPointPrev.Before(start) { - result = append(result, &model.HourlyChartData{ - Timestamp: firstPointPrev.Unix(), + firstAlignedTime := 
firstPoint + for !firstAlignedTime.Add(-timeSpan).Before(start) { + firstAlignedTime = firstAlignedTime.Add(-timeSpan) + } + var firstIsZero bool + if !firstAlignedTime.Equal(firstPoint) { + data = append([]*model.HourlyChartData{ + { + Timestamp: firstAlignedTime.Unix(), + }, + }, data...) + firstIsZero = true + } + + // Handle last point + lastPoint := time.Unix(data[len(data)-1].Timestamp, 0) + lastAlignedTime := lastPoint + for !lastAlignedTime.Add(timeSpan).After(end) { + lastAlignedTime = lastAlignedTime.Add(timeSpan) + } + var lastIsZero bool + if !lastAlignedTime.Equal(lastPoint) { + data = append(data, &model.HourlyChartData{ + Timestamp: lastAlignedTime.Unix(), }) + lastIsZero = true } + result := make([]*model.HourlyChartData, 0, len(data)) result = append(result, data[0]) for i := 1; i < len(data); i++ { @@ -66,13 +87,17 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time // If gap is more than 3 hours, only add boundary points if hourDiff > 3 { // Add point for hour after prev - result = append(result, &model.HourlyChartData{ - Timestamp: prev.Timestamp + int64(timeSpan.Seconds()), - }) + if i != 1 || (i == 1 && !firstIsZero) { + result = append(result, &model.HourlyChartData{ + Timestamp: prev.Timestamp + int64(timeSpan.Seconds()), + }) + } // Add point for hour before curr - result = append(result, &model.HourlyChartData{ - Timestamp: curr.Timestamp - int64(timeSpan.Seconds()), - }) + if i != len(data)-1 || (i == len(data)-1 && !lastIsZero) { + result = append(result, &model.HourlyChartData{ + Timestamp: curr.Timestamp - int64(timeSpan.Seconds()), + }) + } result = append(result, curr) continue } @@ -86,14 +111,6 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time result = append(result, curr) } - // Add zero point after last data point if within range - lastPoint := time.Unix(data[len(data)-1].Timestamp, 0) - if lastPointNext := lastPoint.Add(timeSpan); !lastPointNext.After(end) { - 
result = append(result, &model.HourlyChartData{ - Timestamp: lastPointNext.Unix(), - }) - } - return result } From b3a1024f5de51cbab2b05eafa2f28738eec4f7cc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 22:38:07 +0800 Subject: [PATCH 070/167] fix: log error --- service/aiproxy/controller/channel-test.go | 2 +- service/aiproxy/relay/controller/handle.go | 14 +++++++++++++- service/aiproxy/relay/controller/helper.go | 10 +++++++++- service/aiproxy/relay/model/misc.go | 21 ++++++++++++--------- 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 450145a2cc9..4294b3f5be3 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -63,7 +63,7 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe respStr = w.Body.String() code = w.Code } else { - respStr = bizErr.String() + respStr = bizErr.Error.String() code = bizErr.StatusCode } diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index e46e7c8b991..a027960062c 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -8,6 +8,7 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -78,6 +79,17 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa // 5. 
Do request usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { + if detail != nil && config.DebugEnabled { + log.Errorf( + "handle failed: %+v\nrequest detail:\n%s\nresponse detail:\n%s", + respErr.Error, + detail.RequestBody, + detail.ResponseBody, + ) + } else { + log.Errorf("handle failed: %+v", respErr.Error) + } + ConsumeWaitGroup.Add(1) go postConsumeAmount(context.Background(), &ConsumeWaitGroup, @@ -87,7 +99,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa meta, preCheckReq.InputPrice, preCheckReq.OutputPrice, - respErr.String(), + respErr.Error.String(), detail, ) return respErr diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 95839b50881..01254f0d344 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -231,7 +231,15 @@ func putBuffer(buf *bytes.Buffer) { bufferPool.Put(buf) } -func DoHelper(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*relaymodel.Usage, *model.RequestDetail, *relaymodel.ErrorWithStatusCode) { +func DoHelper( + a adaptor.Adaptor, + c *gin.Context, + meta *meta.Meta, +) ( + *relaymodel.Usage, + *model.RequestDetail, + *relaymodel.ErrorWithStatusCode, +) { log := middleware.GetLogger(c) detail := model.RequestDetail{} diff --git a/service/aiproxy/relay/model/misc.go b/service/aiproxy/relay/model/misc.go index ff4029eeb73..8fa8bd39133 100644 --- a/service/aiproxy/relay/model/misc.go +++ b/service/aiproxy/relay/model/misc.go @@ -2,6 +2,9 @@ package model import ( "fmt" + + json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common/conv" ) type Usage struct { @@ -11,14 +14,18 @@ type Usage struct { } type Error struct { - Code any `json:"code"` - Message string `json:"message"` - Type string `json:"type"` - Param string `json:"param"` + Code any `json:"code,omitempty"` + Message string `json:"message,omitempty"` + Type string 
`json:"type,omitempty"` + Param string `json:"param,omitempty"` } func (e *Error) String() string { - return fmt.Sprintf("code: %v, message: %s, type: %s, param: %s", e.Code, e.Message, e.Type, e.Param) + jsonBuf, err := json.Marshal(e) + if err != nil { + return fmt.Sprintf("code: %v, message: %s, type: %s, param: %s", e.Code, e.Message, e.Type, e.Param) + } + return conv.BytesToString(jsonBuf) } func (e *Error) Error() string { @@ -29,7 +36,3 @@ type ErrorWithStatusCode struct { Error Error `json:"error"` StatusCode int `json:"-"` } - -func (e *ErrorWithStatusCode) String() string { - return fmt.Sprintf("%s, status_code: %d", e.Error.String(), e.StatusCode) -} From 28fb7d88133abd674edb267eaa70d26dda15b812 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 2 Jan 2025 22:39:23 +0800 Subject: [PATCH 071/167] fix: token not fount err log --- service/aiproxy/model/token.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index 5b82ce7e092..11be7cf5cdc 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -197,10 +197,10 @@ func ValidateAndGetToken(key string) (token *TokenCache, err error) { } token, err = CacheGetTokenByKey(key) if err != nil { - log.Error("get token from cache failed: " + err.Error()) if errors.Is(err, gorm.ErrRecordNotFound) { return nil, errors.New("invalid token") } + log.Error("get token from cache failed: " + err.Error()) return nil, errors.New("token validation failed") } switch token.Status { From 1c9d1784985f065b79229a63ae943a1917ffe673 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 3 Jan 2025 10:17:07 +0800 Subject: [PATCH 072/167] fix: if err resp is not json, replay raw content --- service/aiproxy/relay/utils/error.go | 38 ++++------------------------ 1 file changed, 5 insertions(+), 33 deletions(-) diff --git a/service/aiproxy/relay/utils/error.go b/service/aiproxy/relay/utils/error.go index c70ce99eeaf..5f41c330e94 
100644 --- a/service/aiproxy/relay/utils/error.go +++ b/service/aiproxy/relay/utils/error.go @@ -5,13 +5,11 @@ import ( "io" "net/http" "strconv" - "strings" json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" - "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) type GeneralErrorResponse struct { @@ -60,7 +58,7 @@ const ( ErrorCodeBadResponse = "bad_response" ) -func RelayErrorHandler(meta *meta.Meta, resp *http.Response) *model.ErrorWithStatusCode { +func RelayErrorHandler(_ *meta.Meta, resp *http.Response) *model.ErrorWithStatusCode { if resp == nil { return &model.ErrorWithStatusCode{ StatusCode: 500, @@ -71,36 +69,7 @@ func RelayErrorHandler(meta *meta.Meta, resp *http.Response) *model.ErrorWithSta }, } } - switch meta.Mode { - case relaymode.Rerank: - return RerankErrorHandler(resp) - default: - return RelayDefaultErrorHanlder(resp) - } -} - -func RerankErrorHandler(resp *http.Response) *model.ErrorWithStatusCode { - defer resp.Body.Close() - respBody, err := io.ReadAll(resp.Body) - if err != nil { - return &model.ErrorWithStatusCode{ - StatusCode: resp.StatusCode, - Error: model.Error{ - Message: err.Error(), - Type: ErrorTypeUpstream, - Code: ErrorCodeBadResponse, - }, - } - } - trimmedRespBody := strings.Trim(conv.BytesToString(respBody), "\"") - return &model.ErrorWithStatusCode{ - StatusCode: resp.StatusCode, - Error: model.Error{ - Message: trimmedRespBody, - Type: ErrorTypeUpstream, - Code: ErrorCodeBadResponse, - }, - } + return RelayDefaultErrorHanlder(resp) } func RelayDefaultErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode { @@ -126,11 +95,14 @@ func RelayDefaultErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode { Param: strconv.Itoa(resp.StatusCode), }, } + var errResponse GeneralErrorResponse err = json.Unmarshal(respBody, &errResponse) if err != nil { + 
ErrorWithStatusCode.Error.Message = conv.BytesToString(respBody) return ErrorWithStatusCode } + if errResponse.Error.Message != "" { // OpenAI format error, so we override the default one ErrorWithStatusCode.Error = errResponse.Error From 01c8b2efa9f31faa334d52b14316d5f4628108e2 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 3 Jan 2025 10:27:01 +0800 Subject: [PATCH 073/167] fix: do not save same response body and content --- service/aiproxy/relay/controller/helper.go | 18 +++--------------- service/aiproxy/relay/model/misc.go | 8 ++++++++ 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 01254f0d344..835b2e56d70 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "io" "net/http" "sync" "time" @@ -256,10 +255,9 @@ func DoHelper( // 3. Handle error response if isErrorHappened(resp) { - if err := handleErrorResponse(resp, &detail); err != nil { - return nil, &detail, err - } - return nil, &detail, utils.RelayErrorHandler(meta, resp) + relayErr := utils.RelayErrorHandler(meta, resp) + // detail.ResponseBody = relayErr.JSON() + return nil, &detail, relayErr } // 4. 
Handle success response @@ -348,16 +346,6 @@ func doRequest(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, req *http.Req return resp, nil } -func handleErrorResponse(resp *http.Response, detail *model.RequestDetail) *relaymodel.ErrorWithStatusCode { - respBody, err := io.ReadAll(resp.Body) - if err != nil { - return openai.ErrorWrapperWithMessage("read response body failed: "+err.Error(), "read_response_body_failed", http.StatusBadRequest) - } - detail.ResponseBody = conv.BytesToString(respBody) - resp.Body = io.NopCloser(bytes.NewReader(respBody)) - return nil -} - func handleSuccessResponse(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, resp *http.Response, detail *model.RequestDetail) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) { buf := getBuffer() defer putBuffer(buf) diff --git a/service/aiproxy/relay/model/misc.go b/service/aiproxy/relay/model/misc.go index 8fa8bd39133..9ceb436164b 100644 --- a/service/aiproxy/relay/model/misc.go +++ b/service/aiproxy/relay/model/misc.go @@ -36,3 +36,11 @@ type ErrorWithStatusCode struct { Error Error `json:"error"` StatusCode int `json:"-"` } + +func (e *ErrorWithStatusCode) JSON() string { + jsonBuf, err := json.Marshal(e) + if err != nil { + return "" + } + return conv.BytesToString(jsonBuf) +} From 1cf6c8f15e4a2f1e2bbb4ef630cdf92d6d60ffde Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 3 Jan 2025 15:28:15 +0800 Subject: [PATCH 074/167] fix: save resp json or empty --- service/aiproxy/controller/channel-test.go | 2 +- service/aiproxy/relay/controller/handle.go | 2 +- service/aiproxy/relay/controller/helper.go | 2 +- service/aiproxy/relay/model/misc.go | 22 +++++++++++++--------- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 4294b3f5be3..1685750cb9d 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -63,7 +63,7 @@ func 
testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe respStr = w.Body.String() code = w.Code } else { - respStr = bizErr.Error.String() + respStr = bizErr.Error.JSONOrEmpty() code = bizErr.StatusCode } diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index a027960062c..68cb02cbcba 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -99,7 +99,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa meta, preCheckReq.InputPrice, preCheckReq.OutputPrice, - respErr.Error.String(), + respErr.Error.JSONOrEmpty(), detail, ) return respErr diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/helper.go index 835b2e56d70..3a5c3971b07 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/helper.go @@ -256,7 +256,7 @@ func DoHelper( // 3. Handle error response if isErrorHappened(resp) { relayErr := utils.RelayErrorHandler(meta, resp) - // detail.ResponseBody = relayErr.JSON() + detail.ResponseBody = relayErr.JSONOrEmpty() return nil, &detail, relayErr } diff --git a/service/aiproxy/relay/model/misc.go b/service/aiproxy/relay/model/misc.go index 9ceb436164b..7f06c440df8 100644 --- a/service/aiproxy/relay/model/misc.go +++ b/service/aiproxy/relay/model/misc.go @@ -1,8 +1,6 @@ package model import ( - "fmt" - json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/conv" ) @@ -20,24 +18,30 @@ type Error struct { Param string `json:"param,omitempty"` } -func (e *Error) String() string { +func (e *Error) IsEmpty() bool { + return e == nil || (e.Code == nil && e.Message == "" && e.Type == "" && e.Param == "") +} + +func (e *Error) JSONOrEmpty() string { + if e.IsEmpty() { + return "" + } jsonBuf, err := json.Marshal(e) if err != nil { - return fmt.Sprintf("code: %v, message: %s, type: %s, param: %s", e.Code, e.Message, 
e.Type, e.Param) + return "" } return conv.BytesToString(jsonBuf) } -func (e *Error) Error() string { - return e.String() -} - type ErrorWithStatusCode struct { Error Error `json:"error"` StatusCode int `json:"-"` } -func (e *ErrorWithStatusCode) JSON() string { +func (e *ErrorWithStatusCode) JSONOrEmpty() string { + if e.Error.IsEmpty() { + return "" + } jsonBuf, err := json.Marshal(e) if err != nil { return "" From 6f36e27ffe1b505cb3e4d39c300eff58d37853ac Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 3 Jan 2025 15:58:40 +0800 Subject: [PATCH 075/167] feat: sort distinct values --- service/aiproxy/model/log.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 4e2cfc8d005..06c283c1e69 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -1,8 +1,10 @@ package model import ( + "cmp" "errors" "fmt" + "slices" "strings" "time" @@ -674,7 +676,7 @@ func getChartData(group string, start, end time.Time, tokenName, modelName strin return chartData, err } -func getLogDistinctValues[T any](field string, group string, start, end time.Time) ([]T, error) { +func getLogDistinctValues[T cmp.Ordered](field string, group string, start, end time.Time) ([]T, error) { var values []T query := LogDB. Model(&Log{}). 
@@ -692,7 +694,11 @@ func getLogDistinctValues[T any](field string, group string, start, end time.Tim } err := query.Pluck(field, &values).Error - return values, err + if err != nil { + return nil, err + } + slices.Sort(values) + return values, nil } func sumTotalCount(chartData []*HourlyChartData) int64 { From 247b3ddcc7db76269cf4be66fc63fd05bf85de41 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 4 Jan 2025 21:03:20 +0800 Subject: [PATCH 076/167] fix: token models --- service/aiproxy/controller/model.go | 47 +++++++++++++++++------------ service/aiproxy/middleware/auth.go | 28 +++++++++++++++-- service/aiproxy/model/cache.go | 28 +++++++++++++++-- 3 files changed, 78 insertions(+), 25 deletions(-) diff --git a/service/aiproxy/controller/model.go b/service/aiproxy/controller/model.go index 2db6be6c1c1..c39baff7eb9 100644 --- a/service/aiproxy/controller/model.go +++ b/service/aiproxy/controller/model.go @@ -10,6 +10,7 @@ import ( "github.com/gin-gonic/gin" json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -190,19 +191,22 @@ func ChannelEnabledModelsByType(c *gin.Context) { } func ListModels(c *gin.Context) { - models := model.CacheGetEnabledModelConfigs() - - availableOpenAIModels := make([]*OpenAIModels, len(models)) - - for idx, model := range models { - availableOpenAIModels[idx] = &OpenAIModels{ - ID: model.Model, - Object: "model", - Created: 1626777600, - OwnedBy: string(model.Owner), - Root: model.Model, - Permission: permission, - Parent: nil, + modelConfigsMap := model.CacheGetEnabledModelConfigsMap() + token := c.MustGet(ctxkey.Token).(*model.TokenCache) + + availableOpenAIModels := make([]*OpenAIModels, 0, len(token.Models)) + + for _, model := range token.Models { + if mc, ok := 
modelConfigsMap[model]; ok { + availableOpenAIModels = append(availableOpenAIModels, &OpenAIModels{ + ID: model, + Object: "model", + Created: 1626777600, + OwnedBy: string(mc.Owner), + Root: model, + Permission: permission, + Parent: nil, + }) } } @@ -214,10 +218,15 @@ func ListModels(c *gin.Context) { func RetrieveModel(c *gin.Context) { modelName := c.Param("model") - enabledModels := model.GetEnabledModel2Channels() - model, ok := model.CacheGetModelConfig(modelName) - if _, exist := enabledModels[modelName]; !exist || !ok { + modelConfigsMap := model.CacheGetEnabledModelConfigsMap() + mc, ok := modelConfigsMap[modelName] + if ok { + token := c.MustGet(ctxkey.Token).(*model.TokenCache) + ok = slices.Contains(token.Models, modelName) + } + + if !ok { c.JSON(200, gin.H{ "error": &relaymodel.Error{ Message: fmt.Sprintf("the model '%s' does not exist", modelName), @@ -230,11 +239,11 @@ func RetrieveModel(c *gin.Context) { } c.JSON(200, &OpenAIModels{ - ID: model.Model, + ID: modelName, Object: "model", Created: 1626777600, - OwnedBy: string(model.Owner), - Root: model.Model, + OwnedBy: string(mc.Owner), + Root: modelName, Permission: permission, Parent: nil, }) diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index b52acd57ebe..0e10ccb3b73 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -88,9 +88,8 @@ func TokenAuth(c *gin.Context) { return } SetLogGroupFields(log.Data, group) - if len(token.Models) == 0 { - token.Models = model.CacheGetEnabledModels() - } + + storeTokenModels(token) c.Set(ctxkey.Group, group) c.Set(ctxkey.Token, token) @@ -98,6 +97,29 @@ func TokenAuth(c *gin.Context) { c.Next() } +func sliceFilter[T any](s []T, fn func(T) bool) []T { + i := 0 + for _, v := range s { + if fn(v) { + s[i] = v + i++ + } + } + return s[:i] +} + +func storeTokenModels(token *model.TokenCache) { + if len(token.Models) == 0 { + token.Models = model.CacheGetEnabledModels() + } else { + 
enabledModelsMap := model.CacheGetEnabledModelsMap() + token.Models = sliceFilter(token.Models, func(m string) bool { + _, ok := enabledModelsMap[m] + return ok + }) + } +} + func SetLogFieldsFromMeta(m *meta.Meta, fields logrus.Fields) { SetLogRequestIDField(fields, m.RequestID) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 235acff60d5..486d9ad0a61 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -329,7 +329,9 @@ func CacheGetGroupModelTPM(id string, model string) (int64, error) { var ( enabledModel2channels map[string][]*Channel enabledModels []string + enabledModelsMap map[string]struct{} enabledModelConfigs []*ModelConfig + enabledModelConfigsMap map[string]*ModelConfig enabledChannelType2ModelConfigs map[int][]*ModelConfig enabledChannelID2channel map[int]*Channel channelSyncLock sync.RWMutex @@ -349,6 +351,12 @@ func CacheGetEnabledModels() []string { return enabledModels } +func CacheGetEnabledModelsMap() map[string]struct{} { + channelSyncLock.RLock() + defer channelSyncLock.RUnlock() + return enabledModelsMap +} + // CacheGetEnabledChannelType2ModelConfigs returns a map of channel type to enabled model configs func CacheGetEnabledChannelType2ModelConfigs() map[int][]*ModelConfig { channelSyncLock.RLock() @@ -363,6 +371,12 @@ func CacheGetEnabledModelConfigs() []*ModelConfig { return enabledModelConfigs } +func CacheGetEnabledModelConfigsMap() map[string]*ModelConfig { + channelSyncLock.RLock() + defer channelSyncLock.RUnlock() + return enabledModelConfigsMap +} + func CacheGetEnabledChannelByID(id int) (*Channel, bool) { channelSyncLock.RLock() defer channelSyncLock.RUnlock() @@ -398,13 +412,15 @@ func InitModelConfigAndChannelCache() error { newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels) // Build enabled models and configs lists - newEnabledModels, newEnabledModelConfigs := buildEnabledModelsAndConfigs(newEnabledChannelType2ModelConfigs) + 
newEnabledModels, newEnabledModelsMap, newEnabledModelConfigs, newEnabledModelConfigsMap := buildEnabledModelsAndConfigs(newEnabledChannelType2ModelConfigs) // Update global cache atomically updateGlobalCache( newEnabledModel2channels, newEnabledModels, + newEnabledModelsMap, newEnabledModelConfigs, + newEnabledModelConfigsMap, newEnabledChannelID2channel, newEnabledChannelType2ModelConfigs, ) @@ -532,10 +548,11 @@ func buildChannelTypeToModelConfigsMap(channels []*Channel) map[int][]*ModelConf return typeMap } -func buildEnabledModelsAndConfigs(typeMap map[int][]*ModelConfig) ([]string, []*ModelConfig) { +func buildEnabledModelsAndConfigs(typeMap map[int][]*ModelConfig) ([]string, map[string]struct{}, []*ModelConfig, map[string]*ModelConfig) { models := make([]string, 0) configs := make([]*ModelConfig, 0) appended := make(map[string]struct{}) + modelConfigsMap := make(map[string]*ModelConfig) for _, modelConfigs := range typeMap { for _, config := range modelConfigs { @@ -545,13 +562,14 @@ func buildEnabledModelsAndConfigs(typeMap map[int][]*ModelConfig) ([]string, []* models = append(models, config.Model) configs = append(configs, config) appended[config.Model] = struct{}{} + modelConfigsMap[config.Model] = config } } slices.Sort(models) slices.SortStableFunc(configs, SortModelConfigsFunc) - return models, configs + return models, appended, configs, modelConfigsMap } func SortModelConfigsFunc(i, j *ModelConfig) int { @@ -579,7 +597,9 @@ func SortModelConfigsFunc(i, j *ModelConfig) int { func updateGlobalCache( newEnabledModel2channels map[string][]*Channel, newEnabledModels []string, + newEnabledModelsMap map[string]struct{}, newEnabledModelConfigs []*ModelConfig, + newEnabledModelConfigsMap map[string]*ModelConfig, newEnabledChannelID2channel map[int]*Channel, newEnabledChannelType2ModelConfigs map[int][]*ModelConfig, ) { @@ -587,7 +607,9 @@ func updateGlobalCache( defer channelSyncLock.Unlock() enabledModel2channels = newEnabledModel2channels enabledModels = 
newEnabledModels + enabledModelsMap = newEnabledModelsMap enabledModelConfigs = newEnabledModelConfigs + enabledModelConfigsMap = newEnabledModelConfigsMap enabledChannelID2channel = newEnabledChannelID2channel enabledChannelType2ModelConfigs = newEnabledChannelType2ModelConfigs } From 75a9ca49cd0f358f3cdda202439dc9a32690b325 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 4 Jan 2025 23:48:39 +0800 Subject: [PATCH 077/167] feat: redis clean expired cache --- service/aiproxy/common/rpmlimit/rate-limit.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index 0c6c30da93e..3c186d707c8 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -20,7 +20,7 @@ const ( // 2. 列表长度代表当前窗口内的请求数 // 3. 如果请求数未达到限制,直接添加新请求并返回成功 // 4. 如果达到限制,则检查最老的请求是否已经过期 -// 5. 如果最老的请求已过期,移除它并添加新请求,否则拒绝新请求 +// 5. 如果最老的请求已过期,最多移除3个过期请求并添加新请求,否则拒绝新请求 // 6. 
通过EXPIRE命令设置键的过期时间,自动清理过期数据 var luaScript = ` local key = KEYS[1] @@ -35,11 +35,19 @@ if count < max_requests then redis.call('PEXPIRE', key, window) return 1 else - local oldest = redis.call('LINDEX', key, -1) - if current_time - tonumber(oldest) >= window then + local removed = 0 + for i = 1, 3 do + local oldest = redis.call('LINDEX', key, -1) + if current_time - tonumber(oldest) >= window then + redis.call('RPOP', key) + removed = removed + 1 + else + break + end + end + if removed > 0 then redis.call('LPUSH', key, current_time) - redis.call('LTRIM', key, 0, max_requests - 1) - redis.call('PEXPIRE', key, window) + redis.call('PEXPIRE', key, window) return 1 else return 0 From 66f834d583b38da4ebb1ec83e5a43b1199d790ee Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 00:57:15 +0800 Subject: [PATCH 078/167] feat: atomic model cache --- service/aiproxy/common/ctxkey/key.go | 2 + service/aiproxy/controller/channel-billing.go | 7 +- service/aiproxy/controller/channel-test.go | 27 ++- service/aiproxy/controller/model.go | 19 +- service/aiproxy/controller/relay.go | 17 +- service/aiproxy/middleware/auth.go | 23 ++- service/aiproxy/middleware/distributor.go | 22 ++- service/aiproxy/middleware/request-id.go | 4 + service/aiproxy/middleware/utils.go | 3 +- service/aiproxy/model/cache.go | 161 +++++------------ service/aiproxy/relay/controller/consume.go | 171 ++++++++++++++++++ .../controller/{helper.go => dohelper.go} | 160 ---------------- service/aiproxy/relay/controller/image.go | 35 ++-- service/aiproxy/relay/controller/price.go | 29 +++ service/aiproxy/relay/controller/rerank.go | 3 +- service/aiproxy/relay/controller/stt.go | 3 +- service/aiproxy/relay/controller/text.go | 3 +- service/aiproxy/relay/controller/tts.go | 3 +- service/aiproxy/relay/meta/meta.go | 18 +- service/aiproxy/relay/price/image.go | 45 ----- service/aiproxy/relay/price/model.go | 28 --- service/aiproxy/relay/utils/testreq.go | 17 +- 22 files changed, 372 insertions(+), 428 
deletions(-) create mode 100644 service/aiproxy/relay/controller/consume.go rename service/aiproxy/relay/controller/{helper.go => dohelper.go} (60%) create mode 100644 service/aiproxy/relay/controller/price.go delete mode 100644 service/aiproxy/relay/price/image.go delete mode 100644 service/aiproxy/relay/price/model.go diff --git a/service/aiproxy/common/ctxkey/key.go b/service/aiproxy/common/ctxkey/key.go index a986a4ebc18..83ffae25817 100644 --- a/service/aiproxy/common/ctxkey/key.go +++ b/service/aiproxy/common/ctxkey/key.go @@ -5,4 +5,6 @@ const ( Token = "token" OriginalModel = "original_model" RequestID = "X-Request-Id" + ModelCaches = "model_caches" + ModelConfig = "model_config" ) diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index 4124642655b..2ea6b241876 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -7,7 +7,6 @@ import ( "time" "github.com/labring/sealos/service/aiproxy/common/balance" - "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor" @@ -105,7 +104,7 @@ func AutomaticallyUpdateChannels(frequency int) { // subscription func GetSubscription(c *gin.Context) { - group := c.MustGet(ctxkey.Group).(*model.GroupCache) + group := middleware.GetGroup(c) b, _, err := balance.Default.GetGroupRemainBalance(c, group.ID) if err != nil { log.Errorf("get group (%s) balance failed: %s", group.ID, err) @@ -115,7 +114,7 @@ func GetSubscription(c *gin.Context) { }) return } - token := c.MustGet(ctxkey.Token).(*model.TokenCache) + token := middleware.GetToken(c) quota := token.Quota if quota <= 0 { quota = b @@ -128,6 +127,6 @@ func GetSubscription(c *gin.Context) { } func GetUsage(c *gin.Context) { - token := c.MustGet(ctxkey.Token).(*model.TokenCache) + token := 
middleware.GetToken(c) c.JSON(http.StatusOK, openai.UsageResponse{TotalUsage: token.UsedAmount / 7 * 100}) } diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 1685750cb9d..2ac9e26dd0a 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -29,8 +29,12 @@ import ( const channelTestRequestID = "channel-test" // testSingleModel tests a single model in the channel -func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTest, error) { - body, mode, err := utils.BuildRequest(modelName) +func testSingleModel(mc *model.ModelCaches, channel *model.Channel, modelName string) (*model.ChannelTest, error) { + modelConfig, ok := mc.ModelConfigMap[modelName] + if !ok { + return nil, errors.New(modelName + " model config not found") + } + body, mode, err := utils.BuildRequest(modelConfig) if err != nil { return nil, err } @@ -48,6 +52,7 @@ func testSingleModel(channel *model.Channel, modelName string) (*model.ChannelTe channel, mode, modelName, + modelConfig, meta.WithRequestID(channelTestRequestID), meta.WithChannelTest(true), ) @@ -116,7 +121,7 @@ func TestChannel(c *gin.Context) { return } - ct, err := testSingleModel(channel, modelName) + ct, err := testSingleModel(model.LoadModelCaches(), channel, modelName) if err != nil { log.Errorf("failed to test channel %s(%d) model %s: %s", channel.Name, channel.ID, modelName, err.Error()) c.JSON(http.StatusOK, middleware.APIResponse{ @@ -142,8 +147,8 @@ type testResult struct { Success bool `json:"success"` } -func processTestResult(channel *model.Channel, modelName string, returnSuccess bool, successResponseBody bool) *testResult { - ct, err := testSingleModel(channel, modelName) +func processTestResult(mc *model.ModelCaches, channel *model.Channel, modelName string, returnSuccess bool, successResponseBody bool) *testResult { + ct, err := testSingleModel(mc, channel, modelName) e := 
&utils.UnsupportedModelTypeError{} if errors.As(err, &e) { @@ -216,6 +221,8 @@ func TestChannelModels(c *gin.Context) { models[i], models[j] = models[j], models[i] }) + mc := model.LoadModelCaches() + for _, modelName := range models { wg.Add(1) semaphore <- struct{}{} @@ -224,7 +231,7 @@ func TestChannelModels(c *gin.Context) { defer wg.Done() defer func() { <-semaphore }() - result := processTestResult(channel, model, returnSuccess, successResponseBody) + result := processTestResult(mc, channel, model, returnSuccess, successResponseBody) if result == nil { return } @@ -299,6 +306,8 @@ func TestAllChannels(c *gin.Context) { newChannels[i], newChannels[j] = newChannels[j], newChannels[i] }) + mc := model.LoadModelCaches() + for _, channel := range newChannels { channelHasError := &atomic.Bool{} hasErrorMap[channel.ID] = channelHasError @@ -316,7 +325,7 @@ func TestAllChannels(c *gin.Context) { defer wg.Done() defer func() { <-semaphore }() - result := processTestResult(ch, model, returnSuccess, successResponseBody) + result := processTestResult(mc, ch, model, returnSuccess, successResponseBody) if result == nil { return } @@ -369,6 +378,8 @@ func AutoTestBannedModels() { return } + mc := model.LoadModelCaches() + for modelName, ids := range channels { for _, id := range ids { channel, err := model.LoadChannelByID(int(id)) @@ -376,7 +387,7 @@ func AutoTestBannedModels() { log.Errorf("failed to get channel by model %s: %s", modelName, err.Error()) continue } - result, err := testSingleModel(channel, modelName) + result, err := testSingleModel(mc, channel, modelName) if err != nil { log.Errorf("failed to test channel %s(%d) model %s: %s", channel.Name, channel.ID, modelName, err.Error()) } diff --git a/service/aiproxy/controller/model.go b/service/aiproxy/controller/model.go index c39baff7eb9..0728fb1593d 100644 --- a/service/aiproxy/controller/model.go +++ b/service/aiproxy/controller/model.go @@ -10,7 +10,6 @@ import ( "github.com/gin-gonic/gin" json 
"github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/channeltype" @@ -169,11 +168,11 @@ func ChannelDefaultModelsAndMappingByType(c *gin.Context) { } func EnabledModels(c *gin.Context) { - middleware.SuccessResponse(c, model.CacheGetEnabledModelConfigs()) + middleware.SuccessResponse(c, middleware.GetModelCaches(c).EnabledModelConfigs) } func ChannelEnabledModels(c *gin.Context) { - middleware.SuccessResponse(c, model.CacheGetEnabledChannelType2ModelConfigs()) + middleware.SuccessResponse(c, middleware.GetModelCaches(c).EnabledChannelType2ModelConfigs) } func ChannelEnabledModelsByType(c *gin.Context) { @@ -187,17 +186,17 @@ func ChannelEnabledModelsByType(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, "invalid type") return } - middleware.SuccessResponse(c, model.CacheGetEnabledChannelType2ModelConfigs()[channelTypeInt]) + middleware.SuccessResponse(c, middleware.GetModelCaches(c).EnabledChannelType2ModelConfigs[channelTypeInt]) } func ListModels(c *gin.Context) { - modelConfigsMap := model.CacheGetEnabledModelConfigsMap() - token := c.MustGet(ctxkey.Token).(*model.TokenCache) + enabledModelConfigsMap := middleware.GetModelCaches(c).EnabledModelConfigsMap + token := middleware.GetToken(c) availableOpenAIModels := make([]*OpenAIModels, 0, len(token.Models)) for _, model := range token.Models { - if mc, ok := modelConfigsMap[model]; ok { + if mc, ok := enabledModelConfigsMap[model]; ok { availableOpenAIModels = append(availableOpenAIModels, &OpenAIModels{ ID: model, Object: "model", @@ -218,11 +217,11 @@ func ListModels(c *gin.Context) { func RetrieveModel(c *gin.Context) { modelName := c.Param("model") + enabledModelConfigsMap := middleware.GetModelCaches(c).EnabledModelConfigsMap - modelConfigsMap := 
model.CacheGetEnabledModelConfigsMap() - mc, ok := modelConfigsMap[modelName] + mc, ok := enabledModelConfigsMap[modelName] if ok { - token := c.MustGet(ctxkey.Token).(*model.TokenCache) + token := middleware.GetToken(c) ok = slices.Contains(token.Models, modelName) } diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index edc3bef017a..ac5758042d7 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -9,7 +9,6 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/middleware" dbmodel "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/monitor" @@ -79,15 +78,15 @@ func RelayHelper(meta *meta.Meta, c *gin.Context, relayController RelayControlle return err, false } -func getChannelWithFallback(model string, failedChannelIDs ...int) (*dbmodel.Channel, error) { - channel, err := dbmodel.CacheGetRandomSatisfiedChannel(model, failedChannelIDs...) +func getChannelWithFallback(cache *dbmodel.ModelCaches, model string, failedChannelIDs ...int) (*dbmodel.Channel, error) { + channel, err := cache.GetRandomSatisfiedChannel(model, failedChannelIDs...) 
if err == nil { return channel, nil } if !errors.Is(err, dbmodel.ErrChannelsExhausted) { return nil, err } - return dbmodel.CacheGetRandomSatisfiedChannel(model) + return cache.GetRandomSatisfiedChannel(model) } func NewRelay(mode int) func(c *gin.Context) { @@ -103,7 +102,7 @@ func NewRelay(mode int) func(c *gin.Context) { func relay(c *gin.Context, mode int, relayController RelayController) { log := middleware.GetLogger(c) - requestModel := c.MustGet(string(ctxkey.OriginalModel)).(string) + requestModel := middleware.GetOriginalModel(c) ids, err := monitor.GetBannedChannels(c.Request.Context(), requestModel) if err != nil { @@ -117,7 +116,9 @@ func relay(c *gin.Context, mode int, relayController RelayController) { failedChannelIDs = append(failedChannelIDs, int(id)) } - channel, err := getChannelWithFallback(requestModel, failedChannelIDs...) + mc := middleware.GetModelCaches(c) + + channel, err := getChannelWithFallback(mc, requestModel, failedChannelIDs...) if err != nil { c.JSON(http.StatusServiceUnavailable, gin.H{ "error": &model.Error{ @@ -135,13 +136,13 @@ func relay(c *gin.Context, mode int, relayController RelayController) { return } failedChannelIDs = append(failedChannelIDs, channel.ID) - requestID := c.GetString(ctxkey.RequestID) + requestID := middleware.GetRequestID(c) var retryTimes int64 if retry { retryTimes = config.GetRetryTimes() } for i := retryTimes; i > 0; i-- { - newChannel, err := dbmodel.CacheGetRandomSatisfiedChannel(requestModel, failedChannelIDs...) + newChannel, err := mc.GetRandomSatisfiedChannel(requestModel, failedChannelIDs...) 
if err != nil { if errors.Is(err, dbmodel.ErrChannelsNotFound) { break diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 0e10ccb3b73..4dd278add0f 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -89,14 +89,29 @@ func TokenAuth(c *gin.Context) { } SetLogGroupFields(log.Data, group) - storeTokenModels(token) + modelCaches := model.LoadModelCaches() + + storeTokenModels(token, modelCaches) c.Set(ctxkey.Group, group) c.Set(ctxkey.Token, token) + c.Set(ctxkey.ModelCaches, modelCaches) c.Next() } +func GetGroup(c *gin.Context) *model.GroupCache { + return c.MustGet(ctxkey.Group).(*model.GroupCache) +} + +func GetToken(c *gin.Context) *model.TokenCache { + return c.MustGet(ctxkey.Token).(*model.TokenCache) +} + +func GetModelCaches(c *gin.Context) *model.ModelCaches { + return c.MustGet(ctxkey.ModelCaches).(*model.ModelCaches) +} + func sliceFilter[T any](s []T, fn func(T) bool) []T { i := 0 for _, v := range s { @@ -108,11 +123,11 @@ func sliceFilter[T any](s []T, fn func(T) bool) []T { return s[:i] } -func storeTokenModels(token *model.TokenCache) { +func storeTokenModels(token *model.TokenCache, modelCaches *model.ModelCaches) { if len(token.Models) == 0 { - token.Models = model.CacheGetEnabledModels() + token.Models = modelCaches.EnabledModels } else { - enabledModelsMap := model.CacheGetEnabledModelsMap() + enabledModelsMap := modelCaches.EnabledModelsMap token.Models = sliceFilter(token.Models, func(m string) bool { _, ok := enabledModelsMap[m] return ok diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 911b4df007c..f4322e2975c 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -97,7 +97,7 @@ func Distribute(c *gin.Context) { log := GetLogger(c) - group := c.MustGet(ctxkey.Group).(*model.GroupCache) + group := GetGroup(c) requestModel, err := getRequestModel(c) if err != nil 
{ @@ -111,7 +111,7 @@ func Distribute(c *gin.Context) { SetLogModelFields(log.Data, requestModel) - token := c.MustGet(ctxkey.Token).(*model.TokenCache) + token := GetToken(c) if len(token.Models) == 0 || !slices.Contains(token.Models, requestModel) { abortWithMessage(c, http.StatusForbidden, @@ -122,7 +122,7 @@ func Distribute(c *gin.Context) { return } - mc, ok := model.CacheGetModelConfig(requestModel) + mc, ok := GetModelCaches(c).ModelConfigMap[requestModel] if !ok { abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") return @@ -133,19 +133,29 @@ func Distribute(c *gin.Context) { } c.Set(ctxkey.OriginalModel, requestModel) + c.Set(ctxkey.ModelConfig, mc) c.Next() } +func GetOriginalModel(c *gin.Context) string { + return c.GetString(ctxkey.OriginalModel) +} + +func GetModelConfig(c *gin.Context) *model.ModelConfig { + return c.MustGet(ctxkey.ModelConfig).(*model.ModelConfig) +} + func NewMetaByContext(c *gin.Context, channel *model.Channel, modelName string, mode int) *meta.Meta { - requestID := c.GetString(ctxkey.RequestID) - group := c.MustGet(ctxkey.Group).(*model.GroupCache) - token := c.MustGet(ctxkey.Token).(*model.TokenCache) + requestID := GetRequestID(c) + group := GetGroup(c) + token := GetToken(c) return meta.NewMeta( channel, mode, modelName, + GetModelConfig(c), meta.WithRequestID(requestID), meta.WithGroup(group), meta.WithToken(token), diff --git a/service/aiproxy/middleware/request-id.go b/service/aiproxy/middleware/request-id.go index 4296b3490ec..17ba4516bb7 100644 --- a/service/aiproxy/middleware/request-id.go +++ b/service/aiproxy/middleware/request-id.go @@ -20,6 +20,10 @@ func SetRequestID(c *gin.Context, id string) { SetLogRequestIDField(log.Data, id) } +func GetRequestID(c *gin.Context) string { + return c.GetString(ctxkey.RequestID) +} + func RequestID(c *gin.Context) { id := GenRequestID() SetRequestID(c, id) diff --git a/service/aiproxy/middleware/utils.go b/service/aiproxy/middleware/utils.go index 
4fc44655207..96e5534679e 100644 --- a/service/aiproxy/middleware/utils.go +++ b/service/aiproxy/middleware/utils.go @@ -6,7 +6,6 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -22,7 +21,7 @@ func abortWithMessage(c *gin.Context, statusCode int, message string) { GetLogger(c).Error(message) c.JSON(statusCode, gin.H{ "error": &model.Error{ - Message: MessageWithRequestID(message, c.GetString(ctxkey.RequestID)), + Message: MessageWithRequestID(message, GetRequestID(c)), Type: ErrorTypeAIPROXY, }, }) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 486d9ad0a61..d2213905943 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -9,6 +9,7 @@ import ( "slices" "sort" "sync" + "sync/atomic" "time" json "github.com/json-iterator/go" @@ -326,67 +327,33 @@ func CacheGetGroupModelTPM(id string, model string) (int64, error) { return tpm, nil } -var ( - enabledModel2channels map[string][]*Channel - enabledModels []string - enabledModelsMap map[string]struct{} - enabledModelConfigs []*ModelConfig - enabledModelConfigsMap map[string]*ModelConfig - enabledChannelType2ModelConfigs map[int][]*ModelConfig - enabledChannelID2channel map[int]*Channel - channelSyncLock sync.RWMutex -) - -// GetEnabledModel2Channels returns a map of model name to enabled channels -func GetEnabledModel2Channels() map[string][]*Channel { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledModel2channels -} - -// CacheGetEnabledModels returns a list of enabled model names -func CacheGetEnabledModels() []string { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledModels +// read-only cache +// +//nolint:revive +type ModelCaches struct { + ModelConfigMap map[string]*ModelConfig + EnabledModel2channels map[string][]*Channel + EnabledModels 
[]string + EnabledModelsMap map[string]struct{} + EnabledModelConfigs []*ModelConfig + EnabledModelConfigsMap map[string]*ModelConfig + EnabledChannelType2ModelConfigs map[int][]*ModelConfig + EnabledChannelID2channel map[int]*Channel } -func CacheGetEnabledModelsMap() map[string]struct{} { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledModelsMap -} - -// CacheGetEnabledChannelType2ModelConfigs returns a map of channel type to enabled model configs -func CacheGetEnabledChannelType2ModelConfigs() map[int][]*ModelConfig { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledChannelType2ModelConfigs -} - -// CacheGetEnabledModelConfigs returns a list of enabled model configs -func CacheGetEnabledModelConfigs() []*ModelConfig { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledModelConfigs -} +var modelCaches atomic.Pointer[ModelCaches] -func CacheGetEnabledModelConfigsMap() map[string]*ModelConfig { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - return enabledModelConfigsMap +func init() { + modelCaches.Store(new(ModelCaches)) } -func CacheGetEnabledChannelByID(id int) (*Channel, bool) { - channelSyncLock.RLock() - defer channelSyncLock.RUnlock() - channel, ok := enabledChannelID2channel[id] - return channel, ok +func LoadModelCaches() *ModelCaches { + return modelCaches.Load() } // InitModelConfigAndChannelCache initializes the channel cache from database func InitModelConfigAndChannelCache() error { - err := initModelConfigCache() + modelConfigMap, err := initializeModelConfigCache() if err != nil { return err } @@ -409,21 +376,22 @@ func InitModelConfigAndChannelCache() error { sortChannelsByPriority(newEnabledModel2channels) // Build channel type to model configs map - newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels) + newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels, modelConfigMap) // 
Build enabled models and configs lists newEnabledModels, newEnabledModelsMap, newEnabledModelConfigs, newEnabledModelConfigsMap := buildEnabledModelsAndConfigs(newEnabledChannelType2ModelConfigs) // Update global cache atomically - updateGlobalCache( - newEnabledModel2channels, - newEnabledModels, - newEnabledModelsMap, - newEnabledModelConfigs, - newEnabledModelConfigsMap, - newEnabledChannelID2channel, - newEnabledChannelType2ModelConfigs, - ) + modelCaches.Store(&ModelCaches{ + ModelConfigMap: modelConfigMap, + EnabledModel2channels: newEnabledModel2channels, + EnabledModels: newEnabledModels, + EnabledModelsMap: newEnabledModelsMap, + EnabledModelConfigs: newEnabledModelConfigs, + EnabledModelConfigsMap: newEnabledModelConfigsMap, + EnabledChannelType2ModelConfigs: newEnabledChannelType2ModelConfigs, + EnabledChannelID2channel: newEnabledChannelID2channel, + }) return nil } @@ -471,6 +439,19 @@ func LoadChannelByID(id int) (*Channel, error) { return &channel, nil } +func initializeModelConfigCache() (map[string]*ModelConfig, error) { + modelConfigs, err := GetAllModelConfigs() + if err != nil { + return nil, err + } + newModelConfigMap := make(map[string]*ModelConfig) + for _, modelConfig := range modelConfigs { + newModelConfigMap[modelConfig.Model] = modelConfig + } + + return newModelConfigMap, nil +} + func initializeChannelModels(channel *Channel) { if len(channel.Models) == 0 { channel.Models = config.GetDefaultChannelModels()[channel.Type] @@ -522,7 +503,7 @@ func sortChannelsByPriority(modelMap map[string][]*Channel) { } } -func buildChannelTypeToModelConfigsMap(channels []*Channel) map[int][]*ModelConfig { +func buildChannelTypeToModelConfigsMap(channels []*Channel, modelConfigMap map[string]*ModelConfig) map[int][]*ModelConfig { typeMap := make(map[int][]*ModelConfig) for _, channel := range channels { @@ -532,7 +513,7 @@ func buildChannelTypeToModelConfigsMap(channels []*Channel) map[int][]*ModelConf configs := typeMap[channel.Type] for _, model := 
range channel.Models { - if config, ok := CacheGetModelConfig(model); ok { + if config, ok := modelConfigMap[model]; ok { configs = append(configs, config) } } @@ -594,26 +575,6 @@ func SortModelConfigsFunc(i, j *ModelConfig) int { return 1 } -func updateGlobalCache( - newEnabledModel2channels map[string][]*Channel, - newEnabledModels []string, - newEnabledModelsMap map[string]struct{}, - newEnabledModelConfigs []*ModelConfig, - newEnabledModelConfigsMap map[string]*ModelConfig, - newEnabledChannelID2channel map[int]*Channel, - newEnabledChannelType2ModelConfigs map[int][]*ModelConfig, -) { - channelSyncLock.Lock() - defer channelSyncLock.Unlock() - enabledModel2channels = newEnabledModel2channels - enabledModels = newEnabledModels - enabledModelsMap = newEnabledModelsMap - enabledModelConfigs = newEnabledModelConfigs - enabledModelConfigsMap = newEnabledModelConfigsMap - enabledChannelID2channel = newEnabledChannelID2channel - enabledChannelType2ModelConfigs = newEnabledChannelType2ModelConfigs -} - func SyncModelConfigAndChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Duration) { defer wg.Done() @@ -653,8 +614,8 @@ var ( ) //nolint:gosec -func CacheGetRandomSatisfiedChannel(model string, ignoreChannel ...int) (*Channel, error) { - _channels := GetEnabledModel2Channels()[model] +func (c *ModelCaches) GetRandomSatisfiedChannel(model string, ignoreChannel ...int) (*Channel, error) { + _channels := c.EnabledModel2channels[model] if len(_channels) == 0 { return nil, ErrChannelsNotFound } @@ -687,31 +648,3 @@ func CacheGetRandomSatisfiedChannel(model string, ignoreChannel ...int) (*Channe return channels[rand.IntN(len(channels))], nil } - -var ( - modelConfigSyncLock sync.RWMutex - modelConfigMap map[string]*ModelConfig -) - -func initModelConfigCache() error { - modelConfigs, err := GetAllModelConfigs() - if err != nil { - return err - } - newModelConfigMap := make(map[string]*ModelConfig) - for _, modelConfig := range modelConfigs { - 
newModelConfigMap[modelConfig.Model] = modelConfig - } - - modelConfigSyncLock.Lock() - defer modelConfigSyncLock.Unlock() - modelConfigMap = newModelConfigMap - return nil -} - -func CacheGetModelConfig(model string) (*ModelConfig, bool) { - modelConfigSyncLock.RLock() - defer modelConfigSyncLock.RUnlock() - modelConfig, ok := modelConfigMap[model] - return modelConfig, ok -} diff --git a/service/aiproxy/relay/controller/consume.go b/service/aiproxy/relay/controller/consume.go new file mode 100644 index 00000000000..9142b200194 --- /dev/null +++ b/service/aiproxy/relay/controller/consume.go @@ -0,0 +1,171 @@ +package controller + +import ( + "context" + "sync" + + "github.com/labring/sealos/service/aiproxy/common/balance" + "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/relay/meta" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" + "github.com/shopspring/decimal" + log "github.com/sirupsen/logrus" +) + +var ConsumeWaitGroup sync.WaitGroup + +type PreCheckGroupBalanceReq struct { + InputTokens int + MaxTokens int + InputPrice float64 + OutputPrice float64 +} + +func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { + if req == nil || req.InputPrice == 0 || (req.InputTokens == 0 && req.MaxTokens == 0) { + return 0 + } + preConsumedTokens := int64(req.InputTokens) + if req.MaxTokens != 0 { + preConsumedTokens += int64(req.MaxTokens) + } + return decimal. + NewFromInt(preConsumedTokens). + Mul(decimal.NewFromFloat(req.InputPrice)). + Div(decimal.NewFromInt(PriceUnit)). 
+ InexactFloat64() +} + +func checkGroupBalance(req *PreCheckGroupBalanceReq, meta *meta.Meta, groupRemainBalance float64) bool { + if meta.IsChannelTest { + return true + } + if groupRemainBalance <= 0 { + return false + } + + preConsumedAmount := getPreConsumedAmount(req) + + return groupRemainBalance > preConsumedAmount +} + +func getGroupBalance(ctx context.Context, meta *meta.Meta) (float64, balance.PostGroupConsumer, error) { + if meta.IsChannelTest { + return 0, nil, nil + } + + return balance.Default.GetGroupRemainBalance(ctx, meta.Group.ID) +} + +func postConsumeAmount( + ctx context.Context, + consumeWaitGroup *sync.WaitGroup, + postGroupConsumer balance.PostGroupConsumer, + code int, + usage *relaymodel.Usage, + meta *meta.Meta, + inputPrice, + outputPrice float64, + content string, + requestDetail *model.RequestDetail, +) { + defer func() { + consumeWaitGroup.Done() + if r := recover(); r != nil { + log.Errorf("panic in post consume amount: %v", r) + } + }() + + if meta.IsChannelTest { + return + } + + log := middleware.NewLogger() + middleware.SetLogFieldsFromMeta(meta, log.Data) + + amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta, log) + + err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, requestDetail, amount) + if err != nil { + log.Error("error batch record consume: " + err.Error()) + } +} + +func calculateAmount(ctx context.Context, usage *relaymodel.Usage, inputPrice, outputPrice float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { + if usage == nil { + return 0 + } + + promptTokens := usage.PromptTokens + completionTokens := usage.CompletionTokens + totalTokens := promptTokens + completionTokens + + if totalTokens == 0 { + return 0 + } + + promptAmount := decimal.NewFromInt(int64(promptTokens)). + Mul(decimal.NewFromFloat(inputPrice)). + Div(decimal.NewFromInt(PriceUnit)) + completionAmount := decimal.NewFromInt(int64(completionTokens)). 
+ Mul(decimal.NewFromFloat(outputPrice)). + Div(decimal.NewFromInt(PriceUnit)) + amount := promptAmount.Add(completionAmount).InexactFloat64() + + if amount > 0 { + return processGroupConsume(ctx, amount, postGroupConsumer, meta, log) + } + + return 0 +} + +func processGroupConsume(ctx context.Context, amount float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { + consumedAmount, err := postGroupConsumer.PostGroupConsume(ctx, meta.Token.Name, amount) + if err != nil { + log.Error("error consuming token remain amount: " + err.Error()) + if err := model.CreateConsumeError( + meta.RequestID, + meta.RequestAt, + meta.Group.ID, + meta.Token.Name, + meta.OriginModel, + err.Error(), + amount, + meta.Token.ID, + ); err != nil { + log.Error("failed to create consume error: " + err.Error()) + } + return amount + } + return consumedAmount +} + +func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, requestDetail *model.RequestDetail, amount float64) error { + promptTokens := 0 + completionTokens := 0 + if usage != nil { + promptTokens = usage.PromptTokens + completionTokens = usage.CompletionTokens + } + + return model.BatchRecordConsume( + meta.RequestID, + meta.RequestAt, + meta.Group.ID, + code, + meta.Channel.ID, + promptTokens, + completionTokens, + meta.OriginModel, + meta.Token.ID, + meta.Token.Name, + amount, + inputPrice, + outputPrice, + meta.Endpoint, + content, + meta.Mode, + requestDetail, + ) +} diff --git a/service/aiproxy/relay/controller/helper.go b/service/aiproxy/relay/controller/dohelper.go similarity index 60% rename from service/aiproxy/relay/controller/helper.go rename to service/aiproxy/relay/controller/dohelper.go index 3a5c3971b07..45ce94f4508 100644 --- a/service/aiproxy/relay/controller/helper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -10,7 +10,6 @@ import ( "github.com/gin-gonic/gin" 
"github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/balance" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" @@ -19,170 +18,11 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" - billingprice "github.com/labring/sealos/service/aiproxy/relay/price" "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/labring/sealos/service/aiproxy/relay/utils" - "github.com/shopspring/decimal" log "github.com/sirupsen/logrus" ) -var ConsumeWaitGroup sync.WaitGroup - -type PreCheckGroupBalanceReq struct { - InputTokens int - MaxTokens int - InputPrice float64 - OutputPrice float64 -} - -func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { - if req == nil || req.InputPrice == 0 || (req.InputTokens == 0 && req.MaxTokens == 0) { - return 0 - } - preConsumedTokens := int64(req.InputTokens) - if req.MaxTokens != 0 { - preConsumedTokens += int64(req.MaxTokens) - } - return decimal. - NewFromInt(preConsumedTokens). - Mul(decimal.NewFromFloat(req.InputPrice)). - Div(decimal.NewFromInt(billingprice.PriceUnit)). 
- InexactFloat64() -} - -func checkGroupBalance(req *PreCheckGroupBalanceReq, meta *meta.Meta, groupRemainBalance float64) bool { - if meta.IsChannelTest { - return true - } - if groupRemainBalance <= 0 { - return false - } - - preConsumedAmount := getPreConsumedAmount(req) - - return groupRemainBalance > preConsumedAmount -} - -func getGroupBalance(ctx context.Context, meta *meta.Meta) (float64, balance.PostGroupConsumer, error) { - if meta.IsChannelTest { - return 0, nil, nil - } - - return balance.Default.GetGroupRemainBalance(ctx, meta.Group.ID) -} - -func postConsumeAmount( - ctx context.Context, - consumeWaitGroup *sync.WaitGroup, - postGroupConsumer balance.PostGroupConsumer, - code int, - usage *relaymodel.Usage, - meta *meta.Meta, - inputPrice, - outputPrice float64, - content string, - requestDetail *model.RequestDetail, -) { - defer func() { - consumeWaitGroup.Done() - if r := recover(); r != nil { - log.Errorf("panic in post consume amount: %v", r) - } - }() - - if meta.IsChannelTest { - return - } - - log := middleware.NewLogger() - middleware.SetLogFieldsFromMeta(meta, log.Data) - - amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta, log) - - err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, requestDetail, amount) - if err != nil { - log.Error("error batch record consume: " + err.Error()) - } -} - -func calculateAmount(ctx context.Context, usage *relaymodel.Usage, inputPrice, outputPrice float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { - if usage == nil { - return 0 - } - - promptTokens := usage.PromptTokens - completionTokens := usage.CompletionTokens - totalTokens := promptTokens + completionTokens - - if totalTokens == 0 { - return 0 - } - - promptAmount := decimal.NewFromInt(int64(promptTokens)). - Mul(decimal.NewFromFloat(inputPrice)). 
- Div(decimal.NewFromInt(billingprice.PriceUnit)) - completionAmount := decimal.NewFromInt(int64(completionTokens)). - Mul(decimal.NewFromFloat(outputPrice)). - Div(decimal.NewFromInt(billingprice.PriceUnit)) - amount := promptAmount.Add(completionAmount).InexactFloat64() - - if amount > 0 { - return processGroupConsume(ctx, amount, postGroupConsumer, meta, log) - } - - return 0 -} - -func processGroupConsume(ctx context.Context, amount float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { - consumedAmount, err := postGroupConsumer.PostGroupConsume(ctx, meta.Token.Name, amount) - if err != nil { - log.Error("error consuming token remain amount: " + err.Error()) - if err := model.CreateConsumeError( - meta.RequestID, - meta.RequestAt, - meta.Group.ID, - meta.Token.Name, - meta.OriginModel, - err.Error(), - amount, - meta.Token.ID, - ); err != nil { - log.Error("failed to create consume error: " + err.Error()) - } - return amount - } - return consumedAmount -} - -func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, requestDetail *model.RequestDetail, amount float64) error { - promptTokens := 0 - completionTokens := 0 - if usage != nil { - promptTokens = usage.PromptTokens - completionTokens = usage.CompletionTokens - } - - return model.BatchRecordConsume( - meta.RequestID, - meta.RequestAt, - meta.Group.ID, - code, - meta.Channel.ID, - promptTokens, - completionTokens, - meta.OriginModel, - meta.Token.ID, - meta.Token.Name, - amount, - inputPrice, - outputPrice, - meta.Endpoint, - content, - meta.Mode, - requestDetail, - ) -} - func isErrorHappened(resp *http.Response) bool { if resp == nil { return false diff --git a/service/aiproxy/relay/controller/image.go b/service/aiproxy/relay/controller/image.go index 2ff31c2b730..919d17faab3 100644 --- a/service/aiproxy/relay/controller/image.go +++ b/service/aiproxy/relay/controller/image.go @@ -5,13 +5,26 @@ import 
( "fmt" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" - billingprice "github.com/labring/sealos/service/aiproxy/relay/price" "github.com/labring/sealos/service/aiproxy/relay/utils" ) -func getImageRequest(c *gin.Context) (*relaymodel.ImageRequest, error) { +func validateImageMaxBatchSize(modelConfig *model.ModelConfig, batchSize int) error { + if batchSize <= 1 { + return nil + } + if modelConfig.ImageMaxBatchSize <= 0 { + return nil + } + if batchSize > modelConfig.ImageMaxBatchSize { + return fmt.Errorf("batch size %d is greater than the maximum batch size %d", batchSize, modelConfig.ImageMaxBatchSize) + } + return nil +} + +func getImageRequest(meta *meta.Meta, c *gin.Context) (*relaymodel.ImageRequest, error) { imageRequest, err := utils.UnmarshalImageRequest(c.Request) if err != nil { return nil, err @@ -25,30 +38,22 @@ func getImageRequest(c *gin.Context) (*relaymodel.ImageRequest, error) { if imageRequest.N == 0 { imageRequest.N = 1 } - if err := billingprice.ValidateImageMaxBatchSize(imageRequest.Model, imageRequest.N); err != nil { + if err := validateImageMaxBatchSize(meta.ModelConfig, imageRequest.N); err != nil { return nil, err } return imageRequest, nil } -func getImageCostPrice(model string, size string) (float64, error) { - imageCostPrice, ok := billingprice.GetImageSizePrice(model, size) - if !ok { - return 0, fmt.Errorf("invalid image size: %s", size) - } - return imageCostPrice, nil -} - func RelayImageHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { - imageRequest, err := getImageRequest(c) + imageRequest, err := getImageRequest(meta, c) if err != nil { return nil, err } - imageCostPrice, err := getImageCostPrice(meta.OriginModel, imageRequest.Size) - if err != nil { - return nil, err + imageCostPrice, 
ok := GetImageSizePrice(meta.ModelConfig, imageRequest.Size) + if !ok { + return nil, fmt.Errorf("invalid image size: %s", imageRequest.Size) } return &PreCheckGroupBalanceReq{ diff --git a/service/aiproxy/relay/controller/price.go b/service/aiproxy/relay/controller/price.go new file mode 100644 index 00000000000..aa5652912ee --- /dev/null +++ b/service/aiproxy/relay/controller/price.go @@ -0,0 +1,29 @@ +package controller + +import ( + "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/model" +) + +const ( + // /1K tokens + PriceUnit = 1000 +) + +func GetModelPrice(modelConfig *model.ModelConfig) (float64, float64, bool) { + if !config.GetBillingEnabled() { + return 0, 0, true + } + return modelConfig.InputPrice, modelConfig.OutputPrice, true +} + +func GetImageSizePrice(modelConfig *model.ModelConfig, size string) (float64, bool) { + if !config.GetBillingEnabled() { + return 0, false + } + if len(modelConfig.ImagePrices) == 0 { + return 0, true + } + price, ok := modelConfig.ImagePrices[size] + return price, ok +} diff --git a/service/aiproxy/relay/controller/rerank.go b/service/aiproxy/relay/controller/rerank.go index 737e2bc2d2e..d377b08e9cc 100644 --- a/service/aiproxy/relay/controller/rerank.go +++ b/service/aiproxy/relay/controller/rerank.go @@ -8,13 +8,12 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" - billingprice "github.com/labring/sealos/service/aiproxy/relay/price" "github.com/labring/sealos/service/aiproxy/relay/utils" ) func RerankHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + price, completionPrice, ok := GetModelPrice(meta.ModelConfig) if !ok { return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } 
diff --git a/service/aiproxy/relay/controller/stt.go b/service/aiproxy/relay/controller/stt.go index e12d130c4e1..6b570d4547c 100644 --- a/service/aiproxy/relay/controller/stt.go +++ b/service/aiproxy/relay/controller/stt.go @@ -6,12 +6,11 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" - billingprice "github.com/labring/sealos/service/aiproxy/relay/price" ) func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + price, completionPrice, ok := GetModelPrice(meta.ModelConfig) if !ok { return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } diff --git a/service/aiproxy/relay/controller/text.go b/service/aiproxy/relay/controller/text.go index da3ee8d40e1..d260f0c8d93 100644 --- a/service/aiproxy/relay/controller/text.go +++ b/service/aiproxy/relay/controller/text.go @@ -7,13 +7,12 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" - billingprice "github.com/labring/sealos/service/aiproxy/relay/price" "github.com/labring/sealos/service/aiproxy/relay/utils" ) func RelayTextHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + price, completionPrice, ok := GetModelPrice(meta.ModelConfig) if !ok { return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } diff --git a/service/aiproxy/relay/controller/tts.go b/service/aiproxy/relay/controller/tts.go index 56e0298c9cb..8afcf22b8ce 100644 --- a/service/aiproxy/relay/controller/tts.go +++ 
b/service/aiproxy/relay/controller/tts.go @@ -7,13 +7,12 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" - billingprice "github.com/labring/sealos/service/aiproxy/relay/price" "github.com/labring/sealos/service/aiproxy/relay/utils" ) func RelayTTSHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatusCode { return Handle(meta, c, func() (*PreCheckGroupBalanceReq, error) { - price, completionPrice, ok := billingprice.GetModelPrice(meta.OriginModel) + price, completionPrice, ok := GetModelPrice(meta.ModelConfig) if !ok { return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index 7519d507eec..4e1ab6b0a0d 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -16,10 +16,11 @@ type ChannelMeta struct { } type Meta struct { - values map[string]any - Channel *ChannelMeta - Group *model.GroupCache - Token *model.TokenCache + values map[string]any + Channel *ChannelMeta + Group *model.GroupCache + Token *model.TokenCache + ModelConfig *model.ModelConfig Endpoint string RequestAt time.Time @@ -69,12 +70,19 @@ func WithToken(token *model.TokenCache) Option { } } -func NewMeta(channel *model.Channel, mode int, modelName string, opts ...Option) *Meta { +func NewMeta( + channel *model.Channel, + mode int, + modelName string, + modelConfig *model.ModelConfig, + opts ...Option, +) *Meta { meta := Meta{ values: make(map[string]any), Mode: mode, OriginModel: modelName, RequestAt: time.Now(), + ModelConfig: modelConfig, } for _, opt := range opts { diff --git a/service/aiproxy/relay/price/image.go b/service/aiproxy/relay/price/image.go deleted file mode 100644 index e5171f1a5e0..00000000000 --- a/service/aiproxy/relay/price/image.go +++ /dev/null @@ -1,45 +0,0 @@ -package price - 
-import ( - "errors" - "fmt" - - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/model" -) - -func GetImageSizePrice(model string, size string) (float64, bool) { - if !config.GetBillingEnabled() { - return 0, false - } - return getImageSizePrice(model, size) -} - -func getImageSizePrice(modelName string, size string) (float64, bool) { - modelConfig, ok := model.CacheGetModelConfig(modelName) - if !ok { - return 0, false - } - if len(modelConfig.ImagePrices) == 0 { - return 0, true - } - price, ok := modelConfig.ImagePrices[size] - return price, ok -} - -func ValidateImageMaxBatchSize(modelName string, batchSize int) error { - if batchSize <= 1 { - return nil - } - modelConfig, ok := model.CacheGetModelConfig(modelName) - if !ok { - return errors.New("model not found") - } - if modelConfig.ImageMaxBatchSize <= 0 { - return nil - } - if batchSize > modelConfig.ImageMaxBatchSize { - return fmt.Errorf("batch size %d is greater than the maximum batch size %d", batchSize, modelConfig.ImageMaxBatchSize) - } - return nil -} diff --git a/service/aiproxy/relay/price/model.go b/service/aiproxy/relay/price/model.go deleted file mode 100644 index 48da1919591..00000000000 --- a/service/aiproxy/relay/price/model.go +++ /dev/null @@ -1,28 +0,0 @@ -package price - -import ( - "github.com/labring/sealos/service/aiproxy/common/config" - "github.com/labring/sealos/service/aiproxy/model" -) - -const ( - // /1K tokens - PriceUnit = 1000 -) - -// ModelPrice -// https://platform.openai.com/docs/models/model-endpoint-compatibility -// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf -// https://openai.com/pricing -// 价格单位:人民币/1K tokens - -func GetModelPrice(modelName string) (float64, float64, bool) { - if !config.GetBillingEnabled() { - return 0, 0, true - } - modelConfig, ok := model.CacheGetModelConfig(modelName) - if !ok { - return 0, 0, false - } - return modelConfig.InputPrice, modelConfig.OutputPrice, true -} diff --git 
a/service/aiproxy/relay/utils/testreq.go b/service/aiproxy/relay/utils/testreq.go index 9fc92d5fb04..6c6410fa107 100644 --- a/service/aiproxy/relay/utils/testreq.go +++ b/service/aiproxy/relay/utils/testreq.go @@ -3,7 +3,6 @@ package utils import ( "bytes" "encoding/json" - "errors" "fmt" "io" "strconv" @@ -25,14 +24,10 @@ func NewErrUnsupportedModelType(modelType string) *UnsupportedModelTypeError { return &UnsupportedModelTypeError{ModelType: modelType} } -func BuildRequest(modelName string) (io.Reader, int, error) { - modelConfig, ok := model.CacheGetModelConfig(modelName) - if !ok { - return nil, relaymode.Unknown, errors.New(modelName + " model config not found") - } +func BuildRequest(modelConfig *model.ModelConfig) (io.Reader, int, error) { switch modelConfig.Type { case relaymode.ChatCompletions: - body, err := BuildChatCompletionRequest(modelName) + body, err := BuildChatCompletionRequest(modelConfig.Model) if err != nil { return nil, relaymode.Unknown, err } @@ -40,13 +35,13 @@ func BuildRequest(modelName string) (io.Reader, int, error) { case relaymode.Completions: return nil, relaymode.Unknown, NewErrUnsupportedModelType("completions") case relaymode.Embeddings: - body, err := BuildEmbeddingsRequest(modelName) + body, err := BuildEmbeddingsRequest(modelConfig.Model) if err != nil { return nil, relaymode.Unknown, err } return body, relaymode.Embeddings, nil case relaymode.Moderations: - body, err := BuildModerationsRequest(modelName) + body, err := BuildModerationsRequest(modelConfig.Model) if err != nil { return nil, relaymode.Unknown, err } @@ -60,7 +55,7 @@ func BuildRequest(modelName string) (io.Reader, int, error) { case relaymode.Edits: return nil, relaymode.Unknown, NewErrUnsupportedModelType("edits") case relaymode.AudioSpeech: - body, err := BuildAudioSpeechRequest(modelName) + body, err := BuildAudioSpeechRequest(modelConfig.Model) if err != nil { return nil, relaymode.Unknown, err } @@ -70,7 +65,7 @@ func BuildRequest(modelName string) 
(io.Reader, int, error) { case relaymode.AudioTranslation: return nil, relaymode.Unknown, NewErrUnsupportedModelType("audio translation") case relaymode.Rerank: - body, err := BuildRerankRequest(modelName) + body, err := BuildRerankRequest(modelConfig.Model) if err != nil { return nil, relaymode.Unknown, err } From 66e555770afdfdc171dc5b0357930991cb1b955c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 01:41:16 +0800 Subject: [PATCH 079/167] feat: consume --- service/aiproxy/common/consume/consume.go | 162 ++++++++++++++++++++ service/aiproxy/main.go | 6 +- service/aiproxy/middleware/distributor.go | 33 ++-- service/aiproxy/model/modelconfig.go | 5 + service/aiproxy/relay/controller/consume.go | 120 +-------------- service/aiproxy/relay/controller/handle.go | 35 +++-- service/aiproxy/relay/controller/price.go | 5 - service/aiproxy/router/relay.go | 9 +- 8 files changed, 213 insertions(+), 162 deletions(-) create mode 100644 service/aiproxy/common/consume/consume.go diff --git a/service/aiproxy/common/consume/consume.go b/service/aiproxy/common/consume/consume.go new file mode 100644 index 00000000000..7ba0d3d2ef6 --- /dev/null +++ b/service/aiproxy/common/consume/consume.go @@ -0,0 +1,162 @@ +package consume + +import ( + "context" + "sync" + + "github.com/labring/sealos/service/aiproxy/common/balance" + "github.com/labring/sealos/service/aiproxy/model" + "github.com/labring/sealos/service/aiproxy/relay/meta" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" + "github.com/shopspring/decimal" + log "github.com/sirupsen/logrus" +) + +var consumeWaitGroup sync.WaitGroup + +func Wait() { + consumeWaitGroup.Wait() +} + +func AsyncConsume( + ctx context.Context, + postGroupConsumer balance.PostGroupConsumer, + code int, + usage *relaymodel.Usage, + meta *meta.Meta, + inputPrice, + outputPrice float64, + content string, + requestDetail *model.RequestDetail, +) { + if meta.IsChannelTest { + return + } + + consumeWaitGroup.Add(1) + defer 
func() { + consumeWaitGroup.Done() + if r := recover(); r != nil { + log.Errorf("panic in consume: %v", r) + } + }() + + go Consume(ctx, postGroupConsumer, code, usage, meta, inputPrice, outputPrice, content, requestDetail) +} + +func Consume( + ctx context.Context, + postGroupConsumer balance.PostGroupConsumer, + code int, + usage *relaymodel.Usage, + meta *meta.Meta, + inputPrice, + outputPrice float64, + content string, + requestDetail *model.RequestDetail, +) { + if meta.IsChannelTest { + return + } + + amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta) + + err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, requestDetail, amount) + if err != nil { + log.Error("error batch record consume: " + err.Error()) + } +} + +func calculateAmount( + ctx context.Context, + usage *relaymodel.Usage, + inputPrice, outputPrice float64, + postGroupConsumer balance.PostGroupConsumer, + meta *meta.Meta, +) float64 { + if usage == nil { + return 0 + } + + promptTokens := usage.PromptTokens + completionTokens := usage.CompletionTokens + totalTokens := promptTokens + completionTokens + + if totalTokens == 0 { + return 0 + } + + promptAmount := decimal.NewFromInt(int64(promptTokens)). + Mul(decimal.NewFromFloat(inputPrice)). + Div(decimal.NewFromInt(model.PriceUnit)) + completionAmount := decimal.NewFromInt(int64(completionTokens)). + Mul(decimal.NewFromFloat(outputPrice)). 
+ Div(decimal.NewFromInt(model.PriceUnit)) + amount := promptAmount.Add(completionAmount).InexactFloat64() + + if amount > 0 { + return processGroupConsume(ctx, amount, postGroupConsumer, meta) + } + + return 0 +} + +func processGroupConsume( + ctx context.Context, + amount float64, + postGroupConsumer balance.PostGroupConsumer, + meta *meta.Meta, +) float64 { + consumedAmount, err := postGroupConsumer.PostGroupConsume(ctx, meta.Token.Name, amount) + if err != nil { + log.Error("error consuming token remain amount: " + err.Error()) + if err := model.CreateConsumeError( + meta.RequestID, + meta.RequestAt, + meta.Group.ID, + meta.Token.Name, + meta.OriginModel, + err.Error(), + amount, + meta.Token.ID, + ); err != nil { + log.Error("failed to create consume error: " + err.Error()) + } + return amount + } + return consumedAmount +} + +func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, requestDetail *model.RequestDetail, amount float64) error { + promptTokens := 0 + completionTokens := 0 + if usage != nil { + promptTokens = usage.PromptTokens + completionTokens = usage.CompletionTokens + } + + var channelID int + if meta.Channel != nil { + channelID = meta.Channel.ID + } + + return model.BatchRecordConsume( + meta.RequestID, + meta.RequestAt, + meta.Group.ID, + code, + channelID, + promptTokens, + completionTokens, + meta.OriginModel, + meta.Token.ID, + meta.Token.Name, + amount, + inputPrice, + outputPrice, + meta.Endpoint, + content, + meta.Mode, + requestDetail, + ) +} diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 9b12bb947d8..2742f42ce00 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -18,10 +18,10 @@ import ( "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/balance" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/consume" 
"github.com/labring/sealos/service/aiproxy/controller" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" - relaycontroller "github.com/labring/sealos/service/aiproxy/relay/controller" "github.com/labring/sealos/service/aiproxy/router" log "github.com/sirupsen/logrus" ) @@ -185,8 +185,8 @@ func main() { log.Info("server shutdown successfully") } - log.Info("shutting down relay consumer...") - relaycontroller.ConsumeWaitGroup.Wait() + log.Info("shutting down consumer...") + consume.Wait() log.Info("shutting down sync services...") wg.Wait() diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index f4322e2975c..136adeaa5b2 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -46,9 +46,9 @@ func getGroupRPMRatio(group *model.GroupCache) float64 { return groupRPMRatio } -func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64, modelTPM int64) bool { +func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64, modelTPM int64) error { if modelRPM <= 0 { - return true + return nil } groupConsumeLevelRpmRatio := calculateGroupConsumeLevelRpmRatio(group.UsedAmount) @@ -65,10 +65,7 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestMo ) if !ok { - abortWithMessage(c, http.StatusTooManyRequests, - group.ID+" is requesting too frequently", - ) - return false + return fmt.Errorf("group (%s) is requesting too frequently", group.ID) } if modelTPM > 0 { @@ -76,17 +73,14 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestMo if err != nil { log.Errorf("get group model tpm (%s:%s) error: %s", group.ID, requestModel, err.Error()) // ignore error - return true + return nil } if tpm >= modelTPM { - abortWithMessage(c, http.StatusTooManyRequests, - group.ID+" tpm is too high", - ) 
- return false + return fmt.Errorf("group (%s) tpm is too high", group.ID) } } - return true + return nil } func Distribute(c *gin.Context) { @@ -111,6 +105,12 @@ func Distribute(c *gin.Context) { SetLogModelFields(log.Data, requestModel) + mc, ok := GetModelCaches(c).ModelConfigMap[requestModel] + if !ok { + abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") + return + } + token := GetToken(c) if len(token.Models) == 0 || !slices.Contains(token.Models, requestModel) { abortWithMessage(c, @@ -122,13 +122,8 @@ func Distribute(c *gin.Context) { return } - mc, ok := GetModelCaches(c).ModelConfigMap[requestModel] - if !ok { - abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") - return - } - - if !checkGroupModelRPMAndTPM(c, group, requestModel, mc.RPM, mc.TPM) { + if err := checkGroupModelRPMAndTPM(c, group, requestModel, mc.RPM, mc.TPM); err != nil { + abortWithMessage(c, http.StatusTooManyRequests, err.Error()) return } diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index a1b51f886be..160db525d56 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -10,6 +10,11 @@ import ( "gorm.io/gorm" ) +const ( + // /1K tokens + PriceUnit = 1000 +) + //nolint:revive type ModelConfig struct { CreatedAt time.Time `gorm:"index;autoCreateTime" json:"created_at"` diff --git a/service/aiproxy/relay/controller/consume.go b/service/aiproxy/relay/controller/consume.go index 9142b200194..b343dbc495f 100644 --- a/service/aiproxy/relay/controller/consume.go +++ b/service/aiproxy/relay/controller/consume.go @@ -2,19 +2,13 @@ package controller import ( "context" - "sync" "github.com/labring/sealos/service/aiproxy/common/balance" - "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" - relaymodel 
"github.com/labring/sealos/service/aiproxy/relay/model" "github.com/shopspring/decimal" - log "github.com/sirupsen/logrus" ) -var ConsumeWaitGroup sync.WaitGroup - type PreCheckGroupBalanceReq struct { InputTokens int MaxTokens int @@ -33,7 +27,7 @@ func getPreConsumedAmount(req *PreCheckGroupBalanceReq) float64 { return decimal. NewFromInt(preConsumedTokens). Mul(decimal.NewFromFloat(req.InputPrice)). - Div(decimal.NewFromInt(PriceUnit)). + Div(decimal.NewFromInt(model.PriceUnit)). InexactFloat64() } @@ -57,115 +51,3 @@ func getGroupBalance(ctx context.Context, meta *meta.Meta) (float64, balance.Pos return balance.Default.GetGroupRemainBalance(ctx, meta.Group.ID) } - -func postConsumeAmount( - ctx context.Context, - consumeWaitGroup *sync.WaitGroup, - postGroupConsumer balance.PostGroupConsumer, - code int, - usage *relaymodel.Usage, - meta *meta.Meta, - inputPrice, - outputPrice float64, - content string, - requestDetail *model.RequestDetail, -) { - defer func() { - consumeWaitGroup.Done() - if r := recover(); r != nil { - log.Errorf("panic in post consume amount: %v", r) - } - }() - - if meta.IsChannelTest { - return - } - - log := middleware.NewLogger() - middleware.SetLogFieldsFromMeta(meta, log.Data) - - amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta, log) - - err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, requestDetail, amount) - if err != nil { - log.Error("error batch record consume: " + err.Error()) - } -} - -func calculateAmount(ctx context.Context, usage *relaymodel.Usage, inputPrice, outputPrice float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { - if usage == nil { - return 0 - } - - promptTokens := usage.PromptTokens - completionTokens := usage.CompletionTokens - totalTokens := promptTokens + completionTokens - - if totalTokens == 0 { - return 0 - } - - promptAmount := decimal.NewFromInt(int64(promptTokens)). 
- Mul(decimal.NewFromFloat(inputPrice)). - Div(decimal.NewFromInt(PriceUnit)) - completionAmount := decimal.NewFromInt(int64(completionTokens)). - Mul(decimal.NewFromFloat(outputPrice)). - Div(decimal.NewFromInt(PriceUnit)) - amount := promptAmount.Add(completionAmount).InexactFloat64() - - if amount > 0 { - return processGroupConsume(ctx, amount, postGroupConsumer, meta, log) - } - - return 0 -} - -func processGroupConsume(ctx context.Context, amount float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, log *log.Entry) float64 { - consumedAmount, err := postGroupConsumer.PostGroupConsume(ctx, meta.Token.Name, amount) - if err != nil { - log.Error("error consuming token remain amount: " + err.Error()) - if err := model.CreateConsumeError( - meta.RequestID, - meta.RequestAt, - meta.Group.ID, - meta.Token.Name, - meta.OriginModel, - err.Error(), - amount, - meta.Token.ID, - ); err != nil { - log.Error("failed to create consume error: " + err.Error()) - } - return amount - } - return consumedAmount -} - -func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, requestDetail *model.RequestDetail, amount float64) error { - promptTokens := 0 - completionTokens := 0 - if usage != nil { - promptTokens = usage.PromptTokens - completionTokens = usage.CompletionTokens - } - - return model.BatchRecordConsume( - meta.RequestID, - meta.RequestAt, - meta.Group.ID, - code, - meta.Channel.ID, - promptTokens, - completionTokens, - meta.OriginModel, - meta.Token.ID, - meta.Token.Name, - amount, - inputPrice, - outputPrice, - meta.Endpoint, - content, - meta.Mode, - requestDetail, - ) -} diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index 68cb02cbcba..99b90696b0d 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -9,6 +9,7 @@ import ( "github.com/gin-gonic/gin" 
"github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/consume" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -26,15 +27,28 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) if !ok { log.Errorf("invalid (%s[%d]) channel type: %d", meta.Channel.Name, meta.Channel.ID, meta.Channel.Type) - return openai.ErrorWrapperWithMessage("invalid channel error", "invalid_channel_type", http.StatusInternalServerError) + return openai.ErrorWrapperWithMessage( + "invalid channel error", "invalid_channel_type", http.StatusInternalServerError) } // 2. Get group balance groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) if err != nil { log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) - return openai.ErrorWrapper( - fmt.Errorf("get group (%s) balance failed", meta.Group.ID), + errMsg := fmt.Sprintf("get group (%s) balance failed", meta.Group.ID) + consume.AsyncConsume( + context.Background(), + nil, + http.StatusInternalServerError, + nil, + meta, + 0, + 0, + errMsg, + nil, + ) + return openai.ErrorWrapperWithMessage( + errMsg, "get_group_quota_failed", http.StatusInternalServerError, ) @@ -53,9 +67,8 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa RequestBody: conv.BytesToString(body), } } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, + consume.AsyncConsume( + context.Background(), nil, http.StatusBadRequest, nil, @@ -90,9 +103,8 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa log.Errorf("handle failed: %+v", respErr.Error) } - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, + 
consume.AsyncConsume( + context.Background(), postGroupConsumer, respErr.StatusCode, usage, @@ -106,9 +118,8 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa } // 6. Post consume - ConsumeWaitGroup.Add(1) - go postConsumeAmount(context.Background(), - &ConsumeWaitGroup, + consume.AsyncConsume( + context.Background(), postGroupConsumer, http.StatusOK, usage, diff --git a/service/aiproxy/relay/controller/price.go b/service/aiproxy/relay/controller/price.go index aa5652912ee..b36b2a29377 100644 --- a/service/aiproxy/relay/controller/price.go +++ b/service/aiproxy/relay/controller/price.go @@ -5,11 +5,6 @@ import ( "github.com/labring/sealos/service/aiproxy/model" ) -const ( - // /1K tokens - PriceUnit = 1000 -) - func GetModelPrice(modelConfig *model.ModelConfig) (float64, float64, bool) { if !config.GetBillingEnabled() { return 0, 0, true diff --git a/service/aiproxy/router/relay.go b/service/aiproxy/router/relay.go index 32f54850c77..9a55af893f2 100644 --- a/service/aiproxy/router/relay.go +++ b/service/aiproxy/router/relay.go @@ -9,22 +9,23 @@ import ( ) func SetRelayRouter(router *gin.Engine) { - router.Use(middleware.CORS()) + router.Use( + middleware.CORS(), + middleware.TokenAuth, + ) // https://platform.openai.com/docs/api-reference/introduction modelsRouter := router.Group("/v1/models") - modelsRouter.Use(middleware.TokenAuth) { modelsRouter.GET("", controller.ListModels) modelsRouter.GET("/:model", controller.RetrieveModel) } dashboardRouter := router.Group("/v1/dashboard") - dashboardRouter.Use(middleware.TokenAuth) { dashboardRouter.GET("/billing/subscription", controller.GetSubscription) dashboardRouter.GET("/billing/usage", controller.GetUsage) } relayV1Router := router.Group("/v1") - relayV1Router.Use(middleware.TokenAuth, middleware.Distribute) + relayV1Router.Use(middleware.Distribute) { relayV1Router.POST("/completions", controller.NewRelay(relaymode.Completions)) relayV1Router.POST("/chat/completions", 
controller.NewRelay(relaymode.ChatCompletions)) From bdc316fe1e5f630f072ecb18ae91389fda1f5850 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 02:24:34 +0800 Subject: [PATCH 080/167] feat: group custom model rpm tpm --- service/aiproxy/controller/group.go | 84 +++++++++++++++- service/aiproxy/middleware/distributor.go | 9 +- service/aiproxy/model/cache.go | 113 ++++++++++++++++++---- service/aiproxy/model/group.go | 66 +++++++++++-- service/aiproxy/router/api.go | 3 + 5 files changed, 245 insertions(+), 30 deletions(-) diff --git a/service/aiproxy/controller/group.go b/service/aiproxy/controller/group.go index 2de78ed4252..dd5580ae6a0 100644 --- a/service/aiproxy/controller/group.go +++ b/service/aiproxy/controller/group.go @@ -115,10 +115,34 @@ func GetGroup(c *gin.Context) { middleware.SuccessResponse(c, groupResponse) } -type UpdateGroupRPMRequest struct { +type UpdateGroupRPMRatioRequest struct { RPMRatio float64 `json:"rpm_ratio"` } +func UpdateGroupRPMRatio(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + req := UpdateGroupRPMRatioRequest{} + err := json.NewDecoder(c.Request.Body).Decode(&req) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + err = model.UpdateGroupRPMRatio(group, req.RPMRatio) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + middleware.SuccessResponse(c, nil) +} + +type UpdateGroupRPMRequest struct { + RPM map[string]int64 `json:"rpm"` +} + func UpdateGroupRPM(c *gin.Context) { group := c.Param("group") if group == "" { @@ -131,7 +155,55 @@ func UpdateGroupRPM(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") return } - err = model.UpdateGroupRPM(group, req.RPMRatio) + err = model.UpdateGroupRPM(group, req.RPM) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + 
middleware.SuccessResponse(c, nil) +} + +type UpdateGroupTPMRequest struct { + TPM map[string]int64 `json:"tpm"` +} + +func UpdateGroupTPM(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + req := UpdateGroupTPMRequest{} + err := json.NewDecoder(c.Request.Body).Decode(&req) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + err = model.UpdateGroupTPM(group, req.TPM) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + middleware.SuccessResponse(c, nil) +} + +type UpdateGroupTPMRatioRequest struct { + TPMRatio float64 `json:"tpm_ratio"` +} + +func UpdateGroupTPMRatio(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + req := UpdateGroupTPMRatioRequest{} + err := json.NewDecoder(c.Request.Body).Decode(&req) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + err = model.UpdateGroupTPMRatio(group, req.TPMRatio) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return @@ -193,7 +265,10 @@ func DeleteGroups(c *gin.Context) { } type CreateGroupRequest struct { - RPMRatio float64 `json:"rpm_ratio"` + RPM map[string]int64 `json:"rpm"` + RPMRatio float64 `json:"rpm_ratio"` + TPM map[string]int64 `json:"tpm"` + TPMRatio float64 `json:"tpm_ratio"` } func CreateGroup(c *gin.Context) { @@ -211,6 +286,9 @@ func CreateGroup(c *gin.Context) { if err := model.CreateGroup(&model.Group{ ID: group, RPMRatio: req.RPMRatio, + RPM: req.RPM, + TPMRatio: req.TPMRatio, + TPM: req.TPM, }); err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) return diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 136adeaa5b2..5876014e06b 100644 --- a/service/aiproxy/middleware/distributor.go +++ 
b/service/aiproxy/middleware/distributor.go @@ -47,7 +47,14 @@ func getGroupRPMRatio(group *model.GroupCache) float64 { } func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64, modelTPM int64) error { - if modelRPM <= 0 { + if group.RPM != nil && group.RPM[requestModel] > 0 { + modelRPM = group.RPM[requestModel] + } + if group.TPM != nil && group.TPM[requestModel] > 0 { + modelTPM = group.TPM[requestModel] + } + + if modelRPM <= 0 && modelTPM <= 0 { return nil } diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index d2213905943..a5d87712628 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -13,12 +13,11 @@ import ( "time" json "github.com/json-iterator/go" - "github.com/maruel/natural" - "github.com/redis/go-redis/v9" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/conv" + "github.com/maruel/natural" + "github.com/redis/go-redis/v9" log "github.com/sirupsen/logrus" ) @@ -46,6 +45,11 @@ func (r redisStringSlice) MarshalBinary() ([]byte, error) { type redisTime time.Time +var ( + _ redis.Scanner = (*redisTime)(nil) + _ encoding.BinaryMarshaler = (*redisTime)(nil) +) + func (t *redisTime) ScanRedis(value string) error { return (*time.Time)(t).UnmarshalBinary(conv.StringToBytes(value)) } @@ -90,13 +94,13 @@ func CacheDeleteToken(key string) error { } //nolint:gosec -func CacheSetToken(token *Token) error { +func CacheSetToken(token *TokenCache) error { if !common.RedisEnabled { return nil } key := fmt.Sprintf(TokenCacheKey, token.Key) pipe := common.RDB.Pipeline() - pipe.HSet(context.Background(), key, token.ToTokenCache()) + pipe.HSet(context.Background(), key, token) expireTime := SyncFrequency + time.Duration(rand.Int64N(60)-30)*time.Second pipe.Expire(context.Background(), key, expireTime) _, err := pipe.Exec(context.Background()) @@ -127,11 
+131,13 @@ func CacheGetTokenByKey(key string) (*TokenCache, error) { return nil, err } - if err := CacheSetToken(token); err != nil { + tc := token.ToTokenCache() + + if err := CacheSetToken(tc); err != nil { log.Error("redis set token error: " + err.Error()) } - return token.ToTokenCache(), nil + return tc, nil } var updateTokenUsedAmountOnlyIncreaseScript = redis.NewScript(` @@ -181,11 +187,29 @@ func CacheUpdateTokenStatus(key string, status int) error { return updateTokenStatusScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, status).Err() } +type redisMapStringInt64 map[string]int64 + +var ( + _ redis.Scanner = (*redisMapStringInt64)(nil) + _ encoding.BinaryMarshaler = (*redisMapStringInt64)(nil) +) + +func (r *redisMapStringInt64) ScanRedis(value string) error { + return json.Unmarshal(conv.StringToBytes(value), r) +} + +func (r redisMapStringInt64) MarshalBinary() ([]byte, error) { + return json.Marshal(r) +} + type GroupCache struct { - ID string `json:"-" redis:"-"` - Status int `json:"status" redis:"st"` - UsedAmount float64 `json:"used_amount" redis:"ua"` - RPMRatio float64 `json:"rpm_ratio" redis:"rpm"` + ID string `json:"-" redis:"-"` + Status int `json:"status" redis:"st"` + UsedAmount float64 `json:"used_amount" redis:"ua"` + RPMRatio float64 `json:"rpm_ratio" redis:"rpm_r"` + RPM redisMapStringInt64 `json:"rpm" redis:"rpm"` + TPMRatio float64 `json:"tpm_ratio" redis:"tpm_r"` + TPM redisMapStringInt64 `json:"tpm" redis:"tpm"` } func (g *Group) ToGroupCache() *GroupCache { @@ -194,6 +218,9 @@ func (g *Group) ToGroupCache() *GroupCache { Status: g.Status, UsedAmount: g.UsedAmount, RPMRatio: g.RPMRatio, + RPM: g.RPM, + TPMRatio: g.TPMRatio, + TPM: g.TPM, } } @@ -204,6 +231,20 @@ func CacheDeleteGroup(id string) error { return common.RedisDel(fmt.Sprintf(GroupCacheKey, id)) } +var updateGroupRPMRatioScript = redis.NewScript(` + if redis.call("HExists", KEYS[1], "rpm_r") then + redis.call("HSet", KEYS[1], "rpm_r", 
ARGV[1]) + end + return redis.status_reply("ok") +`) + +func CacheUpdateGroupRPMRatio(id string, rpmRatio float64) error { + if !common.RedisEnabled { + return nil + } + return updateGroupRPMRatioScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, rpmRatio).Err() +} + var updateGroupRPMScript = redis.NewScript(` if redis.call("HExists", KEYS[1], "rpm") then redis.call("HSet", KEYS[1], "rpm", ARGV[1]) @@ -211,11 +252,47 @@ var updateGroupRPMScript = redis.NewScript(` return redis.status_reply("ok") `) -func CacheUpdateGroupRPM(id string, rpmRatio float64) error { +func CacheUpdateGroupRPM(id string, rpm map[string]int64) error { + if !common.RedisEnabled { + return nil + } + jsonRPM, err := json.Marshal(rpm) + if err != nil { + return err + } + return updateGroupRPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, conv.BytesToString(jsonRPM)).Err() +} + +var updateGroupTPMRatioScript = redis.NewScript(` + if redis.call("HExists", KEYS[1], "tpm_r") then + redis.call("HSet", KEYS[1], "tpm_r", ARGV[1]) + end + return redis.status_reply("ok") +`) + +func CacheUpdateGroupTPMRatio(id string, tpmRatio float64) error { if !common.RedisEnabled { return nil } - return updateGroupRPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, rpmRatio).Err() + return updateGroupTPMRatioScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, tpmRatio).Err() +} + +var updateGroupTPMScript = redis.NewScript(` + if redis.call("HExists", KEYS[1], "tpm") then + redis.call("HSet", KEYS[1], "tpm", ARGV[1]) + end + return redis.status_reply("ok") +`) + +func CacheUpdateGroupTPM(id string, tpm map[string]int64) error { + if !common.RedisEnabled { + return nil + } + jsonTPM, err := json.Marshal(tpm) + if err != nil { + return err + } + return updateGroupTPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, 
conv.BytesToString(jsonTPM)).Err() } var updateGroupStatusScript = redis.NewScript(` @@ -233,13 +310,13 @@ func CacheUpdateGroupStatus(id string, status int) error { } //nolint:gosec -func CacheSetGroup(group *Group) error { +func CacheSetGroup(group *GroupCache) error { if !common.RedisEnabled { return nil } key := fmt.Sprintf(GroupCacheKey, group.ID) pipe := common.RDB.Pipeline() - pipe.HSet(context.Background(), key, group.ToGroupCache()) + pipe.HSet(context.Background(), key, group) expireTime := SyncFrequency + time.Duration(rand.Int64N(60)-30)*time.Second pipe.Expire(context.Background(), key, expireTime) _, err := pipe.Exec(context.Background()) @@ -270,11 +347,13 @@ func CacheGetGroup(id string) (*GroupCache, error) { return nil, err } - if err := CacheSetGroup(group); err != nil { + gc := group.ToGroupCache() + + if err := CacheSetGroup(gc); err != nil { log.Error("redis set group error: " + err.Error()) } - return group.ToGroupCache(), nil + return gc, nil } var updateGroupUsedAmountOnlyIncreaseScript = redis.NewScript(` diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index febc7529cb9..5f5e343e56c 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -6,6 +6,7 @@ import ( "strings" "time" + json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common" log "github.com/sirupsen/logrus" "gorm.io/gorm" @@ -22,13 +23,16 @@ const ( ) type Group struct { - CreatedAt time.Time `json:"created_at"` - ID string `gorm:"primaryKey" json:"id"` - Tokens []*Token `gorm:"foreignKey:GroupID" json:"-"` - Status int `gorm:"default:1;index" json:"status"` - UsedAmount float64 `gorm:"index" json:"used_amount"` - RPMRatio float64 `gorm:"index" json:"rpm_ratio"` - RequestCount int `gorm:"index" json:"request_count"` + CreatedAt time.Time `json:"created_at"` + ID string `gorm:"primaryKey" json:"id"` + Tokens []*Token `gorm:"foreignKey:GroupID" json:"-"` + Status int `gorm:"default:1;index" 
json:"status"` + UsedAmount float64 `gorm:"index" json:"used_amount"` + RPMRatio float64 `gorm:"index" json:"rpm_ratio"` + RPM map[string]int64 `gorm:"serializer:fastjson" json:"rpm"` + TPMRatio float64 `gorm:"index" json:"tpm_ratio"` + TPM map[string]int64 `gorm:"serializer:fastjson" json:"tpm"` + RequestCount int `gorm:"index" json:"request_count"` } func (g *Group) BeforeDelete(tx *gorm.DB) (err error) { @@ -151,10 +155,10 @@ func UpdateGroupUsedAmountAndRequestCount(id string, amount float64, count int) return HandleUpdateResult(result, ErrGroupNotFound) } -func UpdateGroupRPM(id string, rpmRatio float64) (err error) { +func UpdateGroupRPMRatio(id string, rpmRatio float64) (err error) { defer func() { if err == nil { - if err := CacheUpdateGroupRPM(id, rpmRatio); err != nil { + if err := CacheUpdateGroupRPMRatio(id, rpmRatio); err != nil { log.Error("cache update group rpm failed: " + err.Error()) } } @@ -163,6 +167,50 @@ func UpdateGroupRPM(id string, rpmRatio float64) (err error) { return HandleUpdateResult(result, ErrGroupNotFound) } +func UpdateGroupRPM(id string, rpm map[string]int64) (err error) { + defer func() { + if err == nil { + if err := CacheUpdateGroupRPM(id, rpm); err != nil { + log.Error("cache update group rpm failed: " + err.Error()) + } + } + }() + jsonRpm, err := json.Marshal(rpm) + if err != nil { + return err + } + result := DB.Model(&Group{}).Where("id = ?", id).Update("rpm", jsonRpm) + return HandleUpdateResult(result, ErrGroupNotFound) +} + +func UpdateGroupTPMRatio(id string, tpmRatio float64) (err error) { + defer func() { + if err == nil { + if err := CacheUpdateGroupTPMRatio(id, tpmRatio); err != nil { + log.Error("cache update group tpm ratio failed: " + err.Error()) + } + } + }() + result := DB.Model(&Group{}).Where("id = ?", id).Update("tpm_ratio", tpmRatio) + return HandleUpdateResult(result, ErrGroupNotFound) +} + +func UpdateGroupTPM(id string, tpm map[string]int64) (err error) { + defer func() { + if err == nil { + if err := 
CacheUpdateGroupTPM(id, tpm); err != nil { + log.Error("cache update group tpm failed: " + err.Error()) + } + } + }() + jsonTpm, err := json.Marshal(tpm) + if err != nil { + return err + } + result := DB.Model(&Group{}).Where("id = ?", id).Update("tpm", jsonTpm) + return HandleUpdateResult(result, ErrGroupNotFound) +} + func UpdateGroupStatus(id string, status int) (err error) { defer func() { if err == nil { diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index e302ccb80d1..6bfedf07972 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -51,7 +51,10 @@ func SetAPIRouter(router *gin.Engine) { groupRoute.GET("/:group", controller.GetGroup) groupRoute.DELETE("/:group", controller.DeleteGroup) groupRoute.POST("/:group/status", controller.UpdateGroupStatus) + groupRoute.POST("/:group/rpm_ratio", controller.UpdateGroupRPMRatio) groupRoute.POST("/:group/rpm", controller.UpdateGroupRPM) + groupRoute.POST("/:group/tpm_ratio", controller.UpdateGroupTPMRatio) + groupRoute.POST("/:group/tpm", controller.UpdateGroupTPM) } optionRoute := apiRouter.Group("/option") From 662bf282e9e18d756d56ef2a6bafad22cb0750fd Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 13:04:17 +0800 Subject: [PATCH 081/167] fix: models --- service/aiproxy/controller/model.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/service/aiproxy/controller/model.go b/service/aiproxy/controller/model.go index 0728fb1593d..06e27e671e0 100644 --- a/service/aiproxy/controller/model.go +++ b/service/aiproxy/controller/model.go @@ -168,11 +168,11 @@ func ChannelDefaultModelsAndMappingByType(c *gin.Context) { } func EnabledModels(c *gin.Context) { - middleware.SuccessResponse(c, middleware.GetModelCaches(c).EnabledModelConfigs) + middleware.SuccessResponse(c, model.LoadModelCaches().EnabledModelConfigs) } func ChannelEnabledModels(c *gin.Context) { - middleware.SuccessResponse(c, 
middleware.GetModelCaches(c).EnabledChannelType2ModelConfigs) + middleware.SuccessResponse(c, model.LoadModelCaches().EnabledChannelType2ModelConfigs) } func ChannelEnabledModelsByType(c *gin.Context) { @@ -186,7 +186,7 @@ func ChannelEnabledModelsByType(c *gin.Context) { middleware.ErrorResponse(c, http.StatusOK, "invalid type") return } - middleware.SuccessResponse(c, middleware.GetModelCaches(c).EnabledChannelType2ModelConfigs[channelTypeInt]) + middleware.SuccessResponse(c, model.LoadModelCaches().EnabledChannelType2ModelConfigs[channelTypeInt]) } func ListModels(c *gin.Context) { From dc9eb906715825ebe494ec981e21d76ef24a11a5 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 15:39:20 +0800 Subject: [PATCH 082/167] fix: v1 route --- service/aiproxy/router/relay.go | 114 ++++++++++++++++---------------- 1 file changed, 58 insertions(+), 56 deletions(-) diff --git a/service/aiproxy/router/relay.go b/service/aiproxy/router/relay.go index 9a55af893f2..2c6bdaef639 100644 --- a/service/aiproxy/router/relay.go +++ b/service/aiproxy/router/relay.go @@ -9,73 +9,75 @@ import ( ) func SetRelayRouter(router *gin.Engine) { - router.Use( + // https://platform.openai.com/docs/api-reference/introduction + v1Router := router.Group("/v1") + v1Router.Use( middleware.CORS(), middleware.TokenAuth, ) - // https://platform.openai.com/docs/api-reference/introduction - modelsRouter := router.Group("/v1/models") + + modelsRouter := v1Router.Group("/models") { modelsRouter.GET("", controller.ListModels) modelsRouter.GET("/:model", controller.RetrieveModel) } - dashboardRouter := router.Group("/v1/dashboard") + dashboardRouter := v1Router.Group("/dashboard") { dashboardRouter.GET("/billing/subscription", controller.GetSubscription) dashboardRouter.GET("/billing/usage", controller.GetUsage) } - relayV1Router := router.Group("/v1") - relayV1Router.Use(middleware.Distribute) + relayRouter := v1Router.Group("") + relayRouter.Use(middleware.Distribute) { - 
relayV1Router.POST("/completions", controller.NewRelay(relaymode.Completions)) - relayV1Router.POST("/chat/completions", controller.NewRelay(relaymode.ChatCompletions)) - relayV1Router.POST("/edits", controller.NewRelay(relaymode.Edits)) - relayV1Router.POST("/images/generations", controller.NewRelay(relaymode.ImagesGenerations)) - relayV1Router.POST("/images/edits", controller.RelayNotImplemented) - relayV1Router.POST("/images/variations", controller.RelayNotImplemented) - relayV1Router.POST("/embeddings", controller.NewRelay(relaymode.Embeddings)) - relayV1Router.POST("/engines/:model/embeddings", controller.NewRelay(relaymode.Embeddings)) - relayV1Router.POST("/audio/transcriptions", controller.NewRelay(relaymode.AudioTranscription)) - relayV1Router.POST("/audio/translations", controller.NewRelay(relaymode.AudioTranslation)) - relayV1Router.POST("/audio/speech", controller.NewRelay(relaymode.AudioSpeech)) - relayV1Router.POST("/rerank", controller.NewRelay(relaymode.Rerank)) - relayV1Router.GET("/files", controller.RelayNotImplemented) - relayV1Router.POST("/files", controller.RelayNotImplemented) - relayV1Router.DELETE("/files/:id", controller.RelayNotImplemented) - relayV1Router.GET("/files/:id", controller.RelayNotImplemented) - relayV1Router.GET("/files/:id/content", controller.RelayNotImplemented) - relayV1Router.POST("/fine_tuning/jobs", controller.RelayNotImplemented) - relayV1Router.GET("/fine_tuning/jobs", controller.RelayNotImplemented) - relayV1Router.GET("/fine_tuning/jobs/:id", controller.RelayNotImplemented) - relayV1Router.POST("/fine_tuning/jobs/:id/cancel", controller.RelayNotImplemented) - relayV1Router.GET("/fine_tuning/jobs/:id/events", controller.RelayNotImplemented) - relayV1Router.DELETE("/models/:model", controller.RelayNotImplemented) - relayV1Router.POST("/moderations", controller.NewRelay(relaymode.Moderations)) - relayV1Router.POST("/assistants", controller.RelayNotImplemented) - relayV1Router.GET("/assistants/:id", 
controller.RelayNotImplemented) - relayV1Router.POST("/assistants/:id", controller.RelayNotImplemented) - relayV1Router.DELETE("/assistants/:id", controller.RelayNotImplemented) - relayV1Router.GET("/assistants", controller.RelayNotImplemented) - relayV1Router.POST("/assistants/:id/files", controller.RelayNotImplemented) - relayV1Router.GET("/assistants/:id/files/:fileId", controller.RelayNotImplemented) - relayV1Router.DELETE("/assistants/:id/files/:fileId", controller.RelayNotImplemented) - relayV1Router.GET("/assistants/:id/files", controller.RelayNotImplemented) - relayV1Router.POST("/threads", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id", controller.RelayNotImplemented) - relayV1Router.DELETE("/threads/:id", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id/messages", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/messages/:messageId", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id/messages/:messageId", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/messages/:messageId/files/:filesId", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/messages/:messageId/files", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id/runs", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/runs/:runsId", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id/runs/:runsId", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/runs", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id/runs/:runsId/submit_tool_outputs", controller.RelayNotImplemented) - relayV1Router.POST("/threads/:id/runs/:runsId/cancel", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/runs/:runsId/steps/:stepId", controller.RelayNotImplemented) - relayV1Router.GET("/threads/:id/runs/:runsId/steps", controller.RelayNotImplemented) + 
relayRouter.POST("/completions", controller.NewRelay(relaymode.Completions)) + relayRouter.POST("/chat/completions", controller.NewRelay(relaymode.ChatCompletions)) + relayRouter.POST("/edits", controller.NewRelay(relaymode.Edits)) + relayRouter.POST("/images/generations", controller.NewRelay(relaymode.ImagesGenerations)) + relayRouter.POST("/images/edits", controller.RelayNotImplemented) + relayRouter.POST("/images/variations", controller.RelayNotImplemented) + relayRouter.POST("/embeddings", controller.NewRelay(relaymode.Embeddings)) + relayRouter.POST("/engines/:model/embeddings", controller.NewRelay(relaymode.Embeddings)) + relayRouter.POST("/audio/transcriptions", controller.NewRelay(relaymode.AudioTranscription)) + relayRouter.POST("/audio/translations", controller.NewRelay(relaymode.AudioTranslation)) + relayRouter.POST("/audio/speech", controller.NewRelay(relaymode.AudioSpeech)) + relayRouter.POST("/rerank", controller.NewRelay(relaymode.Rerank)) + relayRouter.GET("/files", controller.RelayNotImplemented) + relayRouter.POST("/files", controller.RelayNotImplemented) + relayRouter.DELETE("/files/:id", controller.RelayNotImplemented) + relayRouter.GET("/files/:id", controller.RelayNotImplemented) + relayRouter.GET("/files/:id/content", controller.RelayNotImplemented) + relayRouter.POST("/fine_tuning/jobs", controller.RelayNotImplemented) + relayRouter.GET("/fine_tuning/jobs", controller.RelayNotImplemented) + relayRouter.GET("/fine_tuning/jobs/:id", controller.RelayNotImplemented) + relayRouter.POST("/fine_tuning/jobs/:id/cancel", controller.RelayNotImplemented) + relayRouter.GET("/fine_tuning/jobs/:id/events", controller.RelayNotImplemented) + relayRouter.DELETE("/models/:model", controller.RelayNotImplemented) + relayRouter.POST("/moderations", controller.NewRelay(relaymode.Moderations)) + relayRouter.POST("/assistants", controller.RelayNotImplemented) + relayRouter.GET("/assistants/:id", controller.RelayNotImplemented) + relayRouter.POST("/assistants/:id", 
controller.RelayNotImplemented) + relayRouter.DELETE("/assistants/:id", controller.RelayNotImplemented) + relayRouter.GET("/assistants", controller.RelayNotImplemented) + relayRouter.POST("/assistants/:id/files", controller.RelayNotImplemented) + relayRouter.GET("/assistants/:id/files/:fileId", controller.RelayNotImplemented) + relayRouter.DELETE("/assistants/:id/files/:fileId", controller.RelayNotImplemented) + relayRouter.GET("/assistants/:id/files", controller.RelayNotImplemented) + relayRouter.POST("/threads", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id", controller.RelayNotImplemented) + relayRouter.DELETE("/threads/:id", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id/messages", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/messages/:messageId", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id/messages/:messageId", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/messages/:messageId/files/:filesId", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/messages/:messageId/files", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id/runs", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/runs/:runsId", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id/runs/:runsId", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/runs", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id/runs/:runsId/submit_tool_outputs", controller.RelayNotImplemented) + relayRouter.POST("/threads/:id/runs/:runsId/cancel", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/runs/:runsId/steps/:stepId", controller.RelayNotImplemented) + relayRouter.GET("/threads/:id/runs/:runsId/steps", controller.RelayNotImplemented) } } From a4e15a1896abad5d844c6652adef3a1a1fd6f471 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 20:42:00 +0800 
Subject: [PATCH 083/167] fix: cros --- service/aiproxy/main.go | 4 ++-- service/aiproxy/router/relay.go | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 2742f42ce00..42c997935be 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -117,9 +117,9 @@ func setupHTTPServer() (*http.Server, *gin.Engine) { w := log.StandardLogger().Writer() server. - Use(middleware.NewLog(log.StandardLogger())). Use(gin.RecoveryWithWriter(w)). - Use(middleware.RequestID) + Use(middleware.NewLog(log.StandardLogger())). + Use(middleware.RequestID, middleware.CORS()) router.SetRouter(server) port := os.Getenv("PORT") diff --git a/service/aiproxy/router/relay.go b/service/aiproxy/router/relay.go index 2c6bdaef639..f33925bc528 100644 --- a/service/aiproxy/router/relay.go +++ b/service/aiproxy/router/relay.go @@ -11,10 +11,7 @@ import ( func SetRelayRouter(router *gin.Engine) { // https://platform.openai.com/docs/api-reference/introduction v1Router := router.Group("/v1") - v1Router.Use( - middleware.CORS(), - middleware.TokenAuth, - ) + v1Router.Use(middleware.TokenAuth) modelsRouter := v1Router.Group("/models") { From 610fd8c6aaad027a77d4960861d0604a7ceea3f1 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 23:14:30 +0800 Subject: [PATCH 084/167] feat: rate limit err log record --- service/aiproxy/common/consume/consume.go | 13 +- service/aiproxy/common/rpmlimit/rate-limit.go | 122 ++++++++++++------ service/aiproxy/middleware/distributor.go | 59 +++++++-- service/aiproxy/middleware/utils.go | 20 --- service/aiproxy/relay/controller/handle.go | 5 - service/aiproxy/relay/meta/meta.go | 5 +- service/aiproxy/router/relay.go | 69 ++++++++-- 7 files changed, 203 insertions(+), 90 deletions(-) diff --git a/service/aiproxy/common/consume/consume.go b/service/aiproxy/common/consume/consume.go index 7ba0d3d2ef6..139de8675b1 100644 --- a/service/aiproxy/common/consume/consume.go +++ 
b/service/aiproxy/common/consume/consume.go @@ -19,7 +19,6 @@ func Wait() { } func AsyncConsume( - ctx context.Context, postGroupConsumer balance.PostGroupConsumer, code int, usage *relaymodel.Usage, @@ -41,7 +40,17 @@ func AsyncConsume( } }() - go Consume(ctx, postGroupConsumer, code, usage, meta, inputPrice, outputPrice, content, requestDetail) + go Consume( + context.Background(), + postGroupConsumer, + code, + usage, + meta, + inputPrice, + outputPrice, + content, + requestDetail, + ) } func Consume( diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index 3c186d707c8..56abc90eced 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -16,60 +16,89 @@ const ( groupModelRPMKey = "group_model_rpm:%s:%s" ) -// 1. 使用Redis列表存储请求时间戳 -// 2. 列表长度代表当前窗口内的请求数 -// 3. 如果请求数未达到限制,直接添加新请求并返回成功 -// 4. 如果达到限制,则检查最老的请求是否已经过期 -// 5. 如果最老的请求已过期,最多移除3个过期请求并添加新请求,否则拒绝新请求 -// 6. 通过EXPIRE命令设置键的过期时间,自动清理过期数据 -var luaScript = ` +var pushRequestScript = ` local key = KEYS[1] -local max_requests = tonumber(ARGV[1]) -local window = tonumber(ARGV[2]) -local current_time = tonumber(ARGV[3]) +local window = tonumber(ARGV[1]) +local current_time = tonumber(ARGV[2]) +local cutoff = current_time - window + +local page_size = 100 +local remove_count = 0 -local count = redis.call('LLEN', key) - -if count < max_requests then - redis.call('LPUSH', key, current_time) - redis.call('PEXPIRE', key, window) - return 1 -else - local removed = 0 - for i = 1, 3 do - local oldest = redis.call('LINDEX', key, -1) - if current_time - tonumber(oldest) >= window then - redis.call('RPOP', key) - removed = removed + 1 +while true do + local timestamps = redis.call('LRANGE', key, remove_count, remove_count + page_size - 1) + if #timestamps == 0 then + break + end + + local found_non_expired = false + for i = 1, #timestamps do + local timestamp = tonumber(timestamps[i]) + if timestamp < cutoff then + 
remove_count = remove_count + 1 else + found_non_expired = true break end end - if removed > 0 then - redis.call('LPUSH', key, current_time) - redis.call('PEXPIRE', key, window) - return 1 - else - return 0 + + if found_non_expired then + break end end + +if remove_count > 0 then + redis.call('LTRIM', key, remove_count, -1) +end + +redis.call('LPUSH', key, current_time) + +redis.call('PEXPIRE', key, window) + +return redis.call('LLEN', key) ` -var getRPMSumLuaScript = ` +var getRequestCountScript = ` local pattern = ARGV[1] local window = tonumber(ARGV[2]) local current_time = tonumber(ARGV[3]) +local cutoff = current_time - window +local page_size = 100 local keys = redis.call('KEYS', pattern) local total = 0 for _, key in ipairs(keys) do - local timestamps = redis.call('LRANGE', key, 0, -1) - for _, ts in ipairs(timestamps) do - if current_time - tonumber(ts) < window then - total = total + 1 + local remove_count = 0 + + while true do + local timestamps = redis.call('LRANGE', key, remove_count, remove_count + page_size - 1) + if #timestamps == 0 then + break + end + + local found_non_expired = false + for i = 1, #timestamps do + local timestamp = tonumber(timestamps[i]) + if timestamp < cutoff then + remove_count = remove_count + 1 + else + found_non_expired = true + break + end + end + + if found_non_expired then + break end end + + if remove_count > 0 then + redis.call('LTRIM', key, remove_count, -1) + end + + local total_count = redis.call('LLEN', key) + total = total + total_count end return total @@ -93,24 +122,35 @@ func GetRPM(ctx context.Context, group, model string) (int64, error) { rdb := common.RDB currentTime := time.Now().UnixMilli() - result, err := rdb.Eval(ctx, getRPMSumLuaScript, []string{}, pattern, time.Minute.Milliseconds(), currentTime).Int64() + result, err := rdb.Eval( + ctx, + getRequestCountScript, + []string{}, + pattern, + time.Minute.Milliseconds(), + currentTime, + ).Int64() if err != nil { return 0, err } - return result, nil } func 
redisRateLimitRequest(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) (bool, error) { rdb := common.RDB - currentTime := time.Now().UnixMilli() - result, err := rdb.Eval(ctx, luaScript, []string{ - fmt.Sprintf(groupModelRPMKey, group, model), - }, maxRequestNum, duration.Milliseconds(), currentTime).Int64() + result, err := rdb.Eval( + ctx, + pushRequestScript, + []string{ + fmt.Sprintf(groupModelRPMKey, group, model), + }, + duration.Milliseconds(), + time.Now().UnixMilli(), + ).Int64() if err != nil { return false, err } - return result == 1, nil + return result <= maxRequestNum, nil } func RateLimit(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) (bool, error) { diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 5876014e06b..71ee5b30447 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -4,10 +4,13 @@ import ( "fmt" "net/http" "slices" + "strings" "time" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" + "github.com/labring/sealos/service/aiproxy/common/consume" "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/common/rpmlimit" "github.com/labring/sealos/service/aiproxy/model" @@ -15,10 +18,6 @@ import ( log "github.com/sirupsen/logrus" ) -type ModelRequest struct { - Model string `form:"model" json:"model"` -} - func calculateGroupConsumeLevelRpmRatio(usedAmount float64) float64 { v := config.GetGroupConsumeLevelRpmRatio() var maxConsumeLevel float64 = -1 @@ -90,7 +89,13 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestMo return nil } -func Distribute(c *gin.Context) { +func NewDistribute(mode int) gin.HandlerFunc { + return func(c *gin.Context) { + distribute(c, mode) + } +} + +func distribute(c 
*gin.Context, mode int) { if config.GetDisableServe() { abortWithMessage(c, http.StatusServiceUnavailable, "service is under maintenance") return @@ -110,6 +115,8 @@ func Distribute(c *gin.Context) { return } + c.Set(ctxkey.OriginalModel, requestModel) + SetLogModelFields(log.Data, requestModel) mc, ok := GetModelCaches(c).ModelConfigMap[requestModel] @@ -118,7 +125,10 @@ func Distribute(c *gin.Context) { return } + c.Set(ctxkey.ModelConfig, mc) + token := GetToken(c) + if len(token.Models) == 0 || !slices.Contains(token.Models, requestModel) { abortWithMessage(c, http.StatusForbidden, @@ -130,13 +140,21 @@ func Distribute(c *gin.Context) { } if err := checkGroupModelRPMAndTPM(c, group, requestModel, mc.RPM, mc.TPM); err != nil { - abortWithMessage(c, http.StatusTooManyRequests, err.Error()) + errMsg := err.Error() + consume.AsyncConsume( + nil, + http.StatusTooManyRequests, + nil, + NewMetaByContext(c, nil, requestModel, mode), + 0, + 0, + errMsg, + nil, + ) + abortWithMessage(c, http.StatusTooManyRequests, errMsg) return } - c.Set(ctxkey.OriginalModel, requestModel) - c.Set(ctxkey.ModelConfig, mc) - c.Next() } @@ -164,3 +182,26 @@ func NewMetaByContext(c *gin.Context, channel *model.Channel, modelName string, meta.WithEndpoint(c.Request.URL.Path), ) } + +type ModelRequest struct { + Model string `form:"model" json:"model"` +} + +func getRequestModel(c *gin.Context) (string, error) { + path := c.Request.URL.Path + switch { + case strings.HasPrefix(path, "/v1/audio/transcriptions"), + strings.HasPrefix(path, "/v1/audio/translations"): + return c.Request.FormValue("model"), nil + case strings.HasPrefix(path, "/v1/engines") && strings.HasSuffix(path, "/embeddings"): + // /engines/:model/embeddings + return c.Param("model"), nil + default: + var modelRequest ModelRequest + err := common.UnmarshalBodyReusable(c.Request, &modelRequest) + if err != nil { + return "", fmt.Errorf("get request model failed: %w", err) + } + return modelRequest.Model, nil + } +} diff --git 
a/service/aiproxy/middleware/utils.go b/service/aiproxy/middleware/utils.go index 96e5534679e..b113dbc93be 100644 --- a/service/aiproxy/middleware/utils.go +++ b/service/aiproxy/middleware/utils.go @@ -2,10 +2,8 @@ package middleware import ( "fmt" - "strings" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -27,21 +25,3 @@ func abortWithMessage(c *gin.Context, statusCode int, message string) { }) c.Abort() } - -func getRequestModel(c *gin.Context) (string, error) { - path := c.Request.URL.Path - switch { - case strings.HasPrefix(path, "/v1/audio/transcriptions"), strings.HasPrefix(path, "/v1/audio/translations"): - return c.Request.FormValue("model"), nil - case strings.HasPrefix(path, "/v1/engines") && strings.HasSuffix(path, "/embeddings"): - // /engines/:model/embeddings - return c.Param("model"), nil - default: - var modelRequest ModelRequest - err := common.UnmarshalBodyReusable(c.Request, &modelRequest) - if err != nil { - return "", fmt.Errorf("get request model failed: %w", err) - } - return modelRequest.Model, nil - } -} diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index 99b90696b0d..e000266dfdc 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -1,7 +1,6 @@ package controller import ( - "context" "errors" "fmt" "net/http" @@ -37,7 +36,6 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) errMsg := fmt.Sprintf("get group (%s) balance failed", meta.Group.ID) consume.AsyncConsume( - context.Background(), nil, http.StatusInternalServerError, nil, @@ -68,7 +66,6 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa } } consume.AsyncConsume( - context.Background(), nil, http.StatusBadRequest, nil, @@ -104,7 +101,6 @@ func 
Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa } consume.AsyncConsume( - context.Background(), postGroupConsumer, respErr.StatusCode, usage, @@ -119,7 +115,6 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa // 6. Post consume consume.AsyncConsume( - context.Background(), postGroupConsumer, http.StatusOK, usage, diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index 4e1ab6b0a0d..5fd49e10891 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -81,6 +81,7 @@ func NewMeta( values: make(map[string]any), Mode: mode, OriginModel: modelName, + ActualModel: modelName, RequestAt: time.Now(), ModelConfig: modelConfig, } @@ -89,7 +90,9 @@ func NewMeta( opt(&meta) } - meta.Reset(channel) + if channel != nil { + meta.Reset(channel) + } return &meta } diff --git a/service/aiproxy/router/relay.go b/service/aiproxy/router/relay.go index f33925bc528..513374c4a55 100644 --- a/service/aiproxy/router/relay.go +++ b/service/aiproxy/router/relay.go @@ -24,20 +24,66 @@ func SetRelayRouter(router *gin.Engine) { dashboardRouter.GET("/billing/usage", controller.GetUsage) } relayRouter := v1Router.Group("") - relayRouter.Use(middleware.Distribute) { - relayRouter.POST("/completions", controller.NewRelay(relaymode.Completions)) - relayRouter.POST("/chat/completions", controller.NewRelay(relaymode.ChatCompletions)) - relayRouter.POST("/edits", controller.NewRelay(relaymode.Edits)) - relayRouter.POST("/images/generations", controller.NewRelay(relaymode.ImagesGenerations)) + relayRouter.POST( + "/completions", + middleware.NewDistribute(relaymode.Completions), + controller.NewRelay(relaymode.Completions), + ) + + relayRouter.POST( + "/chat/completions", + middleware.NewDistribute(relaymode.ChatCompletions), + controller.NewRelay(relaymode.ChatCompletions), + ) + relayRouter.POST( + "/edits", + middleware.NewDistribute(relaymode.Edits), + 
controller.NewRelay(relaymode.Edits), + ) + relayRouter.POST( + "/images/generations", + middleware.NewDistribute(relaymode.ImagesGenerations), + controller.NewRelay(relaymode.ImagesGenerations), + ) + relayRouter.POST( + "/embeddings", + middleware.NewDistribute(relaymode.Embeddings), + controller.NewRelay(relaymode.Embeddings), + ) + relayRouter.POST( + "/engines/:model/embeddings", + middleware.NewDistribute(relaymode.Embeddings), + controller.NewRelay(relaymode.Embeddings), + ) + relayRouter.POST( + "/audio/transcriptions", + middleware.NewDistribute(relaymode.AudioTranscription), + controller.NewRelay(relaymode.AudioTranscription), + ) + relayRouter.POST( + "/audio/translations", + middleware.NewDistribute(relaymode.AudioTranslation), + controller.NewRelay(relaymode.AudioTranslation), + ) + relayRouter.POST( + "/audio/speech", + middleware.NewDistribute(relaymode.AudioSpeech), + controller.NewRelay(relaymode.AudioSpeech), + ) + relayRouter.POST( + "/rerank", + middleware.NewDistribute(relaymode.Rerank), + controller.NewRelay(relaymode.Rerank), + ) + relayRouter.POST( + "/moderations", + middleware.NewDistribute(relaymode.Moderations), + controller.NewRelay(relaymode.Moderations), + ) + relayRouter.POST("/images/edits", controller.RelayNotImplemented) relayRouter.POST("/images/variations", controller.RelayNotImplemented) - relayRouter.POST("/embeddings", controller.NewRelay(relaymode.Embeddings)) - relayRouter.POST("/engines/:model/embeddings", controller.NewRelay(relaymode.Embeddings)) - relayRouter.POST("/audio/transcriptions", controller.NewRelay(relaymode.AudioTranscription)) - relayRouter.POST("/audio/translations", controller.NewRelay(relaymode.AudioTranslation)) - relayRouter.POST("/audio/speech", controller.NewRelay(relaymode.AudioSpeech)) - relayRouter.POST("/rerank", controller.NewRelay(relaymode.Rerank)) relayRouter.GET("/files", controller.RelayNotImplemented) relayRouter.POST("/files", controller.RelayNotImplemented) 
relayRouter.DELETE("/files/:id", controller.RelayNotImplemented) @@ -49,7 +95,6 @@ func SetRelayRouter(router *gin.Engine) { relayRouter.POST("/fine_tuning/jobs/:id/cancel", controller.RelayNotImplemented) relayRouter.GET("/fine_tuning/jobs/:id/events", controller.RelayNotImplemented) relayRouter.DELETE("/models/:model", controller.RelayNotImplemented) - relayRouter.POST("/moderations", controller.NewRelay(relaymode.Moderations)) relayRouter.POST("/assistants", controller.RelayNotImplemented) relayRouter.GET("/assistants/:id", controller.RelayNotImplemented) relayRouter.POST("/assistants/:id", controller.RelayNotImplemented) From dad91b77cfd825b1165167c18aeee86380ca4010 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 5 Jan 2025 23:33:14 +0800 Subject: [PATCH 085/167] fix: rpush --- service/aiproxy/common/rpmlimit/rate-limit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index 56abc90eced..c990a1ef313 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -51,7 +51,7 @@ if remove_count > 0 then redis.call('LTRIM', key, remove_count, -1) end -redis.call('LPUSH', key, current_time) +redis.call('RPUSH', key, current_time) redis.call('PEXPIRE', key, window) From 135e85d699ed00a9a14a15c306ce46b8bc370c15 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 10:58:10 +0800 Subject: [PATCH 086/167] fix: dashboard time span --- service/aiproxy/controller/dashboard.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index c9f8c92ef89..690bd5377e3 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -22,10 +22,10 @@ func getDashboardTime(t string) (time.Time, time.Time, time.Duration) { timeSpan = time.Hour * 24 case "two_week": start = 
end.AddDate(0, 0, -15) - timeSpan = time.Hour * 12 + timeSpan = time.Hour * 24 case "week": start = end.AddDate(0, 0, -7) - timeSpan = time.Hour * 6 + timeSpan = time.Hour * 24 case "day": fallthrough default: From 4c749e589fcab140c2ad8944f10ce8ac06ffc7fd Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 11:39:24 +0800 Subject: [PATCH 087/167] feat: group model list adjusted tpm rpm --- service/aiproxy/common/config/config.go | 14 ++-- service/aiproxy/controller/dashboard.go | 20 +++++ service/aiproxy/middleware/distributor.go | 96 +++++++++++++---------- service/aiproxy/model/option.go | 8 +- service/aiproxy/router/api.go | 1 + 5 files changed, 88 insertions(+), 51 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index dc5b09036db..f960be3d34a 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -98,14 +98,14 @@ var ( defaultChannelModels atomic.Value defaultChannelModelMapping atomic.Value groupMaxTokenNum atomic.Int32 - // group消费金额对应的rpm乘数,使用map[float64]float64 - groupConsumeLevelRpmRatio atomic.Value + // group消费金额对应的rpm/tpm乘数,使用map[float64]float64 + groupConsumeLevelRatio atomic.Value ) func init() { defaultChannelModels.Store(make(map[int][]string)) defaultChannelModelMapping.Store(make(map[int]map[string]string)) - groupConsumeLevelRpmRatio.Store(make(map[float64]float64)) + groupConsumeLevelRatio.Store(make(map[float64]float64)) } func GetDefaultChannelModels() map[int][]string { @@ -128,12 +128,12 @@ func SetDefaultChannelModelMapping(mapping map[int]map[string]string) { defaultChannelModelMapping.Store(mapping) } -func GetGroupConsumeLevelRpmRatio() map[float64]float64 { - return groupConsumeLevelRpmRatio.Load().(map[float64]float64) +func GetGroupConsumeLevelRatio() map[float64]float64 { + return groupConsumeLevelRatio.Load().(map[float64]float64) } -func SetGroupConsumeLevelRpmRatio(ratio map[float64]float64) { - 
groupConsumeLevelRpmRatio.Store(ratio) +func SetGroupConsumeLevelRatio(ratio map[float64]float64) { + groupConsumeLevelRatio.Store(ratio) } // 那个group最多可创建的token数量,0表示不限制 diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 690bd5377e3..86294f85c73 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -189,3 +189,23 @@ func GetGroupDashboard(c *gin.Context) { middleware.SuccessResponse(c, dashboards) } + +func GetGroupDashboardModels(c *gin.Context) { + group := c.Param("group") + if group == "" { + middleware.ErrorResponse(c, http.StatusOK, "invalid parameter") + return + } + groupCache, err := model.CacheGetGroup(group) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "failed to get group") + return + } + + enabledModelConfigs := model.LoadModelCaches().EnabledModelConfigs + newEnabledModelConfigs := make([]*model.ModelConfig, len(enabledModelConfigs)) + for i, mc := range enabledModelConfigs { + newEnabledModelConfigs[i] = middleware.GetGroupAdjustedModelConfig(groupCache, mc) + } + middleware.SuccessResponse(c, newEnabledModelConfigs) +} diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 71ee5b30447..7606fb3a67f 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -18,71 +18,87 @@ import ( log "github.com/sirupsen/logrus" ) -func calculateGroupConsumeLevelRpmRatio(usedAmount float64) float64 { - v := config.GetGroupConsumeLevelRpmRatio() +func calculateGroupConsumeLevelRatio(usedAmount float64) float64 { + v := config.GetGroupConsumeLevelRatio() + if len(v) == 0 { + return 1 + } var maxConsumeLevel float64 = -1 - var groupConsumeLevelRpmRatio float64 + var groupConsumeLevelRatio float64 for consumeLevel, ratio := range v { if usedAmount < consumeLevel { continue } if consumeLevel > maxConsumeLevel { maxConsumeLevel = consumeLevel - 
groupConsumeLevelRpmRatio = ratio + groupConsumeLevelRatio = ratio } } - if groupConsumeLevelRpmRatio <= 0 { - groupConsumeLevelRpmRatio = 1 + if groupConsumeLevelRatio <= 0 { + groupConsumeLevelRatio = 1 } - return groupConsumeLevelRpmRatio + return groupConsumeLevelRatio } -func getGroupRPMRatio(group *model.GroupCache) float64 { +func getGroupPMRatio(group *model.GroupCache) (float64, float64) { groupRPMRatio := group.RPMRatio if groupRPMRatio <= 0 { groupRPMRatio = 1 } - return groupRPMRatio -} - -func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, requestModel string, modelRPM int64, modelTPM int64) error { - if group.RPM != nil && group.RPM[requestModel] > 0 { - modelRPM = group.RPM[requestModel] - } - if group.TPM != nil && group.TPM[requestModel] > 0 { - modelTPM = group.TPM[requestModel] + groupTPMRatio := group.TPMRatio + if groupTPMRatio <= 0 { + groupTPMRatio = 1 } + return groupRPMRatio, groupTPMRatio +} - if modelRPM <= 0 && modelTPM <= 0 { - return nil - } - - groupConsumeLevelRpmRatio := calculateGroupConsumeLevelRpmRatio(group.UsedAmount) - groupRPMRatio := getGroupRPMRatio(group) - - adjustedModelRPM := int64(float64(modelRPM) * groupRPMRatio * groupConsumeLevelRpmRatio) +func GetGroupAdjustedModelConfig(group *model.GroupCache, mc *model.ModelConfig) *model.ModelConfig { + rpm := mc.RPM + tpm := mc.TPM + if group.RPM != nil && group.RPM[mc.Model] > 0 { + rpm = group.RPM[mc.Model] + } + if group.TPM != nil && group.TPM[mc.Model] > 0 { + tpm = group.TPM[mc.Model] + } + rpmRatio, tpmRatio := getGroupPMRatio(group) + groupConsumeLevelRatio := calculateGroupConsumeLevelRatio(group.UsedAmount) + rpm = int64(float64(rpm) * rpmRatio * groupConsumeLevelRatio) + tpm = int64(float64(tpm) * tpmRatio * groupConsumeLevelRatio) + if rpm != mc.RPM || tpm != mc.TPM { + newMc := *mc + newMc.RPM = rpm + newMc.TPM = tpm + return &newMc + } + return mc +} - ok := rpmlimit.ForceRateLimit( - c.Request.Context(), - group.ID, - requestModel, - 
adjustedModelRPM, - time.Minute, - ) +func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model.ModelConfig) error { + adjustedModelConfig := GetGroupAdjustedModelConfig(group, mc) - if !ok { - return fmt.Errorf("group (%s) is requesting too frequently", group.ID) + if adjustedModelConfig.RPM > 0 { + ok := rpmlimit.ForceRateLimit( + c.Request.Context(), + group.ID, + mc.Model, + adjustedModelConfig.RPM, + time.Minute, + ) + if !ok { + return fmt.Errorf("group (%s) is requesting too frequently", group.ID) + } } - if modelTPM > 0 { - tpm, err := model.CacheGetGroupModelTPM(group.ID, requestModel) + if adjustedModelConfig.TPM > 0 { + tpm, err := model.CacheGetGroupModelTPM(group.ID, mc.Model) if err != nil { - log.Errorf("get group model tpm (%s:%s) error: %s", group.ID, requestModel, err.Error()) + log.Errorf("get group model tpm (%s:%s) error: %s", group.ID, mc.Model, err.Error()) // ignore error return nil } - if tpm >= modelTPM { + if tpm >= adjustedModelConfig.TPM { return fmt.Errorf("group (%s) tpm is too high", group.ID) } } @@ -139,13 +155,13 @@ func distribute(c *gin.Context, mode int) { return } - if err := checkGroupModelRPMAndTPM(c, group, requestModel, mc.RPM, mc.TPM); err != nil { + if err := checkGroupModelRPMAndTPM(c, group, mc); err != nil { errMsg := err.Error() consume.AsyncConsume( nil, http.StatusTooManyRequests, nil, - NewMetaByContext(c, nil, requestModel, mode), + NewMetaByContext(c, nil, mc.Model, mode), 0, 0, errMsg, diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index d13b8f06757..99c69d6dafc 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -79,11 +79,11 @@ func initOptionMap() error { optionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) optionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() optionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) - 
groupConsumeLevelRpmRatioJSON, err := json.Marshal(config.GetGroupConsumeLevelRpmRatio()) + groupConsumeLevelRatioJSON, err := json.Marshal(config.GetGroupConsumeLevelRatio()) if err != nil { return err } - optionMap["GroupConsumeLevelRpmRatio"] = conv.BytesToString(groupConsumeLevelRpmRatioJSON) + optionMap["GroupConsumeLevelRatio"] = conv.BytesToString(groupConsumeLevelRatioJSON) optionKeys = make([]string, 0, len(optionMap)) for key := range optionMap { @@ -284,7 +284,7 @@ func updateOption(key string, value string, isInit bool) (err error) { } } config.SetTimeoutWithModelType(newTimeoutWithModelType) - case "GroupConsumeLevelRpmRatio": + case "GroupConsumeLevelRatio": var newGroupRpmRatio map[float64]float64 err := json.Unmarshal(conv.StringToBytes(value), &newGroupRpmRatio) if err != nil { @@ -298,7 +298,7 @@ func updateOption(key string, value string, isInit bool) (err error) { return errors.New("rpm ratio must be greater than 0") } } - config.SetGroupConsumeLevelRpmRatio(newGroupRpmRatio) + config.SetGroupConsumeLevelRatio(newGroupRpmRatio) default: return ErrUnknownOptionKey } diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 6bfedf07972..c5dc93b1f58 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -37,6 +37,7 @@ func SetAPIRouter(router *gin.Engine) { { dashboardRoute.GET("/", controller.GetDashboard) dashboardRoute.GET("/:group", controller.GetGroupDashboard) + dashboardRoute.GET("/:group/models", controller.GetGroupDashboardModels) } groupsRoute := apiRouter.Group("/groups") From 94d1936c9b5308b25d2367a12f7d2c8b808345ed Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 15:25:23 +0800 Subject: [PATCH 088/167] feat: baichuan model config --- .../relay/adaptor/baichuan/constants.go | 63 ++++++++++++++++--- 1 file changed, 54 insertions(+), 9 deletions(-) diff --git a/service/aiproxy/relay/adaptor/baichuan/constants.go b/service/aiproxy/relay/adaptor/baichuan/constants.go index 
5cab26dcce5..a3ad414927a 100644 --- a/service/aiproxy/relay/adaptor/baichuan/constants.go +++ b/service/aiproxy/relay/adaptor/baichuan/constants.go @@ -7,18 +7,63 @@ import ( var ModelList = []*model.ModelConfig{ { - Model: "Baichuan2-Turbo", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerBaichuan, + Model: "Baichuan4-Turbo", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerBaichuan, + InputPrice: 0.015, + OutputPrice: 0.015, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + ), }, { - Model: "Baichuan2-Turbo-192k", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerBaichuan, + Model: "Baichuan4-Air", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerBaichuan, + InputPrice: 0.00098, + OutputPrice: 0.00098, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + ), }, { - Model: "Baichuan-Text-Embedding", - Type: relaymode.Embeddings, - Owner: model.ModelOwnerBaichuan, + Model: "Baichuan4", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerBaichuan, + InputPrice: 0.1, + OutputPrice: 0.1, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + ), + }, + { + Model: "Baichuan3-Turbo", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerBaichuan, + InputPrice: 0.012, + OutputPrice: 0.012, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + ), + }, + { + Model: "Baichuan3-Turbo-128k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerBaichuan, + InputPrice: 0.024, + OutputPrice: 0.024, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + ), + }, + + { + Model: "Baichuan-Text-Embedding", + Type: relaymode.Embeddings, + Owner: model.ModelOwnerBaichuan, + InputPrice: 0.0005, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(512), + ), }, } From bbec03ae824d79cc4694133b3ba3a7bd6e4f586b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 
2025 15:40:06 +0800 Subject: [PATCH 089/167] fix: rpm limit recore ignore empty channel id --- service/aiproxy/model/utils.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/service/aiproxy/model/utils.go b/service/aiproxy/model/utils.go index f06c9531b3d..ba45e46f6b5 100644 --- a/service/aiproxy/model/utils.go +++ b/service/aiproxy/model/utils.go @@ -92,9 +92,11 @@ func BatchRecordConsume( if err != nil { errs = append(errs, fmt.Errorf("failed to update token used amount: %w", err)) } - err = UpdateChannelUsedAmount(channelID, amount, 1) - if err != nil { - errs = append(errs, fmt.Errorf("failed to update channel used amount: %w", err)) + if channelID > 0 { + err = UpdateChannelUsedAmount(channelID, amount, 1) + if err != nil { + errs = append(errs, fmt.Errorf("failed to update channel used amount: %w", err)) + } } if len(errs) == 0 { return nil From 59ee65e1e6057d8f2e24f9820f70d5561abcf4cb Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 17:38:01 +0800 Subject: [PATCH 090/167] feat: disable model config --- service/aiproxy/common/config/config.go | 10 ++++++ service/aiproxy/controller/channel-test.go | 2 +- service/aiproxy/middleware/distributor.go | 2 +- service/aiproxy/model/cache.go | 42 +++++++++++++++++----- service/aiproxy/model/channel.go | 3 +- service/aiproxy/model/modelconfig.go | 6 ++++ service/aiproxy/model/option.go | 3 ++ 7 files changed, 57 insertions(+), 11 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index f960be3d34a..0747e829464 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -32,8 +32,18 @@ var ( modelErrorAutoBanRate = math.Float64bits(0.5) // 模型类型超时时间,单位秒 timeoutWithModelType atomic.Value + + disableModelConfig atomic.Bool ) +func GetDisableModelConfig() bool { + return disableModelConfig.Load() +} + +func SetDisableModelConfig(disabled bool) { + disableModelConfig.Store(disabled) +} + 
func GetRetryTimes() int64 { return retryTimes.Load() } diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 2ac9e26dd0a..7ae968d9769 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -30,7 +30,7 @@ const channelTestRequestID = "channel-test" // testSingleModel tests a single model in the channel func testSingleModel(mc *model.ModelCaches, channel *model.Channel, modelName string) (*model.ChannelTest, error) { - modelConfig, ok := mc.ModelConfigMap[modelName] + modelConfig, ok := mc.ModelConfig.GetModelConfig(modelName) if !ok { return nil, errors.New(modelName + " model config not found") } diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 7606fb3a67f..74611c11253 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -135,7 +135,7 @@ func distribute(c *gin.Context, mode int) { SetLogModelFields(log.Data, requestModel) - mc, ok := GetModelCaches(c).ModelConfigMap[requestModel] + mc, ok := GetModelCaches(c).ModelConfig.GetModelConfig(requestModel) if !ok { abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") return diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index a5d87712628..87f38873e0a 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -406,11 +406,15 @@ func CacheGetGroupModelTPM(id string, model string) (int64, error) { return tpm, nil } +type ModelConfigCache interface { + GetModelConfig(model string) (*ModelConfig, bool) +} + // read-only cache // //nolint:revive type ModelCaches struct { - ModelConfigMap map[string]*ModelConfig + ModelConfig ModelConfigCache EnabledModel2channels map[string][]*Channel EnabledModels []string EnabledModelsMap map[string]struct{} @@ -432,7 +436,7 @@ func LoadModelCaches() *ModelCaches { // 
InitModelConfigAndChannelCache initializes the channel cache from database func InitModelConfigAndChannelCache() error { - modelConfigMap, err := initializeModelConfigCache() + modelConfig, err := initializeModelConfigCache() if err != nil { return err } @@ -455,14 +459,14 @@ func InitModelConfigAndChannelCache() error { sortChannelsByPriority(newEnabledModel2channels) // Build channel type to model configs map - newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels, modelConfigMap) + newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels, modelConfig) // Build enabled models and configs lists newEnabledModels, newEnabledModelsMap, newEnabledModelConfigs, newEnabledModelConfigsMap := buildEnabledModelsAndConfigs(newEnabledChannelType2ModelConfigs) // Update global cache atomically modelCaches.Store(&ModelCaches{ - ModelConfigMap: modelConfigMap, + ModelConfig: modelConfig, EnabledModel2channels: newEnabledModel2channels, EnabledModels: newEnabledModels, EnabledModelsMap: newEnabledModelsMap, @@ -518,7 +522,29 @@ func LoadChannelByID(id int) (*Channel, error) { return &channel, nil } -func initializeModelConfigCache() (map[string]*ModelConfig, error) { +var _ ModelConfigCache = (*modelConfigMapCache)(nil) + +type modelConfigMapCache struct { + modelConfigMap map[string]*ModelConfig +} + +func (m *modelConfigMapCache) GetModelConfig(model string) (*ModelConfig, bool) { + config, ok := m.modelConfigMap[model] + return config, ok +} + +var _ ModelConfigCache = (*disabledModelConfigCache)(nil) + +type disabledModelConfigCache struct{} + +func (d *disabledModelConfigCache) GetModelConfig(model string) (*ModelConfig, bool) { + return NewDefaultModelConfig(model), true +} + +func initializeModelConfigCache() (ModelConfigCache, error) { + if config.GetDisableModelConfig() { + return &disabledModelConfigCache{}, nil + } modelConfigs, err := GetAllModelConfigs() if err != nil { return nil, err @@ -528,7 
+554,7 @@ func initializeModelConfigCache() (map[string]*ModelConfig, error) { newModelConfigMap[modelConfig.Model] = modelConfig } - return newModelConfigMap, nil + return &modelConfigMapCache{modelConfigMap: newModelConfigMap}, nil } func initializeChannelModels(channel *Channel) { @@ -582,7 +608,7 @@ func sortChannelsByPriority(modelMap map[string][]*Channel) { } } -func buildChannelTypeToModelConfigsMap(channels []*Channel, modelConfigMap map[string]*ModelConfig) map[int][]*ModelConfig { +func buildChannelTypeToModelConfigsMap(channels []*Channel, modelConfigMap ModelConfigCache) map[int][]*ModelConfig { typeMap := make(map[int][]*ModelConfig) for _, channel := range channels { @@ -592,7 +618,7 @@ func buildChannelTypeToModelConfigsMap(channels []*Channel, modelConfigMap map[s configs := typeMap[channel.Type] for _, model := range channel.Models { - if config, ok := modelConfigMap[model]; ok { + if config, ok := modelConfigMap.GetModelConfig(model); ok { configs = append(configs, config) } } diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 7efe419454e..c9c07bd3bad 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -9,6 +9,7 @@ import ( json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/config" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -48,7 +49,7 @@ func (c *Channel) BeforeDelete(tx *gorm.DB) (err error) { } func GetModelConfigWithModels(models []string) ([]string, []string, error) { - if len(models) == 0 { + if len(models) == 0 || config.GetDisableModelConfig() { return models, nil, nil } diff --git a/service/aiproxy/model/modelconfig.go b/service/aiproxy/model/modelconfig.go index 160db525d56..5727e17f2ab 100644 --- a/service/aiproxy/model/modelconfig.go +++ b/service/aiproxy/model/modelconfig.go @@ -31,6 +31,12 @@ type ModelConfig struct { TPM int64 `json:"tpm,omitempty"` } +func 
NewDefaultModelConfig(model string) *ModelConfig { + return &ModelConfig{ + Model: model, + } +} + func (c *ModelConfig) MarshalJSON() ([]byte, error) { type Alias ModelConfig return json.Marshal(&struct { diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 99c69d6dafc..1d62a49247e 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -60,6 +60,7 @@ func initOptionMap() error { optionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) optionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) optionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) + optionMap["DisableModelConfig"] = strconv.FormatBool(config.GetDisableModelConfig()) optionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) optionMap["EnableModelErrorAutoBan"] = strconv.FormatBool(config.GetEnableModelErrorAutoBan()) timeoutWithModelTypeJSON, err := json.Marshal(config.GetTimeoutWithModelType()) @@ -183,6 +184,8 @@ func isTrue(value string) bool { //nolint:gocyclo func updateOption(key string, value string, isInit bool) (err error) { switch key { + case "DisableModelConfig": + config.SetDisableModelConfig(isTrue(value)) case "LogDetailStorageHours": logDetailStorageHours, err := strconv.ParseInt(value, 10, 64) if err != nil { From b23189d71abd973fc15c0e4188505911222d160e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 17:47:42 +0800 Subject: [PATCH 091/167] feat: internal token --- service/aiproxy/middleware/auth.go | 47 ++++++++++++++++++++++++------ service/aiproxy/model/log.go | 6 ++-- service/aiproxy/model/utils.go | 16 ++++++---- 3 files changed, 51 insertions(+), 18 deletions(-) diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 4dd278add0f..14ff7475bf3 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -3,6 +3,7 @@ package middleware 
import ( "fmt" "net/http" + "os" "strings" "github.com/gin-gonic/gin" @@ -51,6 +52,8 @@ func AdminAuth(c *gin.Context) { c.Next() } +var internalToken = os.Getenv("INTERNAL_TOKEN") + func TokenAuth(c *gin.Context) { log := GetLogger(c) key := c.Request.Header.Get("Authorization") @@ -60,12 +63,23 @@ func TokenAuth(c *gin.Context) { ) parts := strings.Split(key, "-") key = parts[0] - token, err := model.ValidateAndGetToken(key) - if err != nil { - abortWithMessage(c, http.StatusUnauthorized, err.Error()) - return + + var token *model.TokenCache + var useInternalToken bool + if internalToken != "" && internalToken == key { + token = &model.TokenCache{} + useInternalToken = true + } else { + var err error + token, err = model.ValidateAndGetToken(key) + if err != nil { + abortWithMessage(c, http.StatusUnauthorized, err.Error()) + return + } } + SetLogTokenFields(log.Data, token) + if token.Subnet != "" { if ok, err := network.IsIPInSubnets(c.ClientIP(), token.Subnet); err != nil { abortWithMessage(c, http.StatusInternalServerError, err.Error()) @@ -82,11 +96,19 @@ func TokenAuth(c *gin.Context) { return } } - group, err := model.CacheGetGroup(token.Group) - if err != nil { - abortWithMessage(c, http.StatusInternalServerError, err.Error()) - return + + var group *model.GroupCache + if useInternalToken { + group = &model.GroupCache{} + } else { + var err error + group, err = model.CacheGetGroup(token.Group) + if err != nil { + abortWithMessage(c, http.StatusInternalServerError, err.Error()) + return + } } + SetLogGroupFields(log.Data, group) modelCaches := model.LoadModelCaches() @@ -186,9 +208,16 @@ func SetLogGroupFields(fields logrus.Fields, group *model.GroupCache) { } func SetLogTokenFields(fields logrus.Fields, token *model.TokenCache) { - if token != nil { + if token == nil { + return + } + if token.ID > 0 { fields["tid"] = token.ID + } + if token.Name != "" { fields["tname"] = token.Name + } + if token.Key != "" { fields["key"] = maskTokenKey(token.Key) } } diff 
--git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 06c283c1e69..ab24eafe3ab 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -28,16 +28,16 @@ type Log struct { RequestDetail *RequestDetail `gorm:"foreignKey:LogID;constraint:OnUpdate:CASCADE,OnDelete:CASCADE;" json:"request_detail,omitempty"` RequestAt time.Time `gorm:"index;index:idx_request_at_group_id,priority:2;index:idx_group_reqat_token,priority:2" json:"request_at"` CreatedAt time.Time `gorm:"index" json:"created_at"` - TokenName string `gorm:"index;index:idx_group_token,priority:2;index:idx_group_reqat_token,priority:3" json:"token_name"` + TokenName string `gorm:"index;index:idx_group_token,priority:2;index:idx_group_reqat_token,priority:3" json:"token_name,omitempty"` Endpoint string `gorm:"index" json:"endpoint"` Content string `gorm:"type:text" json:"content,omitempty"` - GroupID string `gorm:"index;index:idx_group_token,priority:1;index:idx_request_at_group_id,priority:1;index:idx_group_reqat_token,priority:1" json:"group"` + GroupID string `gorm:"index;index:idx_group_token,priority:1;index:idx_request_at_group_id,priority:1;index:idx_group_reqat_token,priority:1" json:"group,omitempty"` Model string `gorm:"index" json:"model"` RequestID string `gorm:"index" json:"request_id"` Price float64 `json:"price"` ID int `gorm:"primaryKey" json:"id"` CompletionPrice float64 `json:"completion_price"` - TokenID int `gorm:"index" json:"token_id"` + TokenID int `gorm:"index" json:"token_id,omitempty"` UsedAmount float64 `gorm:"index" json:"used_amount"` PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` diff --git a/service/aiproxy/model/utils.go b/service/aiproxy/model/utils.go index ba45e46f6b5..249554235e0 100644 --- a/service/aiproxy/model/utils.go +++ b/service/aiproxy/model/utils.go @@ -84,13 +84,17 @@ func BatchRecordConsume( if err != nil { errs = append(errs, fmt.Errorf("failed to record log: %w", err)) } - err = 
UpdateGroupUsedAmountAndRequestCount(group, amount, 1) - if err != nil { - errs = append(errs, fmt.Errorf("failed to update group used amount and request count: %w", err)) + if group != "" { + err = UpdateGroupUsedAmountAndRequestCount(group, amount, 1) + if err != nil { + errs = append(errs, fmt.Errorf("failed to update group used amount and request count: %w", err)) + } } - err = UpdateTokenUsedAmount(tokenID, amount, 1) - if err != nil { - errs = append(errs, fmt.Errorf("failed to update token used amount: %w", err)) + if tokenID > 0 { + err = UpdateTokenUsedAmount(tokenID, amount, 1) + if err != nil { + errs = append(errs, fmt.Errorf("failed to update token used amount: %w", err)) + } } if channelID > 0 { err = UpdateChannelUsedAmount(channelID, amount, 1) From 8e0336cd279245d3b9d9a91f7e001336576a2d3e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 17:50:33 +0800 Subject: [PATCH 092/167] fix: lint --- service/aiproxy/model/cache.go | 1 + 1 file changed, 1 insertion(+) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index 87f38873e0a..ada30601912 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -406,6 +406,7 @@ func CacheGetGroupModelTPM(id string, model string) (int64, error) { return tpm, nil } +//nolint:revive type ModelConfigCache interface { GetModelConfig(model string) (*ModelConfig, bool) } From 2c89467be72377d1dbadf4fa5e5751ab26e64a68 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 21:08:11 +0800 Subject: [PATCH 093/167] fix: recore req to redis --- service/aiproxy/common/rpmlimit/rate-limit.go | 15 +++++++++++---- service/aiproxy/middleware/distributor.go | 5 +++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index c990a1ef313..3199142356a 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go 
@@ -137,8 +137,15 @@ func GetRPM(ctx context.Context, group, model string) (int64, error) { } func redisRateLimitRequest(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) (bool, error) { - rdb := common.RDB - result, err := rdb.Eval( + result, err := PushRequest(ctx, group, model, duration) + if err != nil { + return false, err + } + return result <= maxRequestNum, nil +} + +func PushRequest(ctx context.Context, group, model string, duration time.Duration) (int64, error) { + result, err := common.RDB.Eval( ctx, pushRequestScript, []string{ @@ -148,9 +155,9 @@ func redisRateLimitRequest(ctx context.Context, group, model string, maxRequestN time.Now().UnixMilli(), ).Int64() if err != nil { - return false, err + return 0, err } - return result <= maxRequestNum, nil + return result, nil } func RateLimit(ctx context.Context, group, model string, maxRequestNum int64, duration time.Duration) (bool, error) { diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 74611c11253..6b20d098ce5 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -88,6 +88,11 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model if !ok { return fmt.Errorf("group (%s) is requesting too frequently", group.ID) } + } else if common.RedisEnabled { + _, err := rpmlimit.PushRequest(c.Request.Context(), group.ID, mc.Model, time.Minute) + if err != nil { + log.Errorf("push request error: %s", err.Error()) + } } if adjustedModelConfig.TPM > 0 { From 25f098244e8c1fd1d0d2011e89b864de5c002af7 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 21:59:19 +0800 Subject: [PATCH 094/167] feat: option from env --- service/aiproxy/common/config/config.go | 123 +++++++++--------- service/aiproxy/common/database.go | 4 +- service/aiproxy/common/env/helper.go | 61 +++++++-- service/aiproxy/common/init.go | 3 - 
service/aiproxy/common/rpmlimit/rate-limit.go | 3 +- service/aiproxy/middleware/auth.go | 10 +- service/aiproxy/model/main.go | 6 +- service/aiproxy/model/option.go | 4 +- service/aiproxy/model/token.go | 2 +- 9 files changed, 129 insertions(+), 87 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index 0747e829464..4716d8f6919 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -4,43 +4,63 @@ import ( "math" "os" "slices" - "strconv" "sync/atomic" - "time" "github.com/labring/sealos/service/aiproxy/common/env" ) var ( - DebugEnabled, _ = strconv.ParseBool(os.Getenv("DEBUG")) - DebugSQLEnabled, _ = strconv.ParseBool(os.Getenv("DEBUG_SQL")) + DebugEnabled = env.Bool("DEBUG", false) + DebugSQLEnabled = env.Bool("DEBUG_SQL", false) ) var ( - // 暂停服务 - disableServe atomic.Bool - // log detail 存储时间(小时) + DisableAutoMigrateDB = env.Bool("DISABLE_AUTO_MIGRATE_DB", false) + OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false) + AdminKey = os.Getenv("ADMIN_KEY") +) + +var ( + disableServe atomic.Bool logDetailStorageHours int64 = 3 * 24 + internalToken atomic.Value ) var ( - // 重试次数 - retryTimes atomic.Int64 - // 是否开启模型错误率自动封禁 + retryTimes atomic.Int64 enableModelErrorAutoBan atomic.Bool - // 模型错误率自动封禁 - modelErrorAutoBanRate = math.Float64bits(0.5) - // 模型类型超时时间,单位秒 - timeoutWithModelType atomic.Value + modelErrorAutoBanRate = math.Float64bits(0.5) + timeoutWithModelType atomic.Value + disableModelConfig atomic.Bool +) - disableModelConfig atomic.Bool +var ( + defaultChannelModels atomic.Value + defaultChannelModelMapping atomic.Value + groupMaxTokenNum atomic.Int64 + groupConsumeLevelRatio atomic.Value ) +var geminiSafetySetting atomic.Value + +var billingEnabled atomic.Bool + +func init() { + timeoutWithModelType.Store(make(map[int]int64)) + defaultChannelModels.Store(make(map[int][]string)) + defaultChannelModelMapping.Store(make(map[int]map[string]string)) + 
groupConsumeLevelRatio.Store(make(map[float64]float64)) + geminiSafetySetting.Store("BLOCK_NONE") + billingEnabled.Store(true) + internalToken.Store(os.Getenv("INTERNAL_TOKEN")) +} + func GetDisableModelConfig() bool { return disableModelConfig.Load() } func SetDisableModelConfig(disabled bool) { + disabled = env.Bool("DISABLE_MODEL_CONFIG", disabled) disableModelConfig.Store(disabled) } @@ -48,11 +68,17 @@ func GetRetryTimes() int64 { return retryTimes.Load() } +func SetRetryTimes(times int64) { + times = env.Int64("RETRY_TIMES", times) + retryTimes.Store(times) +} + func GetEnableModelErrorAutoBan() bool { return enableModelErrorAutoBan.Load() } func SetEnableModelErrorAutoBan(enabled bool) { + enabled = env.Bool("ENABLE_MODEL_ERROR_AUTO_BAN", enabled) enableModelErrorAutoBan.Store(enabled) } @@ -61,22 +87,16 @@ func GetModelErrorAutoBanRate() float64 { } func SetModelErrorAutoBanRate(rate float64) { + rate = env.Float64("MODEL_ERROR_AUTO_BAN_RATE", rate) atomic.StoreUint64(&modelErrorAutoBanRate, math.Float64bits(rate)) } -func SetRetryTimes(times int64) { - retryTimes.Store(times) -} - -func init() { - timeoutWithModelType.Store(make(map[int]int64)) -} - func GetTimeoutWithModelType() map[int]int64 { return timeoutWithModelType.Load().(map[int]int64) } func SetTimeoutWithModelType(timeout map[int]int64) { + timeout = env.JSON("TIMEOUT_WITH_MODEL_TYPE", timeout) timeoutWithModelType.Store(timeout) } @@ -85,6 +105,7 @@ func GetLogDetailStorageHours() int64 { } func SetLogDetailStorageHours(hours int64) { + hours = env.Int64("LOG_DETAIL_STORAGE_HOURS", hours) atomic.StoreInt64(&logDetailStorageHours, hours) } @@ -93,36 +114,16 @@ func GetDisableServe() bool { } func SetDisableServe(disabled bool) { + disabled = env.Bool("DISABLE_SERVE", disabled) disableServe.Store(disabled) } -var DisableAutoMigrateDB = os.Getenv("DISABLE_AUTO_MIGRATE_DB") == "true" - -var RateLimitKeyExpirationDuration = 20 * time.Minute - -var OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", 
false) - -var AdminKey = env.String("ADMIN_KEY", "") - -var ( - defaultChannelModels atomic.Value - defaultChannelModelMapping atomic.Value - groupMaxTokenNum atomic.Int32 - // group消费金额对应的rpm/tpm乘数,使用map[float64]float64 - groupConsumeLevelRatio atomic.Value -) - -func init() { - defaultChannelModels.Store(make(map[int][]string)) - defaultChannelModelMapping.Store(make(map[int]map[string]string)) - groupConsumeLevelRatio.Store(make(map[float64]float64)) -} - func GetDefaultChannelModels() map[int][]string { return defaultChannelModels.Load().(map[int][]string) } func SetDefaultChannelModels(models map[int][]string) { + models = env.JSON("DEFAULT_CHANNEL_MODELS", models) for key, ms := range models { slices.Sort(ms) models[key] = slices.Compact(ms) @@ -135,6 +136,7 @@ func GetDefaultChannelModelMapping() map[int]map[string]string { } func SetDefaultChannelModelMapping(mapping map[int]map[string]string) { + mapping = env.JSON("DEFAULT_CHANNEL_MODEL_MAPPING", mapping) defaultChannelModelMapping.Store(mapping) } @@ -143,42 +145,43 @@ func GetGroupConsumeLevelRatio() map[float64]float64 { } func SetGroupConsumeLevelRatio(ratio map[float64]float64) { + ratio = env.JSON("GROUP_CONSUME_LEVEL_RATIO", ratio) groupConsumeLevelRatio.Store(ratio) } -// 那个group最多可创建的token数量,0表示不限制 -func GetGroupMaxTokenNum() int32 { +// GetGroupMaxTokenNum returns max number of tokens per group, 0 means unlimited +func GetGroupMaxTokenNum() int64 { return groupMaxTokenNum.Load() } -func SetGroupMaxTokenNum(num int32) { +func SetGroupMaxTokenNum(num int64) { + num = env.Int64("GROUP_MAX_TOKEN_NUM", num) groupMaxTokenNum.Store(num) } -var geminiSafetySetting atomic.Value - -func init() { - geminiSafetySetting.Store("BLOCK_NONE") -} - func GetGeminiSafetySetting() string { return geminiSafetySetting.Load().(string) } func SetGeminiSafetySetting(setting string) { + setting = env.String("GEMINI_SAFETY_SETTING", setting) geminiSafetySetting.Store(setting) } -var billingEnabled atomic.Bool - -func 
init() { - billingEnabled.Store(true) -} - func GetBillingEnabled() bool { return billingEnabled.Load() } func SetBillingEnabled(enabled bool) { + enabled = env.Bool("BILLING_ENABLED", enabled) billingEnabled.Store(enabled) } + +func GetInternalToken() string { + return internalToken.Load().(string) +} + +func SetInternalToken(token string) { + token = env.String("INTERNAL_TOKEN", token) + internalToken.Store(token) +} diff --git a/service/aiproxy/common/database.go b/service/aiproxy/common/database.go index a164266c27a..b89c8956832 100644 --- a/service/aiproxy/common/database.go +++ b/service/aiproxy/common/database.go @@ -11,6 +11,6 @@ var ( ) var ( - SQLitePath = "aiproxy.db" - SQLiteBusyTimeout = env.Int("SQLITE_BUSY_TIMEOUT", 3000) + SQLitePath = env.String("SQLITE_PATH", "aiproxy.db") + SQLiteBusyTimeout = env.Int64("SQLITE_BUSY_TIMEOUT", 3000) ) diff --git a/service/aiproxy/common/env/helper.go b/service/aiproxy/common/env/helper.go index fdb9f827ac2..2e01ef156be 100644 --- a/service/aiproxy/common/env/helper.go +++ b/service/aiproxy/common/env/helper.go @@ -3,40 +3,83 @@ package env import ( "os" "strconv" + + json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common/conv" + log "github.com/sirupsen/logrus" ) func Bool(env string, defaultValue bool) bool { - if env == "" || os.Getenv(env) == "" { + if env == "" { + return defaultValue + } + e := os.Getenv(env) + if e == "" { + return defaultValue + } + p, err := strconv.ParseBool(e) + if err != nil { + log.Errorf("invalid %s: %s", env, e) return defaultValue } - return os.Getenv(env) == "true" + return p } -func Int(env string, defaultValue int) int { - if env == "" || os.Getenv(env) == "" { +func Int64(env string, defaultValue int64) int64 { + if env == "" { + return defaultValue + } + e := os.Getenv(env) + if e == "" { return defaultValue } - num, err := strconv.Atoi(os.Getenv(env)) + num, err := strconv.ParseInt(e, 10, 64) if err != nil { + log.Errorf("invalid %s: %s", env, 
e) return defaultValue } return num } func Float64(env string, defaultValue float64) float64 { - if env == "" || os.Getenv(env) == "" { + if env == "" { + return defaultValue + } + e := os.Getenv(env) + if e == "" { return defaultValue } - num, err := strconv.ParseFloat(os.Getenv(env), 64) + num, err := strconv.ParseFloat(e, 64) if err != nil { + log.Errorf("invalid %s: %s", env, e) return defaultValue } return num } func String(env string, defaultValue string) string { - if env == "" || os.Getenv(env) == "" { + if env == "" { + return defaultValue + } + e := os.Getenv(env) + if e == "" { + return defaultValue + } + return e +} + +func JSON[T any](env string, defaultValue T) T { + if env == "" { + return defaultValue + } + e := os.Getenv(env) + if e == "" { + return defaultValue + } + var t T + if err := json.Unmarshal(conv.StringToBytes(e), &t); err != nil { + log.Errorf("invalid %s: %s", env, e) return defaultValue } - return os.Getenv(env) + return t } diff --git a/service/aiproxy/common/init.go b/service/aiproxy/common/init.go index fa2826e407a..b17fc1c8ac1 100644 --- a/service/aiproxy/common/init.go +++ b/service/aiproxy/common/init.go @@ -16,9 +16,6 @@ var ( func Init() { flag.Parse() - if os.Getenv("SQLITE_PATH") != "" { - SQLitePath = os.Getenv("SQLITE_PATH") - } if *LogDir != "" { var err error *LogDir, err = filepath.Abs(*LogDir) diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index 3199142356a..0aea231f692 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -6,7 +6,6 @@ import ( "time" "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/config" log "github.com/sirupsen/logrus" ) @@ -187,6 +186,6 @@ func ForceRateLimit(ctx context.Context, group, model string, maxRequestNum int6 func MemoryRateLimit(_ context.Context, group, model string, maxRequestNum int64, duration time.Duration) 
bool { // It's safe to call multi times. - inMemoryRateLimiter.Init(config.RateLimitKeyExpirationDuration) + inMemoryRateLimiter.Init(3 * time.Minute) return inMemoryRateLimiter.Request(fmt.Sprintf(groupModelRPMKey, group, model), int(maxRequestNum), duration) } diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 14ff7475bf3..af9d7c26180 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -3,7 +3,6 @@ package middleware import ( "fmt" "net/http" - "os" "strings" "github.com/gin-gonic/gin" @@ -52,8 +51,6 @@ func AdminAuth(c *gin.Context) { c.Next() } -var internalToken = os.Getenv("INTERNAL_TOKEN") - func TokenAuth(c *gin.Context) { log := GetLogger(c) key := c.Request.Header.Get("Authorization") @@ -66,7 +63,7 @@ func TokenAuth(c *gin.Context) { var token *model.TokenCache var useInternalToken bool - if internalToken != "" && internalToken == key { + if config.GetInternalToken() != "" && config.GetInternalToken() == key { token = &model.TokenCache{} useInternalToken = true } else { @@ -202,7 +199,10 @@ func SetLogRequestIDField(fields logrus.Fields, requestID string) { } func SetLogGroupFields(fields logrus.Fields, group *model.GroupCache) { - if group != nil { + if group == nil { + return + } + if group.ID != "" { fields["gid"] = group.ID } } diff --git a/service/aiproxy/model/main.go b/service/aiproxy/model/main.go index 50c54c3cfdf..c3151766b4a 100644 --- a/service/aiproxy/model/main.go +++ b/service/aiproxy/model/main.go @@ -194,9 +194,9 @@ func setDBConns(db *gorm.DB) { return } - sqlDB.SetMaxIdleConns(env.Int("SQL_MAX_IDLE_CONNS", 100)) - sqlDB.SetMaxOpenConns(env.Int("SQL_MAX_OPEN_CONNS", 1000)) - sqlDB.SetConnMaxLifetime(time.Second * time.Duration(env.Int("SQL_MAX_LIFETIME", 60))) + sqlDB.SetMaxIdleConns(int(env.Int64("SQL_MAX_IDLE_CONNS", 100))) + sqlDB.SetMaxOpenConns(int(env.Int64("SQL_MAX_OPEN_CONNS", 1000))) + sqlDB.SetConnMaxLifetime(time.Second * 
time.Duration(env.Int64("SQL_MAX_LIFETIME", 60))) } func closeDB(db *gorm.DB) error { diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 1d62a49247e..018daf9fb17 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -79,7 +79,7 @@ func initOptionMap() error { } optionMap["DefaultChannelModelMapping"] = conv.BytesToString(defaultChannelModelMappingJSON) optionMap["GeminiSafetySetting"] = config.GetGeminiSafetySetting() - optionMap["GroupMaxTokenNum"] = strconv.FormatInt(int64(config.GetGroupMaxTokenNum()), 10) + optionMap["GroupMaxTokenNum"] = strconv.FormatInt(config.GetGroupMaxTokenNum(), 10) groupConsumeLevelRatioJSON, err := json.Marshal(config.GetGroupConsumeLevelRatio()) if err != nil { return err @@ -207,7 +207,7 @@ func updateOption(key string, value string, isInit bool) (err error) { if groupMaxTokenNum < 0 { return errors.New("group max token num must be greater than 0") } - config.SetGroupMaxTokenNum(int32(groupMaxTokenNum)) + config.SetGroupMaxTokenNum(groupMaxTokenNum) case "GeminiSafetySetting": config.SetGeminiSafetySetting(value) case "DefaultChannelModels": diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index 11be7cf5cdc..1312741d92b 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -73,7 +73,7 @@ func InsertToken(token *Token, autoCreateGroup bool) error { if err != nil { return err } - if count >= int64(maxTokenNum) { + if count >= maxTokenNum { return errors.New("group max token num reached") } } From 272e9bb598eff73f2cc98db9630263155ab41b9f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 22:01:40 +0800 Subject: [PATCH 095/167] fix: internal token option key --- service/aiproxy/model/option.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index 018daf9fb17..c4c647bc5ba 100644 --- a/service/aiproxy/model/option.go 
+++ b/service/aiproxy/model/option.go @@ -38,7 +38,8 @@ func GetOption(key string) (*Option, error) { } var ( - optionMap = make(map[string]string) + optionMap = make(map[string]string) + // allowed option keys optionKeys []string ) @@ -85,6 +86,7 @@ func initOptionMap() error { return err } optionMap["GroupConsumeLevelRatio"] = conv.BytesToString(groupConsumeLevelRatioJSON) + optionMap["InternalToken"] = config.GetInternalToken() optionKeys = make([]string, 0, len(optionMap)) for key := range optionMap { @@ -184,6 +186,8 @@ func isTrue(value string) bool { //nolint:gocyclo func updateOption(key string, value string, isInit bool) (err error) { switch key { + case "InternalToken": + config.SetInternalToken(value) case "DisableModelConfig": config.SetDisableModelConfig(isTrue(value)) case "LogDetailStorageHours": From 488b3b6cbb93e36accbc213abc0a24fe8f510d66 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 22:11:22 +0800 Subject: [PATCH 096/167] fix: ignore redis ping error --- service/aiproxy/common/redis.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/common/redis.go b/service/aiproxy/common/redis.go index 30a7ad1b85b..7d26afceb09 100644 --- a/service/aiproxy/common/redis.go +++ b/service/aiproxy/common/redis.go @@ -32,7 +32,12 @@ func InitRedisClient() (err error) { defer cancel() _, err = RDB.Ping(ctx).Result() - return err + if err != nil { + log.Errorf("failed to ping redis: %s", err.Error()) + return err + } + + return nil } func RedisSet(key string, value string, expiration time.Duration) error { From 831b8ca622d2ac2a0275a6b68daa65aa02884b6d Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 6 Jan 2025 23:56:05 +0800 Subject: [PATCH 097/167] fix: ignore redis ping error --- service/aiproxy/common/redis.go | 1 - 1 file changed, 1 deletion(-) diff --git a/service/aiproxy/common/redis.go b/service/aiproxy/common/redis.go index 7d26afceb09..e83e9d8f4ca 100644 --- a/service/aiproxy/common/redis.go +++ 
b/service/aiproxy/common/redis.go @@ -34,7 +34,6 @@ func InitRedisClient() (err error) { _, err = RDB.Ping(ctx).Result() if err != nil { log.Errorf("failed to ping redis: %s", err.Error()) - return err } return nil From 2815506216f8d409ced041fb6e74c0f80533e52e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 8 Jan 2025 12:10:07 +0800 Subject: [PATCH 098/167] fix: subscription --- service/aiproxy/controller/channel-billing.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index 2ea6b241876..751251e1638 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -120,9 +120,9 @@ func GetSubscription(c *gin.Context) { quota = b } c.JSON(http.StatusOK, openai.SubscriptionResponse{ - HardLimitUSD: quota / 7, + HardLimitUSD: (quota + token.UsedAmount) / 7, SoftLimitUSD: b / 7, - SystemHardLimitUSD: quota / 7, + SystemHardLimitUSD: (quota + token.UsedAmount) / 7, }) } From 25d8e075e4dffe2ea7ed661ba754c52ae85a8007 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 8 Jan 2025 12:14:20 +0800 Subject: [PATCH 099/167] fix: subscription --- service/aiproxy/controller/channel-billing.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index 751251e1638..e31a672a4c1 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -120,13 +120,13 @@ func GetSubscription(c *gin.Context) { quota = b } c.JSON(http.StatusOK, openai.SubscriptionResponse{ - HardLimitUSD: (quota + token.UsedAmount) / 7, - SoftLimitUSD: b / 7, - SystemHardLimitUSD: (quota + token.UsedAmount) / 7, + HardLimitUSD: quota + token.UsedAmount, + SoftLimitUSD: b, + SystemHardLimitUSD: quota + token.UsedAmount, }) } func GetUsage(c *gin.Context) { token := 
middleware.GetToken(c) - c.JSON(http.StatusOK, openai.UsageResponse{TotalUsage: token.UsedAmount / 7 * 100}) + c.JSON(http.StatusOK, openai.UsageResponse{TotalUsage: token.UsedAmount * 100}) } From 4eeb134942dc8c4c90d481cd4cf8a9620742ba05 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 19 Jan 2025 16:07:32 +0800 Subject: [PATCH 100/167] feat: precheck group balance --- service/aiproxy/common/ctxkey/key.go | 1 + service/aiproxy/middleware/auth.go | 8 +++--- service/aiproxy/middleware/distributor.go | 32 +++++++++++++++++---- service/aiproxy/middleware/utils.go | 6 +++- service/aiproxy/relay/controller/consume.go | 16 ++++++----- service/aiproxy/relay/controller/handle.go | 7 +++-- service/aiproxy/relay/meta/meta.go | 8 ++++-- 7 files changed, 55 insertions(+), 23 deletions(-) diff --git a/service/aiproxy/common/ctxkey/key.go b/service/aiproxy/common/ctxkey/key.go index 83ffae25817..a1275b875f4 100644 --- a/service/aiproxy/common/ctxkey/key.go +++ b/service/aiproxy/common/ctxkey/key.go @@ -3,6 +3,7 @@ package ctxkey const ( Group = "group" Token = "token" + GroupBalance = "group_balance" OriginalModel = "original_model" RequestID = "X-Request-Id" ModelCaches = "model_caches" diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index af9d7c26180..49559a2e8c0 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -70,7 +70,7 @@ func TokenAuth(c *gin.Context) { var err error token, err = model.ValidateAndGetToken(key) if err != nil { - abortWithMessage(c, http.StatusUnauthorized, err.Error()) + abortLogWithMessage(c, http.StatusUnauthorized, err.Error()) return } } @@ -79,10 +79,10 @@ func TokenAuth(c *gin.Context) { if token.Subnet != "" { if ok, err := network.IsIPInSubnets(c.ClientIP(), token.Subnet); err != nil { - abortWithMessage(c, http.StatusInternalServerError, err.Error()) + abortLogWithMessage(c, http.StatusInternalServerError, err.Error()) return } else if !ok { - abortWithMessage(c, 
http.StatusForbidden, + abortLogWithMessage(c, http.StatusForbidden, fmt.Sprintf("token (%s[%d]) can only be used in the specified subnet: %s, current ip: %s", token.Name, token.ID, @@ -101,7 +101,7 @@ func TokenAuth(c *gin.Context) { var err error group, err = model.CacheGetGroup(token.Group) if err != nil { - abortWithMessage(c, http.StatusInternalServerError, err.Error()) + abortLogWithMessage(c, http.StatusInternalServerError, err.Error()) return } } diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 6b20d098ce5..fac4a49b3ad 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -9,6 +9,7 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/balance" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/consume" "github.com/labring/sealos/service/aiproxy/common/ctxkey" @@ -110,6 +111,21 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model return nil } +func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { + groupBalance, _, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), group.ID) + if err != nil { + GetLogger(c).Errorf("get group (%s) balance error: %v", group.ID, err) + abortWithMessage(c, http.StatusInternalServerError, "get group balance error") + return false + } + if groupBalance <= 0 { + abortLogWithMessage(c, http.StatusForbidden, "group balance not enough") + return false + } + c.Set(ctxkey.GroupBalance, groupBalance) + return true +} + func NewDistribute(mode int) gin.HandlerFunc { return func(c *gin.Context) { distribute(c, mode) @@ -118,7 +134,7 @@ func NewDistribute(mode int) gin.HandlerFunc { func distribute(c *gin.Context, mode int) { if config.GetDisableServe() { - abortWithMessage(c, http.StatusServiceUnavailable, "service is under 
maintenance") + abortLogWithMessage(c, http.StatusServiceUnavailable, "service is under maintenance") return } @@ -126,13 +142,17 @@ func distribute(c *gin.Context, mode int) { group := GetGroup(c) + if !checkGroupBalance(c, group) { + return + } + requestModel, err := getRequestModel(c) if err != nil { - abortWithMessage(c, http.StatusBadRequest, err.Error()) + abortLogWithMessage(c, http.StatusBadRequest, err.Error()) return } if requestModel == "" { - abortWithMessage(c, http.StatusBadRequest, "no model provided") + abortLogWithMessage(c, http.StatusBadRequest, "no model provided") return } @@ -142,7 +162,7 @@ func distribute(c *gin.Context, mode int) { mc, ok := GetModelCaches(c).ModelConfig.GetModelConfig(requestModel) if !ok { - abortWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") + abortLogWithMessage(c, http.StatusServiceUnavailable, requestModel+" is not available") return } @@ -151,7 +171,7 @@ func distribute(c *gin.Context, mode int) { token := GetToken(c) if len(token.Models) == 0 || !slices.Contains(token.Models, requestModel) { - abortWithMessage(c, + abortLogWithMessage(c, http.StatusForbidden, fmt.Sprintf("token (%s[%d]) has no permission to use model: %s", token.Name, token.ID, requestModel, @@ -172,7 +192,7 @@ func distribute(c *gin.Context, mode int) { errMsg, nil, ) - abortWithMessage(c, http.StatusTooManyRequests, errMsg) + abortLogWithMessage(c, http.StatusTooManyRequests, errMsg) return } diff --git a/service/aiproxy/middleware/utils.go b/service/aiproxy/middleware/utils.go index b113dbc93be..778e54d2b67 100644 --- a/service/aiproxy/middleware/utils.go +++ b/service/aiproxy/middleware/utils.go @@ -15,8 +15,12 @@ func MessageWithRequestID(message string, id string) string { return fmt.Sprintf("%s (request id: %s)", message, id) } -func abortWithMessage(c *gin.Context, statusCode int, message string) { +func abortLogWithMessage(c *gin.Context, statusCode int, message string) { GetLogger(c).Error(message) + 
abortWithMessage(c, statusCode, message) +} + +func abortWithMessage(c *gin.Context, statusCode int, message string) { c.JSON(statusCode, gin.H{ "error": &model.Error{ Message: MessageWithRequestID(message, GetRequestID(c)), diff --git a/service/aiproxy/relay/controller/consume.go b/service/aiproxy/relay/controller/consume.go index b343dbc495f..309307e6578 100644 --- a/service/aiproxy/relay/controller/consume.go +++ b/service/aiproxy/relay/controller/consume.go @@ -1,9 +1,9 @@ package controller import ( - "context" - + "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/balance" + "github.com/labring/sealos/service/aiproxy/common/ctxkey" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/shopspring/decimal" @@ -35,19 +35,21 @@ func checkGroupBalance(req *PreCheckGroupBalanceReq, meta *meta.Meta, groupRemai if meta.IsChannelTest { return true } - if groupRemainBalance <= 0 { - return false - } preConsumedAmount := getPreConsumedAmount(req) return groupRemainBalance > preConsumedAmount } -func getGroupBalance(ctx context.Context, meta *meta.Meta) (float64, balance.PostGroupConsumer, error) { +func getGroupBalance(ctx *gin.Context, meta *meta.Meta) (float64, balance.PostGroupConsumer, error) { if meta.IsChannelTest { return 0, nil, nil } - return balance.Default.GetGroupRemainBalance(ctx, meta.Group.ID) + groupBalance, ok := ctx.Get(ctxkey.GroupBalance) + if ok { + return groupBalance.(float64), nil, nil + } + + return balance.Default.GetGroupRemainBalance(ctx.Request.Context(), meta.Group.ID) } diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index e000266dfdc..db985ec1bdb 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -20,7 +20,6 @@ import ( func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBalanceReq, error)) 
*relaymodel.ErrorWithStatusCode { log := middleware.GetLogger(c) - ctx := c.Request.Context() // 1. Get adaptor adaptor, ok := channeltype.GetAdaptor(meta.Channel.Type) @@ -31,7 +30,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa } // 2. Get group balance - groupRemainBalance, postGroupConsumer, err := getGroupBalance(ctx, meta) + groupRemainBalance, postGroupConsumer, err := getGroupBalance(c, meta) if err != nil { log.Errorf("get group (%s) balance failed: %v", meta.Group.ID, err) errMsg := fmt.Sprintf("get group (%s) balance failed", meta.Group.ID) @@ -52,6 +51,10 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa ) } + if groupRemainBalance <= 0 { + return openai.ErrorWrapperWithMessage("group balance not enough", "insufficient_group_balance", http.StatusForbidden) + } + // 3. Pre-process request preCheckReq, err := preProcess() if err != nil { diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index 5fd49e10891..9787c0e43c0 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -144,10 +144,12 @@ func (m *Meta) GetString(key string) string { } func (m *Meta) GetBool(key string) bool { - if v, ok := m.Get(key); ok { - return v.(bool) + v, ok := m.Get(key) + if !ok { + return false } - return false + b, _ := v.(bool) + return b } //nolint:unparam From dd2f73b096896b3c3c5fe19652d8e7798d419c06 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 19 Jan 2025 16:16:45 +0800 Subject: [PATCH 101/167] fix: consume nil pointer --- service/aiproxy/middleware/distributor.go | 12 ++++++++++-- service/aiproxy/relay/controller/consume.go | 8 +++++--- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index fac4a49b3ad..6397248fa19 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ 
-111,8 +111,13 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model return nil } +type GroupBalanceConsumer struct { + GroupBalance float64 + Consumer balance.PostGroupConsumer +} + func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { - groupBalance, _, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), group.ID) + groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), group.ID) if err != nil { GetLogger(c).Errorf("get group (%s) balance error: %v", group.ID, err) abortWithMessage(c, http.StatusInternalServerError, "get group balance error") @@ -122,7 +127,10 @@ func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { abortLogWithMessage(c, http.StatusForbidden, "group balance not enough") return false } - c.Set(ctxkey.GroupBalance, groupBalance) + c.Set(ctxkey.GroupBalance, &GroupBalanceConsumer{ + GroupBalance: groupBalance, + Consumer: consumer, + }) return true } diff --git a/service/aiproxy/relay/controller/consume.go b/service/aiproxy/relay/controller/consume.go index 309307e6578..ce1714cb77a 100644 --- a/service/aiproxy/relay/controller/consume.go +++ b/service/aiproxy/relay/controller/consume.go @@ -4,6 +4,7 @@ import ( "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/balance" "github.com/labring/sealos/service/aiproxy/common/ctxkey" + "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/shopspring/decimal" @@ -47,9 +48,10 @@ func getGroupBalance(ctx *gin.Context, meta *meta.Meta) (float64, balance.PostGr } groupBalance, ok := ctx.Get(ctxkey.GroupBalance) - if ok { - return groupBalance.(float64), nil, nil + if !ok { + return balance.Default.GetGroupRemainBalance(ctx.Request.Context(), meta.Group.ID) } - return balance.Default.GetGroupRemainBalance(ctx.Request.Context(), meta.Group.ID) + 
groupBalanceConsumer := groupBalance.(*middleware.GroupBalanceConsumer) + return groupBalanceConsumer.GroupBalance, groupBalanceConsumer.Consumer, nil } From b4c7e7a070698809054b7e81823c83bb10678c8b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 23 Jan 2025 11:37:18 +0800 Subject: [PATCH 102/167] feat: log balance --- service/aiproxy/middleware/distributor.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 6397248fa19..1ffed4711af 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -127,6 +127,8 @@ func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { abortLogWithMessage(c, http.StatusForbidden, "group balance not enough") return false } + log := GetLogger(c) + log.Data["balance"] = groupBalance c.Set(ctxkey.GroupBalance, &GroupBalanceConsumer{ GroupBalance: groupBalance, Consumer: consumer, From ae0523ef3142d6bc130ed9963f59f4c1aacdf6da Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 23 Jan 2025 11:59:01 +0800 Subject: [PATCH 103/167] feat: ip log --- service/aiproxy/common/consume/consume.go | 8 +++++-- service/aiproxy/controller/log.go | 8 +++++++ service/aiproxy/middleware/distributor.go | 1 + service/aiproxy/model/log.go | 28 ++++++++++++++++++---- service/aiproxy/model/utils.go | 2 ++ service/aiproxy/relay/controller/handle.go | 4 ++++ 6 files changed, 45 insertions(+), 6 deletions(-) diff --git a/service/aiproxy/common/consume/consume.go b/service/aiproxy/common/consume/consume.go index 139de8675b1..05e1e0fb327 100644 --- a/service/aiproxy/common/consume/consume.go +++ b/service/aiproxy/common/consume/consume.go @@ -26,6 +26,7 @@ func AsyncConsume( inputPrice, outputPrice float64, content string, + ip string, requestDetail *model.RequestDetail, ) { if meta.IsChannelTest { @@ -49,6 +50,7 @@ func AsyncConsume( inputPrice, outputPrice, content, + ip, requestDetail, ) } @@ -62,6 +64,7 @@ 
func Consume( inputPrice, outputPrice float64, content string, + ip string, requestDetail *model.RequestDetail, ) { if meta.IsChannelTest { @@ -70,7 +73,7 @@ func Consume( amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta) - err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, requestDetail, amount) + err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, ip, requestDetail, amount) if err != nil { log.Error("error batch record consume: " + err.Error()) } @@ -136,7 +139,7 @@ func processGroupConsume( return consumedAmount } -func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, requestDetail *model.RequestDetail, amount float64) error { +func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPrice, outputPrice float64, content string, ip string, requestDetail *model.RequestDetail, amount float64) error { promptTokens := 0 completionTokens := 0 if usage != nil { @@ -166,6 +169,7 @@ func recordConsume(meta *meta.Meta, code int, usage *relaymodel.Usage, inputPric meta.Endpoint, content, meta.Mode, + ip, requestDetail, ) } diff --git a/service/aiproxy/controller/log.go b/service/aiproxy/controller/log.go index c4b8e209f39..081201d3b66 100644 --- a/service/aiproxy/controller/log.go +++ b/service/aiproxy/controller/log.go @@ -43,6 +43,7 @@ func GetLogs(c *gin.Context) { mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") withBody, _ := strconv.ParseBool(c.Query("with_body")) + ip := c.Query("ip") result, err := model.GetLogs( group, startTimestampTime, @@ -59,6 +60,7 @@ func GetLogs(c *gin.Context) { mode, model.CodeType(codeType), withBody, + ip, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -104,6 +106,7 @@ func GetGroupLogs(c *gin.Context) { mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") withBody, _ := 
strconv.ParseBool(c.Query("with_body")) + ip := c.Query("ip") result, err := model.GetGroupLogs( group, startTimestampTime, @@ -120,6 +123,7 @@ func GetGroupLogs(c *gin.Context) { mode, model.CodeType(codeType), withBody, + ip, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -158,6 +162,7 @@ func SearchLogs(c *gin.Context) { mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") withBody, _ := strconv.ParseBool(c.Query("with_body")) + ip := c.Query("ip") result, err := model.SearchLogs( group, keyword, @@ -175,6 +180,7 @@ func SearchLogs(c *gin.Context) { mode, model.CodeType(codeType), withBody, + ip, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -217,6 +223,7 @@ func SearchGroupLogs(c *gin.Context) { mode, _ := strconv.Atoi(c.Query("mode")) codeType := c.Query("code_type") withBody, _ := strconv.ParseBool(c.Query("with_body")) + ip := c.Query("ip") result, err := model.SearchGroupLogs( group, keyword, @@ -234,6 +241,7 @@ func SearchGroupLogs(c *gin.Context) { mode, model.CodeType(codeType), withBody, + ip, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 1ffed4711af..2befdd35e34 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -200,6 +200,7 @@ func distribute(c *gin.Context, mode int) { 0, 0, errMsg, + c.ClientIP(), nil, ) abortLogWithMessage(c, http.StatusTooManyRequests, errMsg) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index ab24eafe3ab..328ae14fb93 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -44,6 +44,7 @@ type Log struct { ChannelID int `gorm:"index" json:"channel"` Code int `gorm:"index" json:"code"` Mode int `json:"mode"` + IP string `json:"ip"` } func (l *Log) MarshalJSON() ([]byte, error) { @@ -118,6 +119,7 @@ func 
RecordConsumeLog( endpoint string, content string, mode int, + ip string, requestDetail *RequestDetail, ) error { defer func() { @@ -141,6 +143,7 @@ func RecordConsumeLog( TokenName: tokenName, Model: modelName, Mode: mode, + IP: ip, UsedAmount: amount, Price: price, CompletionPrice: completionPrice, @@ -203,6 +206,7 @@ func getLogs( mode int, codeType CodeType, withBody bool, + ip string, ) (int64, []*Log, error) { tx := LogDB.Model(&Log{}) if group != "" { @@ -235,6 +239,9 @@ func getLogs( if endpoint != "" { tx = tx.Where("endpoint = ?", endpoint) } + if ip != "" { + tx = tx.Where("ip = ?", ip) + } switch codeType { case CodeTypeSuccess: tx = tx.Where("code = 200") @@ -287,8 +294,9 @@ func GetLogs( mode int, codeType CodeType, withBody bool, + ip string, ) (*GetLogsResult, error) { - total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody) + total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody, ip) if err != nil { return nil, err } @@ -323,11 +331,12 @@ func GetGroupLogs( mode int, codeType CodeType, withBody bool, + ip string, ) (*GetGroupLogsResult, error) { if group == "" { return nil, errors.New("group is required") } - total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody) + total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody, ip) if err != nil { return nil, err } @@ -366,6 +375,7 @@ func searchLogs( mode int, codeType CodeType, withBody bool, + ip string, ) (int64, []*Log, error) { tx := LogDB.Model(&Log{}) if group != "" { @@ -400,6 +410,9 @@ func searchLogs( if channelID != 0 { 
tx = tx.Where("channel_id = ?", channelID) } + if ip != "" { + tx = tx.Where("ip = ?", ip) + } switch codeType { case CodeTypeSuccess: tx = tx.Where("code = 200") @@ -470,6 +483,11 @@ func searchLogs( } values = append(values, "%"+keyword+"%") + if ip != "" { + conditions = append(conditions, "ip = ?") + values = append(values, ip) + } + if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) } @@ -526,8 +544,9 @@ func SearchLogs( mode int, codeType CodeType, withBody bool, + ip string, ) (*GetLogsResult, error) { - total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody) + total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody, ip) if err != nil { return nil, err } @@ -563,11 +582,12 @@ func SearchGroupLogs( mode int, codeType CodeType, withBody bool, + ip string, ) (*GetGroupLogsResult, error) { if group == "" { return nil, errors.New("group is required") } - total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody) + total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody, ip) if err != nil { return nil, err } diff --git a/service/aiproxy/model/utils.go b/service/aiproxy/model/utils.go index 249554235e0..148454496f5 100644 --- a/service/aiproxy/model/utils.go +++ b/service/aiproxy/model/utils.go @@ -59,6 +59,7 @@ func BatchRecordConsume( endpoint string, content string, mode int, + ip string, requestDetail *RequestDetail, ) error { errs := []error{} @@ -79,6 +80,7 @@ func BatchRecordConsume( 
endpoint, content, mode, + ip, requestDetail, ) if err != nil { diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index db985ec1bdb..7ad2767f1ea 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -42,6 +42,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa 0, 0, errMsg, + c.ClientIP(), nil, ) return openai.ErrorWrapperWithMessage( @@ -76,6 +77,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa 0, 0, err.Error(), + c.ClientIP(), detail, ) return openai.ErrorWrapper(err, "invalid_request", http.StatusBadRequest) @@ -111,6 +113,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa preCheckReq.InputPrice, preCheckReq.OutputPrice, respErr.Error.JSONOrEmpty(), + c.ClientIP(), detail, ) return respErr @@ -125,6 +128,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa preCheckReq.InputPrice, preCheckReq.OutputPrice, "", + c.ClientIP(), nil, ) From 3e21fbc23cb2c79c649b732103214bfd483d2a08 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 23 Jan 2025 12:38:09 +0800 Subject: [PATCH 104/167] fix: group disable --- service/aiproxy/middleware/auth.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 49559a2e8c0..710e4fe6818 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -96,7 +96,9 @@ func TokenAuth(c *gin.Context) { var group *model.GroupCache if useInternalToken { - group = &model.GroupCache{} + group = &model.GroupCache{ + Status: model.GroupStatusEnabled, + } } else { var err error group, err = model.CacheGetGroup(token.Group) @@ -104,6 +106,10 @@ func TokenAuth(c *gin.Context) { abortLogWithMessage(c, http.StatusInternalServerError, err.Error()) return } + if group.Status != 
model.GroupStatusEnabled { + abortLogWithMessage(c, http.StatusForbidden, "group is disabled") + return + } } SetLogGroupFields(log.Data, group) From a61c4e6ad0b33c76b554f8bec03838f37d74f991 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 23 Jan 2025 13:45:06 +0800 Subject: [PATCH 105/167] fix: non stream context cancel --- service/aiproxy/relay/adaptor/ali/embeddings.go | 2 +- service/aiproxy/relay/adaptor/ali/image.go | 2 +- service/aiproxy/relay/adaptor/ali/rerank.go | 2 +- service/aiproxy/relay/adaptor/baidu/embeddings.go | 2 +- service/aiproxy/relay/adaptor/baidu/image.go | 2 +- service/aiproxy/relay/adaptor/baidu/rerank.go | 2 +- service/aiproxy/relay/adaptor/coze/main.go | 2 +- service/aiproxy/relay/adaptor/minimax/tts.go | 4 ++-- service/aiproxy/relay/adaptor/openai/image.go | 2 +- service/aiproxy/relay/adaptor/openai/main.go | 2 +- service/aiproxy/relay/adaptor/openai/moderations.go | 2 +- service/aiproxy/relay/adaptor/openai/rerank.go | 2 +- service/aiproxy/relay/adaptor/openai/stt.go | 2 +- service/aiproxy/relay/adaptor/openai/tts.go | 2 +- service/aiproxy/relay/controller/dohelper.go | 11 ++++++----- service/aiproxy/relay/utils/utils.go | 4 +++- 16 files changed, 24 insertions(+), 21 deletions(-) diff --git a/service/aiproxy/relay/adaptor/ali/embeddings.go b/service/aiproxy/relay/adaptor/ali/embeddings.go index 66f4569cc18..877f6b66f50 100644 --- a/service/aiproxy/relay/adaptor/ali/embeddings.go +++ b/service/aiproxy/relay/adaptor/ali/embeddings.go @@ -94,7 +94,7 @@ func EmbeddingsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*r } _, err = c.Writer.Write(data) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return &openaiResponse.Usage, nil } diff --git a/service/aiproxy/relay/adaptor/ali/image.go b/service/aiproxy/relay/adaptor/ali/image.go index 4a8c12e1da2..0ec83b3fd76 100644 --- a/service/aiproxy/relay/adaptor/ali/image.go +++ 
b/service/aiproxy/relay/adaptor/ali/image.go @@ -97,7 +97,7 @@ func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model. c.Writer.WriteHeader(resp.StatusCode) _, err = c.Writer.Write(jsonResponse) if err != nil { - log.Error("aliImageHandler write response body failed: " + err.Error()) + log.Warnf("aliImageHandler write response body failed: %v", err) } return &model.Usage{}, nil } diff --git a/service/aiproxy/relay/adaptor/ali/rerank.go b/service/aiproxy/relay/adaptor/ali/rerank.go index ab739bc8ac9..97b56b55745 100644 --- a/service/aiproxy/relay/adaptor/ali/rerank.go +++ b/service/aiproxy/relay/adaptor/ali/rerank.go @@ -103,7 +103,7 @@ func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relay } _, err = c.Writer.Write(jsonResponse) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return usage, nil } diff --git a/service/aiproxy/relay/adaptor/baidu/embeddings.go b/service/aiproxy/relay/adaptor/baidu/embeddings.go index c07582d47ee..49e73cb5d47 100644 --- a/service/aiproxy/relay/adaptor/baidu/embeddings.go +++ b/service/aiproxy/relay/adaptor/baidu/embeddings.go @@ -50,7 +50,7 @@ func EmbeddingsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*r } _, err = c.Writer.Write(data) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return &baiduResponse.Usage, nil } diff --git a/service/aiproxy/relay/adaptor/baidu/image.go b/service/aiproxy/relay/adaptor/baidu/image.go index 1d7dc847098..dc24b2f9399 100644 --- a/service/aiproxy/relay/adaptor/baidu/image.go +++ b/service/aiproxy/relay/adaptor/baidu/image.go @@ -55,7 +55,7 @@ func ImageHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usa } _, err = c.Writer.Write(data) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } 
return usage, nil } diff --git a/service/aiproxy/relay/adaptor/baidu/rerank.go b/service/aiproxy/relay/adaptor/baidu/rerank.go index fe81e9e84e0..d6666d002f9 100644 --- a/service/aiproxy/relay/adaptor/baidu/rerank.go +++ b/service/aiproxy/relay/adaptor/baidu/rerank.go @@ -56,7 +56,7 @@ func RerankHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Us } _, err = c.Writer.Write(jsonData) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return &reRankResp.Usage, nil } diff --git a/service/aiproxy/relay/adaptor/coze/main.go b/service/aiproxy/relay/adaptor/coze/main.go index d178f7ec0e8..aa9934261d3 100644 --- a/service/aiproxy/relay/adaptor/coze/main.go +++ b/service/aiproxy/relay/adaptor/coze/main.go @@ -171,7 +171,7 @@ func Handler(c *gin.Context, resp *http.Response, _ int, modelName string) (*mod c.Writer.WriteHeader(resp.StatusCode) _, err = c.Writer.Write(jsonResponse) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } var responseText string if len(fullTextResponse.Choices) > 0 { diff --git a/service/aiproxy/relay/adaptor/minimax/tts.go b/service/aiproxy/relay/adaptor/minimax/tts.go index 8663758b387..712ce8a8b09 100644 --- a/service/aiproxy/relay/adaptor/minimax/tts.go +++ b/service/aiproxy/relay/adaptor/minimax/tts.go @@ -137,7 +137,7 @@ func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymod _, err = c.Writer.Write(audioBytes) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } usageCharacters := meta.InputTokens @@ -190,7 +190,7 @@ func ttsStreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*re _, err = c.Writer.Write(audioBytes) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } } diff --git 
a/service/aiproxy/relay/adaptor/openai/image.go b/service/aiproxy/relay/adaptor/openai/image.go index fa09364cded..5386357da45 100644 --- a/service/aiproxy/relay/adaptor/openai/image.go +++ b/service/aiproxy/relay/adaptor/openai/image.go @@ -71,7 +71,7 @@ func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model. _, err = c.Writer.Write(data) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return usage, nil } diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index 0c91b314f4c..2ac593fc25d 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -169,7 +169,7 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage _, err = c.Writer.Write(newData) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return &textResponse.Usage, nil } diff --git a/service/aiproxy/relay/adaptor/openai/moderations.go b/service/aiproxy/relay/adaptor/openai/moderations.go index 14d2c4919ef..06d164b41b2 100644 --- a/service/aiproxy/relay/adaptor/openai/moderations.go +++ b/service/aiproxy/relay/adaptor/openai/moderations.go @@ -52,7 +52,7 @@ func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (* _, err = c.Writer.Write(newData) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return usage, nil } diff --git a/service/aiproxy/relay/adaptor/openai/rerank.go b/service/aiproxy/relay/adaptor/openai/rerank.go index 653415a68f0..5940b85f9e2 100644 --- a/service/aiproxy/relay/adaptor/openai/rerank.go +++ b/service/aiproxy/relay/adaptor/openai/rerank.go @@ -46,7 +46,7 @@ func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model _, err = c.Writer.Write(responseBody) if 
err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } if rerankResponse.Meta.Tokens == nil { diff --git a/service/aiproxy/relay/adaptor/openai/stt.go b/service/aiproxy/relay/adaptor/openai/stt.go index ab32ced6c8e..dcd158fb333 100644 --- a/service/aiproxy/relay/adaptor/openai/stt.go +++ b/service/aiproxy/relay/adaptor/openai/stt.go @@ -120,7 +120,7 @@ func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Us } _, err = c.Writer.Write(responseBody) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return &model.Usage{ diff --git a/service/aiproxy/relay/adaptor/openai/tts.go b/service/aiproxy/relay/adaptor/openai/tts.go index bdce9258938..b8cba5ce0be 100644 --- a/service/aiproxy/relay/adaptor/openai/tts.go +++ b/service/aiproxy/relay/adaptor/openai/tts.go @@ -47,7 +47,7 @@ func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymod _, err := io.Copy(c.Writer, resp.Body) if err != nil { - log.Error("write response body failed: " + err.Error()) + log.Warnf("write response body failed: %v", err) } return &relaymodel.Usage{ PromptTokens: meta.InputTokens, diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index 45ce94f4508..07045245ba7 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -137,15 +137,16 @@ func prepareAndDoRequest(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*h return nil, openai.ErrorWrapperWithMessage("get request url failed: "+err.Error(), "get_request_url_failed", http.StatusBadRequest) } + ctx := context.Background() if timeout := config.GetTimeoutWithModelType()[meta.Mode]; timeout > 0 { - rawRequest := c.Request - ctx, cancel := context.WithTimeout(rawRequest.Context(), time.Duration(timeout)*time.Second) + // donot use c.Request.Context() 
because it will be canceled by the client + // which will cause the usage of non-streaming requests to be unable to be recorded + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(timeout)*time.Second) defer cancel() - c.Request = rawRequest.WithContext(ctx) - defer func() { c.Request = rawRequest }() } - req, err := http.NewRequestWithContext(c.Request.Context(), method, fullRequestURL, body) + req, err := http.NewRequestWithContext(ctx, method, fullRequestURL, body) if err != nil { return nil, openai.ErrorWrapperWithMessage("new request failed: "+err.Error(), "new_request_failed", http.StatusBadRequest) } diff --git a/service/aiproxy/relay/utils/utils.go b/service/aiproxy/relay/utils/utils.go index 2c0d8f69b5a..ae307bf0086 100644 --- a/service/aiproxy/relay/utils/utils.go +++ b/service/aiproxy/relay/utils/utils.go @@ -53,8 +53,10 @@ func UnmarshalMap(req *http.Request) (map[string]any, error) { return request, nil } +var defaultClient = &http.Client{} + func DoRequest(req *http.Request) (*http.Response, error) { - resp, err := http.DefaultClient.Do(req) + resp, err := defaultClient.Do(req) if err != nil { return nil, err } From da7ce1e6db8d5bc2d4e0605b07b1e285704d1472 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 23 Jan 2025 14:22:33 +0800 Subject: [PATCH 106/167] feat: amount log --- service/aiproxy/common/consume/consume.go | 26 +++++++++++++--------- service/aiproxy/relay/controller/handle.go | 5 +++++ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/service/aiproxy/common/consume/consume.go b/service/aiproxy/common/consume/consume.go index 05e1e0fb327..1dd5d8c8462 100644 --- a/service/aiproxy/common/consume/consume.go +++ b/service/aiproxy/common/consume/consume.go @@ -71,7 +71,9 @@ func Consume( return } - amount := calculateAmount(ctx, usage, inputPrice, outputPrice, postGroupConsumer, meta) + amount := CalculateAmount(usage, inputPrice, outputPrice) + + amount = consumeAmount(ctx, amount, 
postGroupConsumer, meta) err := recordConsume(meta, code, usage, inputPrice, outputPrice, content, ip, requestDetail, amount) if err != nil { @@ -79,12 +81,21 @@ func Consume( } } -func calculateAmount( +func consumeAmount( ctx context.Context, - usage *relaymodel.Usage, - inputPrice, outputPrice float64, + amount float64, postGroupConsumer balance.PostGroupConsumer, meta *meta.Meta, +) float64 { + if amount > 0 { + return processGroupConsume(ctx, amount, postGroupConsumer, meta) + } + return 0 +} + +func CalculateAmount( + usage *relaymodel.Usage, + inputPrice, outputPrice float64, ) float64 { if usage == nil { return 0 @@ -104,13 +115,8 @@ func calculateAmount( completionAmount := decimal.NewFromInt(int64(completionTokens)). Mul(decimal.NewFromFloat(outputPrice)). Div(decimal.NewFromInt(model.PriceUnit)) - amount := promptAmount.Add(completionAmount).InexactFloat64() - if amount > 0 { - return processGroupConsume(ctx, amount, postGroupConsumer, meta) - } - - return 0 + return promptAmount.Add(completionAmount).InexactFloat64() } func processGroupConsume( diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index 7ad2767f1ea..4cf6a5aad93 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -119,6 +119,11 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa return respErr } + amount := consume.CalculateAmount(usage, preCheckReq.InputPrice, preCheckReq.OutputPrice) + if amount > 0 { + log.Data["amount"] = amount + } + // 6. 
Post consume consume.AsyncConsume( postGroupConsumer, From 212a3c5bda52c3aa17ecc4bdbb5daad965a21cae Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 23 Jan 2025 14:44:25 +0800 Subject: [PATCH 107/167] fix: balance and amount log format --- service/aiproxy/middleware/distributor.go | 8 +++++--- service/aiproxy/relay/controller/handle.go | 3 ++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 2befdd35e34..34ed28b66b3 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" "slices" + "strconv" "strings" "time" @@ -117,18 +118,19 @@ type GroupBalanceConsumer struct { } func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { + log := GetLogger(c) groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), group.ID) if err != nil { - GetLogger(c).Errorf("get group (%s) balance error: %v", group.ID, err) + log.Errorf("get group (%s) balance error: %v", group.ID, err) abortWithMessage(c, http.StatusInternalServerError, "get group balance error") return false } + log.Data["balance"] = strconv.FormatFloat(groupBalance, 'f', -1, 64) + if groupBalance <= 0 { abortLogWithMessage(c, http.StatusForbidden, "group balance not enough") return false } - log := GetLogger(c) - log.Data["balance"] = groupBalance c.Set(ctxkey.GroupBalance, &GroupBalanceConsumer{ GroupBalance: groupBalance, Consumer: consumer, diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index 4cf6a5aad93..d2170d5082d 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "net/http" + "strconv" "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" @@ -121,7 +122,7 @@ func Handle(meta *meta.Meta, c *gin.Context, 
preProcess func() (*PreCheckGroupBa amount := consume.CalculateAmount(usage, preCheckReq.InputPrice, preCheckReq.OutputPrice) if amount > 0 { - log.Data["amount"] = amount + log.Data["amount"] = strconv.FormatFloat(amount, 'f', -1, 64) } // 6. Post consume From 8b690b247a3efbff914754606b64c43809095694 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 4 Feb 2025 23:20:14 +0800 Subject: [PATCH 108/167] fix: do not skip empty --- service/aiproxy/relay/adaptor/anthropic/main.go | 2 +- service/aiproxy/relay/adaptor/baidu/main.go | 2 +- service/aiproxy/relay/adaptor/cohere/main.go | 2 +- service/aiproxy/relay/adaptor/coze/main.go | 2 +- service/aiproxy/relay/adaptor/gemini/main.go | 2 +- service/aiproxy/relay/adaptor/ollama/main.go | 2 +- service/aiproxy/relay/adaptor/openai/main.go | 9 ++------- 7 files changed, 8 insertions(+), 13 deletions(-) diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 50fe177a9ab..3fbbcb20cb1 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -339,7 +339,7 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er } err = render.ObjectData(c, response) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) } } diff --git a/service/aiproxy/relay/adaptor/baidu/main.go b/service/aiproxy/relay/adaptor/baidu/main.go index 34257469170..37d87f33d3f 100644 --- a/service/aiproxy/relay/adaptor/baidu/main.go +++ b/service/aiproxy/relay/adaptor/baidu/main.go @@ -160,7 +160,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model response := streamResponseBaidu2OpenAI(meta, &baiduResponse) err = render.ObjectData(c, response) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) } } diff --git 
a/service/aiproxy/relay/adaptor/cohere/main.go b/service/aiproxy/relay/adaptor/cohere/main.go index 455e86b263e..3db0744d424 100644 --- a/service/aiproxy/relay/adaptor/cohere/main.go +++ b/service/aiproxy/relay/adaptor/cohere/main.go @@ -170,7 +170,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC err = render.ObjectData(c, response) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) } } diff --git a/service/aiproxy/relay/adaptor/coze/main.go b/service/aiproxy/relay/adaptor/coze/main.go index aa9934261d3..6d36df943ca 100644 --- a/service/aiproxy/relay/adaptor/coze/main.go +++ b/service/aiproxy/relay/adaptor/coze/main.go @@ -129,7 +129,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC err = render.ObjectData(c, response) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) } } diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 6922660c7f2..ce390aef380 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -427,7 +427,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model err = render.ObjectData(c, response) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) } } diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index f49c868d3f7..b559d512001 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -162,7 +162,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithSt response := streamResponseOllama2OpenAI(&ollamaResponse) err = render.ObjectData(c, 
response) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) } } diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index 2ac593fc25d..bdfec67b816 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -63,11 +63,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model err := json.Unmarshal(conv.StringToBytes(data), &streamResponse) if err != nil { log.Error("error unmarshalling stream response: " + err.Error()) - continue // just ignore the error - } - if len(streamResponse.Choices) == 0 && streamResponse.Usage == nil { - // but for empty choice and no usage, we should not pass it to client, this is for azure - continue // just ignore empty choice + continue } if streamResponse.Usage != nil { usage = streamResponse.Usage @@ -75,7 +71,6 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model for _, choice := range streamResponse.Choices { responseText += choice.Delta.StringContent() } - // streamResponse.Model = meta.ActualModelName respMap := make(map[string]any) err = json.Unmarshal(conv.StringToBytes(data), &respMap) if err != nil { @@ -87,7 +82,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model } err = render.ObjectData(c, respMap) if err != nil { - log.Error("error rendering stream response: " + err.Error()) + log.Warn("error rendering stream response: " + err.Error()) continue } case relaymode.Completions: From cff95082beff0a9fdb75f818c387354b339c6a44 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 5 Feb 2025 01:03:31 +0800 Subject: [PATCH 109/167] fix: reason system prompt --- .../aiproxy/relay/adaptor/anthropic/main.go | 5 +-- service/aiproxy/relay/adaptor/baidu/main.go | 5 +-- service/aiproxy/relay/adaptor/cohere/main.go | 5 +-- 
service/aiproxy/relay/adaptor/coze/main.go | 5 +-- service/aiproxy/relay/adaptor/doubao/main.go | 38 +++++++++++++++++++ service/aiproxy/relay/adaptor/gemini/main.go | 5 +-- service/aiproxy/relay/adaptor/ollama/main.go | 5 +-- service/aiproxy/relay/adaptor/openai/main.go | 6 +-- service/aiproxy/relay/model/message.go | 15 +++++--- 9 files changed, 55 insertions(+), 34 deletions(-) diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 3fbbcb20cb1..7b26cf66cca 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -337,10 +337,7 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er lastToolCallChoice = choice } } - err = render.ObjectData(c, response) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - } + _ = render.ObjectData(c, response) } if err := scanner.Err(); err != nil { diff --git a/service/aiproxy/relay/adaptor/baidu/main.go b/service/aiproxy/relay/adaptor/baidu/main.go index 37d87f33d3f..e9fc9eae8ee 100644 --- a/service/aiproxy/relay/adaptor/baidu/main.go +++ b/service/aiproxy/relay/adaptor/baidu/main.go @@ -158,10 +158,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model usage.CompletionTokens = baiduResponse.Usage.TotalTokens - baiduResponse.Usage.PromptTokens } response := streamResponseBaidu2OpenAI(meta, &baiduResponse) - err = render.ObjectData(c, response) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - } + _ = render.ObjectData(c, response) } if err := scanner.Err(); err != nil { diff --git a/service/aiproxy/relay/adaptor/cohere/main.go b/service/aiproxy/relay/adaptor/cohere/main.go index 3db0744d424..fbc076f2df9 100644 --- a/service/aiproxy/relay/adaptor/cohere/main.go +++ b/service/aiproxy/relay/adaptor/cohere/main.go @@ -168,10 +168,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) 
(*model.ErrorWithStatusC response.Model = c.GetString("original_model") response.Created = createdTime - err = render.ObjectData(c, response) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - } + _ = render.ObjectData(c, response) } if err := scanner.Err(); err != nil { diff --git a/service/aiproxy/relay/adaptor/coze/main.go b/service/aiproxy/relay/adaptor/coze/main.go index 6d36df943ca..d2b644f3c40 100644 --- a/service/aiproxy/relay/adaptor/coze/main.go +++ b/service/aiproxy/relay/adaptor/coze/main.go @@ -127,10 +127,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC response.Model = modelName response.Created = createdTime - err = render.ObjectData(c, response) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - } + _ = render.ObjectData(c, response) } if err := scanner.Err(); err != nil { diff --git a/service/aiproxy/relay/adaptor/doubao/main.go b/service/aiproxy/relay/adaptor/doubao/main.go index fca376cd62f..724b5a8d0f5 100644 --- a/service/aiproxy/relay/adaptor/doubao/main.go +++ b/service/aiproxy/relay/adaptor/doubao/main.go @@ -1,12 +1,18 @@ package doubao import ( + "bytes" + "errors" "fmt" + "io" + "net/http" "strings" + json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) @@ -42,6 +48,38 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return GetRequestURL(meta) } +func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { + method, header, body, err := a.Adaptor.ConvertRequest(meta, req) + if err != nil { + return "", nil, nil, err + } + if meta.Mode != relaymode.ChatCompletions || meta.OriginModel != 
"deepseek-reasoner" { + return method, header, body, nil + } + + m := make(map[string]any) + err = json.NewDecoder(body).Decode(&m) + if err != nil { + return "", nil, nil, err + } + messages, _ := m["messages"].([]any) + if len(messages) == 0 { + return "", nil, nil, errors.New("messages is empty") + } + sysMessage := relaymodel.Message{ + Role: "system", + Content: "回答前,都先用 输出你的思考过程。", + } + messages = append([]any{sysMessage}, messages...) + m["messages"] = messages + newBody, err := json.Marshal(m) + if err != nil { + return "", nil, nil, err + } + + return method, header, bytes.NewReader(newBody), nil +} + func (a *Adaptor) GetChannelName() string { return "doubao" } diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index ce390aef380..92f2664596d 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -425,10 +425,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model responseText.WriteString(response.Choices[0].Delta.StringContent()) - err = render.ObjectData(c, response) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - } + _ = render.ObjectData(c, response) } if err := scanner.Err(); err != nil { diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index b559d512001..27ed57c1ab0 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -160,10 +160,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithSt } response := streamResponseOllama2OpenAI(&ollamaResponse) - err = render.ObjectData(c, response) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - } + _ = render.ObjectData(c, response) } if err := scanner.Err(); err != nil { diff --git a/service/aiproxy/relay/adaptor/openai/main.go 
b/service/aiproxy/relay/adaptor/openai/main.go index bdfec67b816..5bebc50cba0 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -80,11 +80,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model if _, ok := respMap["model"]; ok && meta.OriginModel != "" { respMap["model"] = meta.OriginModel } - err = render.ObjectData(c, respMap) - if err != nil { - log.Warn("error rendering stream response: " + err.Error()) - continue - } + _ = render.ObjectData(c, respMap) case relaymode.Completions: var streamResponse CompletionsStreamResponse err := json.Unmarshal(conv.StringToBytes(data), &streamResponse) diff --git a/service/aiproxy/relay/model/message.go b/service/aiproxy/relay/model/message.go index eefe4e02964..fab5e794242 100644 --- a/service/aiproxy/relay/model/message.go +++ b/service/aiproxy/relay/model/message.go @@ -3,11 +3,12 @@ package model import "strings" type Message struct { - Content any `json:"content,omitempty"` - Name *string `json:"name,omitempty"` - Role string `json:"role,omitempty"` - ToolCallID string `json:"tool_call_id,omitempty"` - ToolCalls []*Tool `json:"tool_calls,omitempty"` + Content any `json:"content,omitempty"` + ReasoningContent string `json:"reasoning_content,omitempty"` + Name *string `json:"name,omitempty"` + Role string `json:"role,omitempty"` + ToolCallID string `json:"tool_call_id,omitempty"` + ToolCalls []*Tool `json:"tool_calls,omitempty"` } func (m *Message) IsStringContent() bool { @@ -23,6 +24,10 @@ func (m *Message) ToStringContentMessage() { } func (m *Message) StringContent() string { + if m.ReasoningContent != "" { + return m.ReasoningContent + } + content, ok := m.Content.(string) if ok { return content From 707ba6aab8684fb15ed02ecbb23230a5ad670729 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 6 Feb 2025 14:42:15 +0800 Subject: [PATCH 110/167] feat: doubao and moonshot model --- .../aiproxy/relay/adaptor/ali/constants.go | 27 
++++++++++ .../aiproxy/relay/adaptor/doubao/constants.go | 52 +++++++++++++++++++ .../relay/adaptor/moonshot/constants.go | 40 ++++++++++++++ 3 files changed, 119 insertions(+) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index 61e0f7490d4..baca8474d9d 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -642,6 +642,33 @@ var ModelList = []*model.ModelConfig{ ), }, + { + Model: "qwq-32b-preview", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.0035, + OutputPrice: 0.007, + RPM: 1200, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(30720), + model.WithModelConfigMaxOutputTokens(16384), + ), + }, + { + Model: "qvq-72b-preview", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.012, + OutputPrice: 0.036, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(16384), + model.WithModelConfigMaxOutputTokens(16384), + ), + }, + // stable-diffusion { Model: "stable-diffusion-xl", diff --git a/service/aiproxy/relay/adaptor/doubao/constants.go b/service/aiproxy/relay/adaptor/doubao/constants.go index c5db8903351..75f56506c8d 100644 --- a/service/aiproxy/relay/adaptor/doubao/constants.go +++ b/service/aiproxy/relay/adaptor/doubao/constants.go @@ -8,6 +8,58 @@ import ( // https://console.volcengine.com/ark/region:ark+cn-beijing/model var ModelList = []*model.ModelConfig{ + { + Model: "Doubao-1.5-vision-pro-32k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDoubao, + InputPrice: 0.003, + OutputPrice: 0.009, + RPM: 15000, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(32768), + model.WithModelConfigVision(true), + ), + }, + { + Model: "Doubao-1.5-pro-32k", + Type: relaymode.ChatCompletions, + Owner: 
model.ModelOwnerDoubao, + InputPrice: 0.0008, + OutputPrice: 0.0020, + RPM: 15000, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(32768), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), + }, + { + Model: "Doubao-1.5-pro-256k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDoubao, + InputPrice: 0.005, + OutputPrice: 0.009, + RPM: 2000, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(256000), + model.WithModelConfigMaxOutputTokens(12000), + model.WithModelConfigToolChoice(true), + ), + }, + { + Model: "Doubao-1.5-lite-32k", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDoubao, + InputPrice: 0.0003, + OutputPrice: 0.0006, + RPM: 15000, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), + }, + { Model: "Doubao-vision-lite-32k", Type: relaymode.ChatCompletions, diff --git a/service/aiproxy/relay/adaptor/moonshot/constants.go b/service/aiproxy/relay/adaptor/moonshot/constants.go index fabdc824768..905f015c73c 100644 --- a/service/aiproxy/relay/adaptor/moonshot/constants.go +++ b/service/aiproxy/relay/adaptor/moonshot/constants.go @@ -42,4 +42,44 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigToolChoice(true), ), }, + + { + Model: "moonshot-v1-8k-vision-preview", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerMoonshot, + InputPrice: 0.012, + OutputPrice: 0.012, + RPM: 500, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(8192), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), + }, + { + Model: "moonshot-v1-32k-vision-preview", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerMoonshot, + InputPrice: 0.024, + OutputPrice: 0.024, + RPM: 500, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(32768), + 
model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), + }, + { + Model: "moonshot-v1-128k-vision-preview", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerMoonshot, + InputPrice: 0.06, + OutputPrice: 0.06, + RPM: 500, + Config: model.NewModelConfig( + model.WithModelConfigMaxInputTokens(131072), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), + }, } From eeab34a4f8ddbbb835a8fec7de82a10d6920dc2a Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 6 Feb 2025 15:10:36 +0800 Subject: [PATCH 111/167] feat: disable model config can load existed model --- service/aiproxy/model/cache.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index ada30601912..acad8ed56a6 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -536,16 +536,18 @@ func (m *modelConfigMapCache) GetModelConfig(model string) (*ModelConfig, bool) var _ ModelConfigCache = (*disabledModelConfigCache)(nil) -type disabledModelConfigCache struct{} +type disabledModelConfigCache struct { + modelConfigs ModelConfigCache +} func (d *disabledModelConfigCache) GetModelConfig(model string) (*ModelConfig, bool) { + if config, ok := d.modelConfigs.GetModelConfig(model); ok { + return config, true + } return NewDefaultModelConfig(model), true } func initializeModelConfigCache() (ModelConfigCache, error) { - if config.GetDisableModelConfig() { - return &disabledModelConfigCache{}, nil - } modelConfigs, err := GetAllModelConfigs() if err != nil { return nil, err @@ -555,7 +557,11 @@ func initializeModelConfigCache() (ModelConfigCache, error) { newModelConfigMap[modelConfig.Model] = modelConfig } - return &modelConfigMapCache{modelConfigMap: newModelConfigMap}, nil + configs := &modelConfigMapCache{modelConfigMap: newModelConfigMap} + if config.GetDisableModelConfig() { + return &disabledModelConfigCache{modelConfigs: 
configs}, nil + } + return configs, nil } func initializeChannelModels(channel *Channel) { From 8c46269f5cac9252b332b5c15f0cea67d2f79bba Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 6 Feb 2025 15:29:02 +0800 Subject: [PATCH 112/167] chore: add shutdown timeout duration to 600 sec --- service/aiproxy/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/main.go b/service/aiproxy/main.go index 42c997935be..ec9a973a846 100644 --- a/service/aiproxy/main.go +++ b/service/aiproxy/main.go @@ -174,11 +174,11 @@ func main() { <-ctx.Done() - shutdownCtx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + shutdownCtx, cancel := context.WithTimeout(context.Background(), 600*time.Second) defer cancel() log.Info("shutting down http server...") - log.Info("max wait time: 120s") + log.Info("max wait time: 600s") if err := srv.Shutdown(shutdownCtx); err != nil { log.Error("server forced to shutdown: " + err.Error()) } else { From ab9a9129d6fd456f532e7fc679eac59476c9d6fc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 6 Feb 2025 15:51:48 +0800 Subject: [PATCH 113/167] feat: dashboard data build whit concurrent --- service/aiproxy/model/log.go | 109 +++++++++++++++++++++++------------ 1 file changed, 73 insertions(+), 36 deletions(-) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 328ae14fb93..56d3256b531 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -13,6 +13,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common/config" "github.com/shopspring/decimal" log "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" "gorm.io/gorm" ) @@ -791,13 +792,40 @@ func GetDashboardData(start, end time.Time, modelName string, timeSpan time.Dura return nil, errors.New("end time is before start time") } - chartData, err := getChartData("", start, end, "", modelName, timeSpan) - if err != nil { - return nil, err - } + var ( + chartData 
[]*HourlyChartData + models []string + rpm int64 + tpm int64 + ) - models, err := getLogDistinctValues[string]("model", "", start, end) - if err != nil { + g := new(errgroup.Group) + + g.Go(func() error { + var err error + chartData, err = getChartData("", start, end, "", modelName, timeSpan) + return err + }) + + g.Go(func() error { + var err error + models, err = getLogDistinctValues[string]("model", "", start, end) + return err + }) + + g.Go(func() error { + var err error + rpm, err = getRPM("", end, "", modelName) + return err + }) + + g.Go(func() error { + var err error + tpm, err = getTPM("", end, "", modelName) + return err + }) + + if err := g.Wait(); err != nil { return nil, err } @@ -805,16 +833,6 @@ func GetDashboardData(start, end time.Time, modelName string, timeSpan time.Dura exceptionCount := sumExceptionCount(chartData) usedAmount := sumUsedAmount(chartData) - rpm, err := getRPM("", end, "", modelName) - if err != nil { - return nil, err - } - - tpm, err := getTPM("", end, "", modelName) - if err != nil { - return nil, err - } - return &DashboardResponse{ ChartData: chartData, Models: models, @@ -837,18 +855,47 @@ func GetGroupDashboardData(group string, start, end time.Time, tokenName string, return nil, errors.New("end time is before start time") } - chartData, err := getChartData(group, start, end, tokenName, modelName, timeSpan) - if err != nil { - return nil, err - } + var ( + chartData []*HourlyChartData + tokenNames []string + models []string + rpm int64 + tpm int64 + ) - tokenNames, err := getLogDistinctValues[string]("token_name", group, start, end) - if err != nil { - return nil, err - } + g := new(errgroup.Group) - models, err := getLogDistinctValues[string]("model", group, start, end) - if err != nil { + g.Go(func() error { + var err error + chartData, err = getChartData(group, start, end, tokenName, modelName, timeSpan) + return err + }) + + g.Go(func() error { + var err error + tokenNames, err = 
getLogDistinctValues[string]("token_name", group, start, end) + return err + }) + + g.Go(func() error { + var err error + models, err = getLogDistinctValues[string]("model", group, start, end) + return err + }) + + g.Go(func() error { + var err error + rpm, err = getRPM(group, end, tokenName, modelName) + return err + }) + + g.Go(func() error { + var err error + tpm, err = getTPM(group, end, tokenName, modelName) + return err + }) + + if err := g.Wait(); err != nil { return nil, err } @@ -856,16 +903,6 @@ func GetGroupDashboardData(group string, start, end time.Time, tokenName string, exceptionCount := sumExceptionCount(chartData) usedAmount := sumUsedAmount(chartData) - rpm, err := getRPM(group, end, tokenName, modelName) - if err != nil { - return nil, err - } - - tpm, err := getTPM(group, end, tokenName, modelName) - if err != nil { - return nil, err - } - return &GroupDashboardResponse{ DashboardResponse: DashboardResponse{ ChartData: chartData, From 5202f3abb10369a76eb8e876cdfab3ef7ca3db10 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 6 Feb 2025 16:17:07 +0800 Subject: [PATCH 114/167] feat: logs data build whit concurrent --- service/aiproxy/controller/log.go | 16 +- service/aiproxy/model/log.go | 363 ++++++++++++++++++++++-------- 2 files changed, 273 insertions(+), 106 deletions(-) diff --git a/service/aiproxy/controller/log.go b/service/aiproxy/controller/log.go index 081201d3b66..f028a9099be 100644 --- a/service/aiproxy/controller/log.go +++ b/service/aiproxy/controller/log.go @@ -52,8 +52,6 @@ func GetLogs(c *gin.Context) { requestID, tokenID, tokenName, - p*perPage, - perPage, channelID, endpoint, order, @@ -61,6 +59,8 @@ func GetLogs(c *gin.Context) { model.CodeType(codeType), withBody, ip, + p, + perPage, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -115,8 +115,6 @@ func GetGroupLogs(c *gin.Context) { requestID, tokenID, tokenName, - p*perPage, - perPage, channelID, endpoint, order, @@ -124,6 +122,8 @@ func 
GetGroupLogs(c *gin.Context) { model.CodeType(codeType), withBody, ip, + p, + perPage, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -166,8 +166,6 @@ func SearchLogs(c *gin.Context) { result, err := model.SearchLogs( group, keyword, - p, - perPage, endpoint, requestID, tokenID, @@ -181,6 +179,8 @@ func SearchLogs(c *gin.Context) { model.CodeType(codeType), withBody, ip, + p, + perPage, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) @@ -227,8 +227,6 @@ func SearchGroupLogs(c *gin.Context) { result, err := model.SearchGroupLogs( group, keyword, - p, - perPage, endpoint, requestID, tokenID, @@ -242,6 +240,8 @@ func SearchGroupLogs(c *gin.Context) { model.CodeType(codeType), withBody, ip, + p, + perPage, ) if err != nil { middleware.ErrorResponse(c, http.StatusOK, err.Error()) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 56d3256b531..649755f2bbb 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -191,7 +191,7 @@ type GetGroupLogsResult struct { TokenNames []string `json:"token_names"` } -func getLogs( +func buildGetLogsQuery( group string, startTimestamp time.Time, endTimestamp time.Time, @@ -199,16 +199,12 @@ func getLogs( requestID string, tokenID int, tokenName string, - startIdx int, - num int, channelID int, endpoint string, - order string, mode int, codeType CodeType, - withBody bool, ip string, -) (int64, []*Log, error) { +) *gorm.DB { tx := LogDB.Model(&Log{}) if group != "" { tx = tx.Where("group_id = ?", group) @@ -249,33 +245,88 @@ func getLogs( case CodeTypeError: tx = tx.Where("code != 200") } + return tx +} +func getLogs( + group string, + startTimestamp time.Time, + endTimestamp time.Time, + modelName string, + requestID string, + tokenID int, + tokenName string, + channelID int, + endpoint string, + order string, + mode int, + codeType CodeType, + withBody bool, + ip string, + page int, + perPage int, +) (int64, []*Log, error) { var 
total int64 var logs []*Log - err := tx.Count(&total).Error - if err != nil { - return total, nil, err - } - if total <= 0 { - return total, nil, nil - } - if withBody { - tx = tx.Preload("RequestDetail") - } else { - tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { - return db.Select("id", "log_id") - }) - } + g := new(errgroup.Group) - err = tx. - Order(getLogOrder(order)). - Limit(num). - Offset(startIdx). - Find(&logs).Error - if err != nil { - return total, nil, err + g.Go(func() error { + return buildGetLogsQuery( + group, + startTimestamp, + endTimestamp, + modelName, + requestID, + tokenID, + tokenName, + channelID, + endpoint, + mode, + codeType, + ip, + ).Count(&total).Error + }) + + g.Go(func() error { + page-- + if page < 0 { + page = 0 + } + + query := buildGetLogsQuery( + group, + startTimestamp, + endTimestamp, + modelName, + requestID, + tokenID, + tokenName, + channelID, + endpoint, + mode, + codeType, + ip, + ) + if withBody { + query = query.Preload("RequestDetail") + } else { + query = query.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { + return db.Select("id", "log_id") + }) + } + + return query. + Order(getLogOrder(order)). + Limit(perPage). + Offset(page * perPage). 
+ Find(&logs).Error + }) + + if err := g.Wait(); err != nil { + return 0, nil, err } + return total, logs, nil } @@ -287,8 +338,6 @@ func GetLogs( requestID string, tokenID int, tokenName string, - startIdx int, - num int, channelID int, endpoint string, order string, @@ -296,14 +345,30 @@ func GetLogs( codeType CodeType, withBody bool, ip string, + page int, + perPage int, ) (*GetLogsResult, error) { - total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody, ip) - if err != nil { - return nil, err - } + var ( + total int64 + logs []*Log + models []string + ) - models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) - if err != nil { + g := new(errgroup.Group) + + g.Go(func() error { + var err error + total, logs, err = getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, channelID, endpoint, order, mode, codeType, withBody, ip, page, perPage) + return err + }) + + g.Go(func() error { + var err error + models, err = getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + return err + }) + + if err := g.Wait(); err != nil { return nil, err } @@ -324,8 +389,6 @@ func GetGroupLogs( requestID string, tokenID int, tokenName string, - startIdx int, - num int, channelID int, endpoint string, order string, @@ -333,22 +396,44 @@ func GetGroupLogs( codeType CodeType, withBody bool, ip string, + page int, + perPage int, ) (*GetGroupLogsResult, error) { if group == "" { return nil, errors.New("group is required") } - total, logs, err := getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, startIdx, num, channelID, endpoint, order, mode, codeType, withBody, ip) - if err != nil { - return nil, err - } - tokenNames, err := getLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) - if err != nil { - return nil, err - } - 
models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) - if err != nil { + + var ( + total int64 + logs []*Log + tokenNames []string + models []string + ) + + g := new(errgroup.Group) + + g.Go(func() error { + var err error + total, logs, err = getLogs(group, startTimestamp, endTimestamp, modelName, requestID, tokenID, tokenName, channelID, endpoint, order, mode, codeType, withBody, ip, page, perPage) + return err + }) + + g.Go(func() error { + var err error + tokenNames, err = getLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) + return err + }) + + g.Go(func() error { + var err error + models, err = getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + return err + }) + + if err := g.Wait(); err != nil { return nil, err } + return &GetGroupLogsResult{ GetLogsResult: GetLogsResult{ Logs: logs, @@ -359,11 +444,9 @@ func GetGroupLogs( }, nil } -func searchLogs( +func buildSearchLogsQuery( group string, keyword string, - page int, - perPage int, endpoint string, requestID string, tokenID int, @@ -372,12 +455,10 @@ func searchLogs( startTimestamp time.Time, endTimestamp time.Time, channelID int, - order string, mode int, codeType CodeType, - withBody bool, ip string, -) (int64, []*Log, error) { +) *gorm.DB { tx := LogDB.Model(&Log{}) if group != "" { tx = tx.Where("group_id = ?", group) @@ -494,45 +575,98 @@ func searchLogs( } } + return tx +} + +func searchLogs( + group string, + keyword string, + endpoint string, + requestID string, + tokenID int, + tokenName string, + modelName string, + startTimestamp time.Time, + endTimestamp time.Time, + channelID int, + order string, + mode int, + codeType CodeType, + withBody bool, + ip string, + page int, + perPage int, +) (int64, []*Log, error) { var total int64 var logs []*Log - err := tx.Count(&total).Error - if err != nil { - return total, nil, err - } - if total <= 0 { - return total, logs, nil - } - page-- - if page < 0 { - page = 0 - 
} + g := new(errgroup.Group) - if withBody { - tx = tx.Preload("RequestDetail") - } else { - tx = tx.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { - return db.Select("id", "log_id") - }) - } + g.Go(func() error { + return buildSearchLogsQuery( + group, + keyword, + endpoint, + requestID, + tokenID, + tokenName, + modelName, + startTimestamp, + endTimestamp, + channelID, + mode, + codeType, + ip, + ).Count(&total).Error + }) - err = tx. - Order(getLogOrder(order)). - Limit(perPage). - Offset(page * perPage). - Find(&logs).Error - if err != nil { - return total, nil, err + g.Go(func() error { + page-- + if page < 0 { + page = 0 + } + + query := buildSearchLogsQuery( + group, + keyword, + endpoint, + requestID, + tokenID, + tokenName, + modelName, + startTimestamp, + endTimestamp, + channelID, + mode, + codeType, + ip, + ) + + if withBody { + query = query.Preload("RequestDetail") + } else { + query = query.Preload("RequestDetail", func(db *gorm.DB) *gorm.DB { + return db.Select("id", "log_id") + }) + } + + return query. + Order(getLogOrder(order)). + Limit(perPage). + Offset(page * perPage). 
+ Find(&logs).Error + }) + + if err := g.Wait(); err != nil { + return 0, nil, err } + return total, logs, nil } func SearchLogs( group string, keyword string, - page int, - perPage int, endpoint string, requestID string, tokenID int, @@ -546,14 +680,30 @@ func SearchLogs( codeType CodeType, withBody bool, ip string, + page int, + perPage int, ) (*GetLogsResult, error) { - total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody, ip) - if err != nil { - return nil, err - } + var ( + total int64 + logs []*Log + models []string + ) - models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) - if err != nil { + g := new(errgroup.Group) + + g.Go(func() error { + var err error + total, logs, err = searchLogs(group, keyword, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody, ip, page, perPage) + return err + }) + + g.Go(func() error { + var err error + models, err = getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + return err + }) + + if err := g.Wait(); err != nil { return nil, err } @@ -569,8 +719,6 @@ func SearchLogs( func SearchGroupLogs( group string, keyword string, - page int, - perPage int, endpoint string, requestID string, tokenID int, @@ -584,22 +732,41 @@ func SearchGroupLogs( codeType CodeType, withBody bool, ip string, + page int, + perPage int, ) (*GetGroupLogsResult, error) { if group == "" { return nil, errors.New("group is required") } - total, logs, err := searchLogs(group, keyword, page, perPage, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody, ip) - if err != nil { - return nil, err - } - tokenNames, err := getLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) - if err != nil { - return 
nil, err - } + var ( + total int64 + logs []*Log + tokenNames []string + models []string + ) - models, err := getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) - if err != nil { + g := new(errgroup.Group) + + g.Go(func() error { + var err error + total, logs, err = searchLogs(group, keyword, endpoint, requestID, tokenID, tokenName, modelName, startTimestamp, endTimestamp, channelID, order, mode, codeType, withBody, ip, page, perPage) + return err + }) + + g.Go(func() error { + var err error + tokenNames, err = getLogDistinctValues[string]("token_name", group, startTimestamp, endTimestamp) + return err + }) + + g.Go(func() error { + var err error + models, err = getLogDistinctValues[string]("model", group, startTimestamp, endTimestamp) + return err + }) + + if err := g.Wait(); err != nil { return nil, err } From 7d734f25dd5aab1959b2b628c0ef7bb037a1f6b0 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 10:10:25 +0800 Subject: [PATCH 115/167] fix: monitor remove banned model --- service/aiproxy/controller/monitor.go | 1 + service/aiproxy/monitor/model.go | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 service/aiproxy/controller/monitor.go diff --git a/service/aiproxy/controller/monitor.go b/service/aiproxy/controller/monitor.go new file mode 100644 index 00000000000..b0b429f8999 --- /dev/null +++ b/service/aiproxy/controller/monitor.go @@ -0,0 +1 @@ +package controller diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go index 02b70a66049..9f1f7822f87 100644 --- a/service/aiproxy/monitor/model.go +++ b/service/aiproxy/monitor/model.go @@ -135,7 +135,7 @@ var clearChannelAllModelErrorsScript = redis.NewScript(` for _, key in ipairs(keys) do redis.call("DEL", key) end - redis.call("DEL", banned_key) + redis.call("SREM", banned_key, channel_id) return redis.status_reply("ok") `) @@ -147,6 +147,27 @@ func ClearChannelAllModelErrors(ctx context.Context, 
channelID int) error { return clearChannelAllModelErrorsScript.Run(ctx, common.RDB, []string{}, channelID).Err() } +var clearAllModelErrorsScript = redis.NewScript(` + local banned_key = "model:*:banned" + local channel_requests_pattern = "model:*:channel:*:requests" + + local keys = redis.call("KEYS", channel_requests_pattern) + for _, key in ipairs(keys) do + redis.call("DEL", key) + end + redis.call("DEL", banned_key) + + return redis.status_reply("ok") +`) + +func ClearAllModelErrors(ctx context.Context) error { + if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { + return nil + } + return clearAllModelErrorsScript.Run(ctx, common.RDB, []string{}).Err() +} + +// map[model][]channelID func GetAllBannedChannels(ctx context.Context) (map[string][]int64, error) { if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { return nil, nil From 504455d3f96ed8ce82417563245e6e4d81059727 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 11:09:20 +0800 Subject: [PATCH 116/167] feat: split think --- service/aiproxy/common/splitter/splitter.go | 120 ++++++++++++++++++ service/aiproxy/common/splitter/think.go | 17 +++ .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 3 + .../relay/adaptor/baiduv2/constants.go | 25 ++++ .../relay/adaptor/deepseek/constants.go | 15 ++- .../aiproxy/relay/adaptor/openai/adaptor.go | 10 +- service/aiproxy/relay/adaptor/openai/main.go | 49 ++++++- .../relay/adaptor/openaithink/adaptor.go | 26 ++++ service/aiproxy/relay/channeltype/define.go | 2 + 9 files changed, 261 insertions(+), 6 deletions(-) create mode 100644 service/aiproxy/common/splitter/splitter.go create mode 100644 service/aiproxy/common/splitter/think.go create mode 100644 service/aiproxy/relay/adaptor/openaithink/adaptor.go diff --git a/service/aiproxy/common/splitter/splitter.go b/service/aiproxy/common/splitter/splitter.go new file mode 100644 index 00000000000..152c888ee50 --- /dev/null +++ b/service/aiproxy/common/splitter/splitter.go @@ -0,0 
+1,120 @@ +package splitter + +import "bytes" + +type Splitter struct { + head []byte + tail []byte + headLen int + tailLen int + buffer []byte + state int + partialTailPos int + kmpNext []int +} + +func NewSplitter(head, tail []byte) *Splitter { + return &Splitter{ + head: head, + tail: tail, + headLen: len(head), + tailLen: len(tail), + kmpNext: computeKMPNext(tail), + } +} + +func computeKMPNext(pattern []byte) []int { + n := len(pattern) + next := make([]int, n) + if n == 0 { + return next + } + next[0] = 0 + for i := 1; i < n; i++ { + j := next[i-1] + for j > 0 && pattern[i] != pattern[j] { + j = next[j-1] + } + if pattern[i] == pattern[j] { + j++ + } + next[i] = j + } + return next +} + +func (s *Splitter) Process(data []byte) ([]byte, []byte) { + switch s.state { + case 0: + s.buffer = append(s.buffer, data...) + bufferLen := len(s.buffer) + minLen := bufferLen + if minLen > s.headLen { + minLen = s.headLen + } + if minLen > 0 { + if !bytes.Equal(s.buffer[:minLen], s.head[:minLen]) { + s.state = 2 + remaining := s.buffer + s.buffer = nil + return nil, remaining + } + } + + if bufferLen < s.headLen { + return nil, nil + } + + s.state = 1 + if s.headLen == len(s.buffer) { + return nil, nil + } + tailData := s.buffer[s.headLen:] + s.buffer = tailData + return s.processSeekTail() + case 1: + s.buffer = append(s.buffer, data...) 
+ return s.processSeekTail() + default: + return nil, data + } +} + +func (s *Splitter) processSeekTail() ([]byte, []byte) { + data := s.buffer + j := s.partialTailPos + tail := s.tail + tailLen := s.tailLen + kmpNext := s.kmpNext + + var i int + for i = 0; i < len(data); i++ { + for j > 0 && data[i] != tail[j] { + j = kmpNext[j-1] + } + if data[i] == tail[j] { + j++ + if j == tailLen { + end := i - tailLen + 1 + if end < 0 { + end = 0 + } + result := data[:end] + remaining := data[i+1:] + s.buffer = nil + s.state = 2 + s.partialTailPos = 0 + return result, remaining + } + } + } + splitAt := len(data) - j + if splitAt < 0 { + splitAt = 0 + } + result := data[:splitAt] + remainingPart := data[splitAt:] + s.partialTailPos = j + s.buffer = remainingPart + return result, nil +} diff --git a/service/aiproxy/common/splitter/think.go b/service/aiproxy/common/splitter/think.go new file mode 100644 index 00000000000..99b1f3d324e --- /dev/null +++ b/service/aiproxy/common/splitter/think.go @@ -0,0 +1,17 @@ +package splitter + +import "github.com/labring/sealos/service/aiproxy/common/conv" + +const ( + ThinkHead = "\n" + ThinkTail = "\n" +) + +var ( + thinkHeadBytes = conv.StringToBytes(ThinkHead) + thinkTailBytes = conv.StringToBytes(ThinkTail) +) + +func NewThinkSplitter() *Splitter { + return NewSplitter(thinkHeadBytes, thinkTailBytes) +} diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index d0fb3ba0006..8a26c285bd6 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -67,6 +67,9 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht meta.ActualModel = v2Model defer func() { meta.ActualModel = actModel }() } + if meta.ActualModel == "deepseek-r1" { + meta.Set(openai.SplitThinkMetaKey, true) + } return openai.ConvertRequest(meta, req) default: return "", nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) diff 
--git a/service/aiproxy/relay/adaptor/baiduv2/constants.go b/service/aiproxy/relay/adaptor/baiduv2/constants.go index 75d2fc52a5d..61f6f21ef57 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/constants.go +++ b/service/aiproxy/relay/adaptor/baiduv2/constants.go @@ -266,4 +266,29 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigMaxOutputTokens(2048), ), }, + + { + Model: "DeepSeek-V3", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDeepSeek, + InputPrice: 0.0008, + OutputPrice: 0.0016, + RPM: 1000, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(64000), + model.WithModelConfigMaxOutputTokens(8192), + ), + }, + { + Model: "DeepSeek-R1", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDeepSeek, + InputPrice: 0.002, + OutputPrice: 0.008, + RPM: 1000, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(64000), + model.WithModelConfigMaxOutputTokens(8192), + ), + }, } diff --git a/service/aiproxy/relay/adaptor/deepseek/constants.go b/service/aiproxy/relay/adaptor/deepseek/constants.go index 4aa765c8c25..33b7f30a7b4 100644 --- a/service/aiproxy/relay/adaptor/deepseek/constants.go +++ b/service/aiproxy/relay/adaptor/deepseek/constants.go @@ -15,8 +15,21 @@ var ModelList = []*model.ModelConfig{ RPM: 10000, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(64000), - model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigMaxOutputTokens(8192), model.WithModelConfigToolChoice(true), ), }, + + { + Model: "deepseek-reasoner", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerDeepSeek, + InputPrice: 0.004, + OutputPrice: 0.016, + RPM: 10000, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(64000), + model.WithModelConfigMaxOutputTokens(8192), + ), + }, } diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index 1d0155ab393..8ff26c67703 100644 --- 
a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -86,7 +86,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io case relaymode.Embeddings: return ConvertEmbeddingsRequest(meta, req) case relaymode.ChatCompletions: - return ConvertTextRequest(meta, req) + return ConvertTextRequest(meta, req, meta.GetBool(DoNotPatchStreamOptionsIncludeUsageMetaKey)) case relaymode.ImagesGenerations: return ConvertImageRequest(meta, req) case relaymode.AudioTranscription, relaymode.AudioTranslation: @@ -100,6 +100,8 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io } } +const SplitThinkMetaKey = "split_think" + func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { switch meta.Mode { case relaymode.ImagesGenerations: @@ -116,7 +118,7 @@ func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *re fallthrough case relaymode.ChatCompletions: if utils.IsStreamResponse(resp) { - usage, err = StreamHandler(meta, c, resp) + usage, err = StreamHandler(meta, c, resp, meta.GetBool(SplitThinkMetaKey)) } else { usage, err = Handler(meta, c, resp) } @@ -128,14 +130,14 @@ func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *re const DoNotPatchStreamOptionsIncludeUsageMetaKey = "do_not_patch_stream_options_include_usage" -func ConvertTextRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { +func ConvertTextRequest(meta *meta.Meta, req *http.Request, doNotPatchStreamOptionsIncludeUsage bool) (string, http.Header, io.Reader, error) { reqMap := make(map[string]any) err := common.UnmarshalBodyReusable(req, &reqMap) if err != nil { return "", nil, nil, err } - if !meta.GetBool(DoNotPatchStreamOptionsIncludeUsageMetaKey) { + if !doNotPatchStreamOptionsIncludeUsage { if err := patchStreamOptions(reqMap); err != nil { return "", 
nil, nil, err } diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index 5bebc50cba0..c5aa40989f4 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -9,6 +9,7 @@ import ( json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/render" + "github.com/labring/sealos/service/aiproxy/common/splitter" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/gin-gonic/gin" @@ -32,7 +33,7 @@ type UsageAndChoicesResponse struct { Choices []*ChatCompletionsStreamResponseChoice } -func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { +func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, splitThink bool) (*model.Usage, *model.ErrorWithStatusCode) { defer resp.Body.Close() log := middleware.GetLogger(c) @@ -45,6 +46,12 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model common.SetEventStreamHeaders(c) + hasReasoningContent := false + var thinkSplitter *splitter.Splitter + if splitThink { + thinkSplitter = splitter.NewThinkSplitter() + } + for scanner.Scan() { data := scanner.Text() if len(data) < DataPrefixLength { // ignore blank line or wrong format @@ -70,6 +77,9 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model } for _, choice := range streamResponse.Choices { responseText += choice.Delta.StringContent() + if choice.Delta.ReasoningContent != "" { + hasReasoningContent = true + } } respMap := make(map[string]any) err = json.Unmarshal(conv.StringToBytes(data), &respMap) @@ -80,6 +90,12 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model if _, ok := respMap["model"]; ok && meta.OriginModel != "" { respMap["model"] = meta.OriginModel } + if splitThink && !hasReasoningContent { + SplitThink(respMap, thinkSplitter, func(data map[string]any) { + _ = 
render.ObjectData(c, data) + }) + continue + } _ = render.ObjectData(c, respMap) case relaymode.Completions: var streamResponse CompletionsStreamResponse @@ -113,6 +129,37 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model return usage, nil } +// renderCallback maybe reuse data, so don't modify data +func SplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCallback func(data map[string]any)) { + choices, ok := data["choices"].([]any) + if !ok { + return + } + for _, choice := range choices { + choiceMap, ok := choice.(map[string]any) + if !ok { + continue + } + delta, ok := choiceMap["delta"].(map[string]any) + if !ok { + continue + } + content, ok := delta["content"].(string) + if !ok { + continue + } + think, remaining := thinkSplitter.Process(conv.StringToBytes(content)) + delta["content"] = "" + delta["reasoning_content"] = conv.BytesToString(think) + renderCallback(data) + if len(remaining) > 0 { + delta["content"] = conv.BytesToString(remaining) + delta["reasoning_content"] = "" + renderCallback(data) + } + } +} + func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { defer resp.Body.Close() diff --git a/service/aiproxy/relay/adaptor/openaithink/adaptor.go b/service/aiproxy/relay/adaptor/openaithink/adaptor.go new file mode 100644 index 00000000000..7c16df46070 --- /dev/null +++ b/service/aiproxy/relay/adaptor/openaithink/adaptor.go @@ -0,0 +1,26 @@ +package openaithink + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/relay/adaptor" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/meta" + "github.com/labring/sealos/service/aiproxy/relay/model" +) + +var _ adaptor.Adaptor = (*Adaptor)(nil) + +type Adaptor struct { + openai.Adaptor +} + +func (a *Adaptor) GetChannelName() string { + return "openai-think" +} + +func (a *Adaptor) 
DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { + meta.Set(openai.SplitThinkMetaKey, true) + return a.Adaptor.DoResponse(meta, c, resp) +} diff --git a/service/aiproxy/relay/channeltype/define.go b/service/aiproxy/relay/channeltype/define.go index 4a41be3e855..6f8e613276f 100644 --- a/service/aiproxy/relay/channeltype/define.go +++ b/service/aiproxy/relay/channeltype/define.go @@ -26,6 +26,7 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/novita" "github.com/labring/sealos/service/aiproxy/relay/adaptor/ollama" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" + "github.com/labring/sealos/service/aiproxy/relay/adaptor/openaithink" "github.com/labring/sealos/service/aiproxy/relay/adaptor/siliconflow" "github.com/labring/sealos/service/aiproxy/relay/adaptor/stepfun" "github.com/labring/sealos/service/aiproxy/relay/adaptor/tencent" @@ -36,6 +37,7 @@ import ( var ChannelAdaptor = map[int]adaptor.Adaptor{ 1: &openai.Adaptor{}, + 2: &openaithink.Adaptor{}, 3: &azure.Adaptor{}, 12: &geminiopenai.Adaptor{}, 13: &baiduv2.Adaptor{}, From ca5a6e21e4d0324bff5213fe766710019992b42a Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 14:10:19 +0800 Subject: [PATCH 117/167] fix: skip enpty think --- service/aiproxy/relay/adaptor/openai/main.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index c5aa40989f4..1882a153bdc 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -149,9 +149,11 @@ func SplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCal continue } think, remaining := thinkSplitter.Process(conv.StringToBytes(content)) - delta["content"] = "" - delta["reasoning_content"] = conv.BytesToString(think) - renderCallback(data) + if len(think) > 0 { + delta["content"] = "" + 
delta["reasoning_content"] = conv.BytesToString(think) + renderCallback(data) + } if len(remaining) > 0 { delta["content"] = conv.BytesToString(remaining) delta["reasoning_content"] = "" From 09aaea5e9595ed2dae0e266341536fb517cce6bd Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 15:26:52 +0800 Subject: [PATCH 118/167] fix: do not store large resp --- service/aiproxy/controller/relay.go | 5 +++-- service/aiproxy/relay/controller/dohelper.go | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/service/aiproxy/controller/relay.go b/service/aiproxy/controller/relay.go index ac5758042d7..f9c8afcf353 100644 --- a/service/aiproxy/controller/relay.go +++ b/service/aiproxy/controller/relay.go @@ -2,6 +2,7 @@ package controller import ( "bytes" + "context" "errors" "io" "net/http" @@ -55,7 +56,7 @@ func RelayHelper(meta *meta.Meta, c *gin.Context, relayController RelayControlle err := relayController(meta, c) if err == nil { if err := monitor.AddRequest( - c.Request.Context(), + context.Background(), meta.OriginModel, int64(meta.Channel.ID), false, @@ -66,7 +67,7 @@ func RelayHelper(meta *meta.Meta, c *gin.Context, relayController RelayControlle } if shouldRetry(c, err.StatusCode) { if err := monitor.AddRequest( - c.Request.Context(), + context.Background(), meta.OriginModel, int64(meta.Channel.ID), true, diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index 07045245ba7..64e7184edd7 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -30,26 +30,34 @@ func isErrorHappened(resp *http.Response) bool { return resp.StatusCode != http.StatusOK } +const ( + storeResponseBodyMaxSize = 3 * 1024 * 1024 // 3MB +) + type responseWriter struct { gin.ResponseWriter body *bytes.Buffer } func (rw *responseWriter) Write(b []byte) (int, error) { - rw.body.Write(b) + if rw.body.Len() <= storeResponseBodyMaxSize { + 
rw.body.Write(b) + } return rw.ResponseWriter.Write(b) } func (rw *responseWriter) WriteString(s string) (int, error) { - rw.body.WriteString(s) + if rw.body.Len() <= storeResponseBodyMaxSize { + rw.body.WriteString(s) + } return rw.ResponseWriter.WriteString(s) } const ( // 0.5MB defaultBufferSize = 512 * 1024 - // 3MB - maxBufferSize = 3 * 1024 * 1024 + // 2MB + maxBufferSize = 2 * 1024 * 1024 ) var bufferPool = sync.Pool{ From 26bb9c5763b405521f6c93ab13a5cfced3d0c6d3 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 16:48:16 +0800 Subject: [PATCH 119/167] fix: reat limit script --- service/aiproxy/common/rpmlimit/rate-limit.go | 70 +-- service/aiproxy/controller/monitor.go | 73 ++++ service/aiproxy/monitor/model.go | 407 +++++++++++++----- service/aiproxy/router/api.go | 9 + 4 files changed, 385 insertions(+), 174 deletions(-) diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index 0aea231f692..80bcb16af25 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -21,40 +21,10 @@ local window = tonumber(ARGV[1]) local current_time = tonumber(ARGV[2]) local cutoff = current_time - window -local page_size = 100 -local remove_count = 0 - -while true do - local timestamps = redis.call('LRANGE', key, remove_count, remove_count + page_size - 1) - if #timestamps == 0 then - break - end - - local found_non_expired = false - for i = 1, #timestamps do - local timestamp = tonumber(timestamps[i]) - if timestamp < cutoff then - remove_count = remove_count + 1 - else - found_non_expired = true - break - end - end - - if found_non_expired then - break - end -end - -if remove_count > 0 then - redis.call('LTRIM', key, remove_count, -1) -end - -redis.call('RPUSH', key, current_time) - +redis.call('ZREMRANGEBYSCORE', key, '-inf', cutoff) +redis.call('ZADD', key, current_time, current_time) redis.call('PEXPIRE', key, window) - -return redis.call('LLEN', 
key) +return redis.call('ZCOUNT', key, cutoff, current_time) ` var getRequestCountScript = ` @@ -62,42 +32,14 @@ local pattern = ARGV[1] local window = tonumber(ARGV[2]) local current_time = tonumber(ARGV[3]) local cutoff = current_time - window -local page_size = 100 local keys = redis.call('KEYS', pattern) local total = 0 for _, key in ipairs(keys) do - local remove_count = 0 - - while true do - local timestamps = redis.call('LRANGE', key, remove_count, remove_count + page_size - 1) - if #timestamps == 0 then - break - end - - local found_non_expired = false - for i = 1, #timestamps do - local timestamp = tonumber(timestamps[i]) - if timestamp < cutoff then - remove_count = remove_count + 1 - else - found_non_expired = true - break - end - end - - if found_non_expired then - break - end - end - - if remove_count > 0 then - redis.call('LTRIM', key, remove_count, -1) - end - - local total_count = redis.call('LLEN', key) - total = total + total_count + redis.call('ZREMRANGEBYSCORE', key, '-inf', cutoff) + local count = redis.call('ZCOUNT', key, cutoff, current_time) + total = total + count end return total diff --git a/service/aiproxy/controller/monitor.go b/service/aiproxy/controller/monitor.go index b0b429f8999..fbe4be466d0 100644 --- a/service/aiproxy/controller/monitor.go +++ b/service/aiproxy/controller/monitor.go @@ -1 +1,74 @@ package controller + +import ( + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/middleware" + "github.com/labring/sealos/service/aiproxy/monitor" +) + +func GetAllChannelModelErrorRates(c *gin.Context) { + rates, err := monitor.GetAllChannelModelErrorRates(c.Request.Context()) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + c.JSON(http.StatusOK, rates) +} + +func GetChannelModelErrorRates(c *gin.Context) { + channelID := c.Param("id") + channelIDInt, err := strconv.ParseInt(channelID, 10, 64) + if err != nil { + middleware.ErrorResponse(c, 
http.StatusOK, "Invalid channel ID") + return + } + rates, err := monitor.GetChannelModelErrorRates(c.Request.Context(), channelIDInt) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + c.JSON(http.StatusOK, rates) +} + +func ClearAllModelErrors(c *gin.Context) { + err := monitor.ClearAllModelErrors(c.Request.Context()) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + c.Status(http.StatusNoContent) +} + +func ClearChannelAllModelErrors(c *gin.Context) { + channelID := c.Param("id") + channelIDInt, err := strconv.ParseInt(channelID, 10, 64) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "Invalid channel ID") + return + } + err = monitor.ClearChannelAllModelErrors(c.Request.Context(), int(channelIDInt)) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + c.Status(http.StatusNoContent) +} + +func ClearChannelModelErrors(c *gin.Context) { + channelID := c.Param("id") + model := c.Param("model") + channelIDInt, err := strconv.ParseInt(channelID, 10, 64) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, "Invalid channel ID") + return + } + err = monitor.ClearChannelModelErrors(c.Request.Context(), model, int(channelIDInt)) + if err != nil { + middleware.ErrorResponse(c, http.StatusOK, err.Error()) + return + } + c.Status(http.StatusNoContent) +} diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go index 9f1f7822f87..70b477f192b 100644 --- a/service/aiproxy/monitor/model.go +++ b/service/aiproxy/monitor/model.go @@ -2,6 +2,8 @@ package monitor import ( "context" + "fmt" + "strconv" "strings" "time" @@ -11,77 +13,59 @@ import ( log "github.com/sirupsen/logrus" ) -// 使用set存储被永久禁用的channelID -var addRequestScript = redis.NewScript(` - local model = KEYS[1] - local channel_id = ARGV[1] - local error_time_to_live = tonumber(ARGV[2]) - local max_error_rate = tonumber(ARGV[3]) - local is_error = 
tonumber(ARGV[4]) - local ban_time = tonumber(ARGV[5]) - local banned_key = "model:" .. model .. ":banned" - - if redis.call("SISMEMBER", banned_key, channel_id) == 1 then - return 2 - end - - local now_ms = redis.call("TIME")[1] * 1000 + math.floor(redis.call("TIME")[2]/1000) - local expired_time = now_ms - error_time_to_live - local channel_requests_key = "model:" .. model .. ":channel:" .. channel_id .. ":requests" - - redis.call("ZREMRANGEBYSCORE", channel_requests_key, 0, expired_time) - - local request_data = string.format("%d:%d", now_ms, is_error) - redis.call("ZADD", channel_requests_key, now_ms, request_data) - redis.call("PEXPIRE", channel_requests_key, error_time_to_live) - - local total_count = redis.call("ZCARD", channel_requests_key) - if total_count >= 10 then - local error_count = 0 - local requests = redis.call("ZRANGE", channel_requests_key, 0, -1) - for _, request in ipairs(requests) do - local _, status = string.match(request, "(%d+):(%d+)") - if tonumber(status) == 1 then - error_count = error_count + 1 - end - end - local error_rate = error_count / total_count - - if error_rate >= max_error_rate then - redis.call("SADD", banned_key, channel_id) - if ban_time > 0 then - redis.call("PEXPIRE", banned_key, ban_time) - end - redis.call("DEL", channel_requests_key) - return 1 - end - end - - return 0 -`) +// Redis key prefixes and patterns +const ( + modelKeyPrefix = "model:" + bannedKeySuffix = ":banned" + statsKeySuffix = ":stats" + channelKeyPart = ":channel:" +) + +// Redis scripts +var ( + addRequestScript = redis.NewScript(addRequestLuaScript) + getChannelModelErrorRateScript = redis.NewScript(getChannelModelErrorRateLuaScript) + getBannedChannelsScript = redis.NewScript(getBannedChannelsLuaScript) + clearChannelModelErrorsScript = redis.NewScript(clearChannelModelErrorsLuaScript) + clearChannelAllModelErrorsScript = redis.NewScript(clearChannelAllModelErrorsLuaScript) + clearAllModelErrorsScript = 
redis.NewScript(clearAllModelErrorsLuaScript) +) + +// Helper functions +func isFeatureEnabled() bool { + return common.RedisEnabled && config.GetEnableModelErrorAutoBan() +} + +func buildStatsKey(model string, channelID interface{}) string { + return fmt.Sprintf("%s%s%s%v%s", modelKeyPrefix, model, channelKeyPart, channelID, statsKeySuffix) +} +// AddRequest adds a request record and checks if channel should be banned func AddRequest(ctx context.Context, model string, channelID int64, isError bool) error { - if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { + if !isFeatureEnabled() { return nil } + errorFlag := 0 if isError { errorFlag = 1 } - live := 60 * time.Second - banTime := 4 * live + + now := time.Now().UnixMilli() val, err := addRequestScript.Run( ctx, common.RDB, []string{model}, channelID, - live.Milliseconds(), - config.GetModelErrorAutoBanRate(), errorFlag, - banTime.Milliseconds()).Int64() + now, + config.GetModelErrorAutoBanRate(), + time.Second.Milliseconds()*15, + ).Int64() if err != nil { return err } + log.Debugf("add request result: %d", val) if val == 1 { log.Errorf("channel %d model %s is banned", channelID, model) @@ -89,15 +73,48 @@ func AddRequest(ctx context.Context, model string, channelID int64, isError bool return nil } -var getBannedChannelsScript = redis.NewScript(` - local model = KEYS[1] - local banned_key = "model:" .. model .. 
":banned" - - return redis.call("SMEMBERS", banned_key) -`) +// GetChannelModelErrorRates gets error rates for a specific channel +func GetChannelModelErrorRates(ctx context.Context, channelID int64) (map[string]float64, error) { + if !isFeatureEnabled() { + return nil, nil + } + + result := make(map[string]float64) + pattern := buildStatsKey("*", channelID) + now := time.Now().UnixMilli() + + iter := common.RDB.Scan(ctx, 0, pattern, 0).Iterator() + for iter.Next(ctx) { + key := iter.Val() + parts := strings.Split(key, ":") + if len(parts) != 5 || parts[4] != "stats" { + continue + } + model := parts[1] + + rate, err := getChannelModelErrorRateScript.Run( + ctx, + common.RDB, + []string{key}, + now, + ).Float64() + if err != nil { + return nil, err + } + + result[model] = rate + } + + if err := iter.Err(); err != nil { + return nil, err + } + + return result, nil +} +// GetBannedChannels gets banned channels for a specific model func GetBannedChannels(ctx context.Context, model string) ([]int64, error) { - if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { + if !isFeatureEnabled() { return nil, nil } result, err := getBannedChannelsScript.Run(ctx, common.RDB, []string{model}).Int64Slice() @@ -107,79 +124,58 @@ func GetBannedChannels(ctx context.Context, model string) ([]int64, error) { return result, nil } -var clearChannelModelErrorsScript = redis.NewScript(` - local model = KEYS[1] - local channel_id = ARGV[1] - local channel_requests_key = "model:" .. model .. ":channel:" .. channel_id .. ":requests" - local banned_key = "model:" .. model .. 
":banned" - - redis.call("DEL", channel_requests_key) - redis.call("SREM", banned_key, channel_id) - - return redis.status_reply("ok") -`) - +// ClearChannelModelErrors clears errors for a specific channel and model func ClearChannelModelErrors(ctx context.Context, model string, channelID int) error { - if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { + if !isFeatureEnabled() { return nil } - return clearChannelModelErrorsScript.Run(ctx, common.RDB, []string{model}, channelID).Err() + return clearChannelModelErrorsScript.Run( + ctx, + common.RDB, + []string{model}, + strconv.Itoa(channelID), + ).Err() } -var clearChannelAllModelErrorsScript = redis.NewScript(` - local channel_id = ARGV[1] - local banned_key = "model:*:banned" - local channel_requests_pattern = "model:*:channel:" .. channel_id .. ":requests" - - local keys = redis.call("KEYS", channel_requests_pattern) - for _, key in ipairs(keys) do - redis.call("DEL", key) - end - redis.call("SREM", banned_key, channel_id) - - return redis.status_reply("ok") -`) - +// ClearChannelAllModelErrors clears all errors for a specific channel func ClearChannelAllModelErrors(ctx context.Context, channelID int) error { - if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { + if !isFeatureEnabled() { return nil } - return clearChannelAllModelErrorsScript.Run(ctx, common.RDB, []string{}, channelID).Err() + return clearChannelAllModelErrorsScript.Run( + ctx, + common.RDB, + []string{}, + strconv.Itoa(channelID), + ).Err() } -var clearAllModelErrorsScript = redis.NewScript(` - local banned_key = "model:*:banned" - local channel_requests_pattern = "model:*:channel:*:requests" - - local keys = redis.call("KEYS", channel_requests_pattern) - for _, key in ipairs(keys) do - redis.call("DEL", key) - end - redis.call("DEL", banned_key) - - return redis.status_reply("ok") -`) - +// ClearAllModelErrors clears all error records func ClearAllModelErrors(ctx context.Context) error { - if !common.RedisEnabled 
|| !config.GetEnableModelErrorAutoBan() { + if !isFeatureEnabled() { return nil } return clearAllModelErrorsScript.Run(ctx, common.RDB, []string{}).Err() } -// map[model][]channelID +// GetAllBannedChannels gets all banned channels for all models func GetAllBannedChannels(ctx context.Context) (map[string][]int64, error) { - if !common.RedisEnabled || !config.GetEnableModelErrorAutoBan() { + if !isFeatureEnabled() { return nil, nil } result := make(map[string][]int64) - iter := common.RDB.Scan(ctx, 0, "model:*:banned", 0).Iterator() + iter := common.RDB.Scan(ctx, 0, modelKeyPrefix+"*"+bannedKeySuffix, 0).Iterator() + for iter.Next(ctx) { key := iter.Val() - model := strings.TrimPrefix(strings.TrimSuffix(key, ":banned"), "model:") + model := strings.Split(key, ":")[1] - channels, err := getBannedChannelsScript.Run(ctx, common.RDB, []string{model}).Int64Slice() + channels, err := getBannedChannelsScript.Run( + ctx, + common.RDB, + []string{model}, + ).Int64Slice() if err != nil { return nil, err } @@ -192,3 +188,194 @@ func GetAllBannedChannels(ctx context.Context) (map[string][]int64, error) { return result, nil } + +// GetAllChannelModelErrorRates gets error rates for all channels and models +func GetAllChannelModelErrorRates(ctx context.Context) (map[int64]map[string]float64, error) { + if !isFeatureEnabled() { + return nil, nil + } + + result := make(map[int64]map[string]float64) + pattern := modelKeyPrefix + "*" + channelKeyPart + "*" + statsKeySuffix + now := time.Now().UnixMilli() + + iter := common.RDB.Scan(ctx, 0, pattern, 0).Iterator() + for iter.Next(ctx) { + key := iter.Val() + parts := strings.Split(key, ":") + if len(parts) != 5 || parts[4] != "stats" { + continue + } + + model := parts[1] + channelID, err := strconv.ParseInt(parts[3], 10, 64) + if err != nil { + continue + } + + rate, err := getChannelModelErrorRateScript.Run( + ctx, + common.RDB, + []string{key}, + now, + ).Float64() + if err != nil { + return nil, err + } + + if _, exists := 
result[channelID]; !exists { + result[channelID] = make(map[string]float64) + } + result[channelID][model] = rate + } + + if err := iter.Err(); err != nil { + return nil, err + } + + return result, nil +} + +// Lua scripts +const ( + addRequestLuaScript = ` +local model = KEYS[1] +local channel_id = ARGV[1] +local is_error = tonumber(ARGV[2]) +local now_ts = tonumber(ARGV[3]) +local max_error_rate = tonumber(ARGV[4]) +local statsExpiry = tonumber(ARGV[5]) + +local banned_key = "model:" .. model .. ":banned" +local stats_key = "model:" .. model .. ":channel:" .. channel_id .. ":stats" +local maxSliceCount = 6 +local current_slice = math.floor(now_ts / 1000) + +if redis.call("SISMEMBER", banned_key, channel_id) == 1 then + return 2 +end + +local function parse_req_err(value) + if not value then return 0, 0 end + local r, e = value:match("^(%d+):(%d+)$") + return tonumber(r) or 0, tonumber(e) or 0 +end + +local function update_current_slice() + local req, err = parse_req_err(redis.call("HGET", stats_key, current_slice)) + req = req + 1 + err = err + (is_error == 1 and 1 or 0) + redis.call("HSET", stats_key, current_slice, req .. ":" .. 
err) + redis.call("PEXPIRE", stats_key, statsExpiry) + return req, err +end + +local function calculate_error_rate() + local total_req, total_err = 0, 0 + local min_valid_slice = current_slice - maxSliceCount + + local all_slices = redis.call("HGETALL", stats_key) + local to_delete = {} + + for i = 1, #all_slices, 2 do + local slice = tonumber(all_slices[i]) + if slice < min_valid_slice then + table.insert(to_delete, all_slices[i]) + else + local req, err = parse_req_err(all_slices[i+1]) + total_req = total_req + req + total_err = total_err + err + end + end + + if #to_delete > 0 then + redis.call("HDEL", stats_key, unpack(to_delete)) + end + + return total_req, total_err +end + +update_current_slice() +local total_req, total_err = calculate_error_rate() + +if total_req >= 10 and (total_err / total_req) >= max_error_rate then + redis.call("SADD", banned_key, channel_id) + redis.call("DEL", stats_key) + return 1 +end + +return 0 +` + + getChannelModelErrorRateLuaScript = ` +local stats_key = KEYS[1] +local now_ts = tonumber(ARGV[1]) +local maxSliceCount = 6 +local current_slice = math.floor(now_ts / 1000) +local min_valid_slice = current_slice - maxSliceCount + +local function parse_req_err(value) + if not value then return 0, 0 end + local r, e = value:match("^(%d+):(%d+)$") + return tonumber(r) or 0, tonumber(e) or 0 +end + +local total_req, total_err = 0, 0 +local all_slices = redis.call("HGETALL", stats_key) + +for i = 1, #all_slices, 2 do + local slice = tonumber(all_slices[i]) + if slice >= min_valid_slice then + local req, err = parse_req_err(all_slices[i+1]) + total_req = total_req + req + total_err = total_err + err + end +end + +if total_req == 0 then return 0 end +return string.format("%.2f", total_err / total_req) +` + + getBannedChannelsLuaScript = ` +local model = KEYS[1] +return redis.call("SMEMBERS", "model:" .. model .. 
":banned") +` + + clearChannelModelErrorsLuaScript = ` +local model = KEYS[1] +local channel_id = ARGV[1] +local stats_key = "model:" .. model .. ":channel:" .. channel_id .. ":stats" +local banned_key = "model:" .. model .. ":banned" + +redis.call("DEL", stats_key) +redis.call("SREM", banned_key, channel_id) +return redis.status_reply("ok") +` + + clearChannelAllModelErrorsLuaScript = ` +local channel_id = ARGV[1] +local pattern = "model:*:channel:" .. channel_id .. ":stats" +local keys = redis.call("KEYS", pattern) + +for _, key in ipairs(keys) do + redis.call("DEL", key) + local model = string.match(key, "model:(.*):channel:") + if model then + redis.call("SREM", "model:"..model..":banned", channel_id) + end +end +return redis.status_reply("ok") +` + + clearAllModelErrorsLuaScript = ` +local function del_keys(pattern) + local keys = redis.call("KEYS", pattern) + if #keys > 0 then redis.call("DEL", unpack(keys)) end +end + +del_keys("model:*:channel:*:stats") +del_keys("model:*:banned") + +return redis.status_reply("ok") +` +) diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index c5dc93b1f58..4e15b74bf2e 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -143,5 +143,14 @@ func SetAPIRouter(router *gin.Engine) { modelConfigRoute.POST("/", controller.SaveModelConfig) modelConfigRoute.DELETE("/:model", controller.DeleteModelConfig) } + + monitorRoute := apiRouter.Group("/monitor") + { + monitorRoute.GET("/", controller.GetAllChannelModelErrorRates) + monitorRoute.GET("/:id", controller.GetChannelModelErrorRates) + monitorRoute.DELETE("/", controller.ClearAllModelErrors) + monitorRoute.DELETE("/:id", controller.ClearChannelAllModelErrors) + monitorRoute.DELETE("/:id/:model", controller.ClearChannelModelErrors) + } } } From d29418ed78cdfb350b9c273ffc6f95fe403b4065 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 16:55:52 +0800 Subject: [PATCH 120/167] fix: reat limit use micro second --- 
service/aiproxy/common/rpmlimit/rate-limit.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/service/aiproxy/common/rpmlimit/rate-limit.go b/service/aiproxy/common/rpmlimit/rate-limit.go index 80bcb16af25..32b88baa06e 100644 --- a/service/aiproxy/common/rpmlimit/rate-limit.go +++ b/service/aiproxy/common/rpmlimit/rate-limit.go @@ -62,14 +62,13 @@ func GetRPM(ctx context.Context, group, model string) (int64, error) { } rdb := common.RDB - currentTime := time.Now().UnixMilli() result, err := rdb.Eval( ctx, getRequestCountScript, []string{}, pattern, - time.Minute.Milliseconds(), - currentTime, + time.Minute.Microseconds(), + time.Now().UnixMicro(), ).Int64() if err != nil { return 0, err @@ -92,8 +91,8 @@ func PushRequest(ctx context.Context, group, model string, duration time.Duratio []string{ fmt.Sprintf(groupModelRPMKey, group, model), }, - duration.Milliseconds(), - time.Now().UnixMilli(), + duration.Microseconds(), + time.Now().UnixMicro(), ).Int64() if err != nil { return 0, err From 1fe4dc8e84d24481f9de71bc75ccb3205a44d63c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 7 Feb 2025 22:09:38 +0800 Subject: [PATCH 121/167] fix: ignore gemini input count error --- service/aiproxy/relay/adaptor/anthropic/main.go | 7 +++---- service/aiproxy/relay/adaptor/gemini/main.go | 5 +++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 7b26cf66cca..ea3595632e2 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -6,14 +6,13 @@ import ( "slices" "time" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/conv" + "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/common/render" 
"github.com/labring/sealos/service/aiproxy/middleware" - - "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/image" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/constant" "github.com/labring/sealos/service/aiproxy/relay/meta" diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index 92f2664596d..e155055b391 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -200,9 +200,10 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io tokenCount, err := CountTokens(req.Context(), meta, contents) if err != nil { - return "", nil, nil, err + log.Error("count tokens failed: " + err.Error()) + } else { + meta.InputTokens = tokenCount } - meta.InputTokens = tokenCount // Build actual request geminiRequest := ChatRequest{ From 592f96451e49b801f9e8bdb71ea8a6a5fb2b61dc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 10:25:57 +0800 Subject: [PATCH 122/167] feat: calude model config --- .../relay/adaptor/anthropic/constants.go | 91 ++++++++++++------- 1 file changed, 58 insertions(+), 33 deletions(-) diff --git a/service/aiproxy/relay/adaptor/anthropic/constants.go b/service/aiproxy/relay/adaptor/anthropic/constants.go index 119e019b40e..c58eee144db 100644 --- a/service/aiproxy/relay/adaptor/anthropic/constants.go +++ b/service/aiproxy/relay/adaptor/anthropic/constants.go @@ -7,48 +7,73 @@ import ( var ModelList = []*model.ModelConfig{ { - Model: "claude-instant-1.2", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, + Model: "claude-3-haiku-20240307", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAnthropic, + InputPrice: 0.0025, + OutputPrice: 0.0125, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(200000), + 
model.WithModelConfigMaxOutputTokens(4096), + ), }, { - Model: "claude-2.0", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, + Model: "claude-3-opus-20240229", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAnthropic, + InputPrice: 0.015, + OutputPrice: 0.075, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(200000), + model.WithModelConfigMaxOutputTokens(4096), + ), }, { - Model: "claude-2.1", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, + Model: "claude-3-5-haiku-20241022", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAnthropic, + InputPrice: 0.0008, + OutputPrice: 0.004, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(200000), + model.WithModelConfigMaxOutputTokens(4096), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "claude-3-haiku-20240307", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, + Model: "claude-3-5-sonnet-20240620", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAnthropic, + InputPrice: 0.003, + OutputPrice: 0.015, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(200000), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "claude-3-sonnet-20240229", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, + Model: "claude-3-5-sonnet-20241022", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAnthropic, + InputPrice: 0.003, + OutputPrice: 0.015, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(200000), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, { - Model: "claude-3-opus-20240229", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, - }, - { - Model: "claude-3-5-sonnet-20240620", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, - }, - { - Model: 
"claude-3-5-sonnet-20241022", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, - }, - { - Model: "claude-3-5-sonnet-latest", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAnthropic, + Model: "claude-3-5-sonnet-latest", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAnthropic, + InputPrice: 0.003, + OutputPrice: 0.015, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(200000), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + ), }, } From 526d6194ac5846dc68a38ff82c83a409fba011c3 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 10:57:59 +0800 Subject: [PATCH 123/167] fix: claude stream usage resp --- .../aiproxy/relay/adaptor/anthropic/main.go | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index ea3595632e2..0ed50e23e8d 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -260,7 +260,7 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse { return &fullTextResponse } -func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) { +func StreamHandler(m *meta.Meta, c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) { defer resp.Body.Close() log := middleware.GetLogger(c) @@ -283,7 +283,6 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er common.SetEventStreamHeaders(c) var usage model.Usage - var modelName string var id string var lastToolCallChoice *openai.ChatCompletionsStreamResponseChoice @@ -313,7 +312,6 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er usage.PromptTokens += meta.Usage.InputTokens usage.CompletionTokens += meta.Usage.OutputTokens if len(meta.ID) > 0 { // 
only message_start has an id, otherwise it's a finish_reason event. - modelName = meta.Model id = "chatcmpl-" + meta.ID continue } @@ -328,7 +326,7 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er } response.ID = id - response.Model = modelName + response.Model = m.OriginModel response.Created = createdTime for _, choice := range response.Choices { @@ -343,6 +341,20 @@ func StreamHandler(_ *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er log.Error("error reading stream: " + err.Error()) } + if usage.CompletionTokens == 0 && usage.PromptTokens == 0 { + usage.PromptTokens = m.InputTokens + } + + usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens + + _ = render.ObjectData(c, &openai.ChatCompletionsStreamResponse{ + Model: m.OriginModel, + Object: "chat.completion.chunk", + Created: createdTime, + Choices: []*openai.ChatCompletionsStreamResponseChoice{}, + Usage: &usage, + }) + render.Done(c) return nil, &usage From 8376d87e16dd277e02cfa410e1099a3b6d611585 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 11:23:15 +0800 Subject: [PATCH 124/167] fix: claude stream usage resp --- service/aiproxy/relay/adaptor/anthropic/main.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 0ed50e23e8d..6369be50756 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -311,10 +311,12 @@ func StreamHandler(m *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er if meta != nil { usage.PromptTokens += meta.Usage.InputTokens usage.CompletionTokens += meta.Usage.OutputTokens + usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens if len(meta.ID) > 0 { // only message_start has an id, otherwise it's a finish_reason event. 
id = "chatcmpl-" + meta.ID continue } + response.Usage = &usage if lastToolCallChoice != nil && len(lastToolCallChoice.Delta.ToolCalls) > 0 { lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function if len(lastArgs.Arguments) == 0 { // compatible with OpenAI sending an empty object `{}` when no arguments. @@ -347,14 +349,6 @@ func StreamHandler(m *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens - _ = render.ObjectData(c, &openai.ChatCompletionsStreamResponse{ - Model: m.OriginModel, - Object: "chat.completion.chunk", - Created: createdTime, - Choices: []*openai.ChatCompletionsStreamResponseChoice{}, - Usage: &usage, - }) - render.Done(c) return nil, &usage From cb51b256ebdb70659ad6209883f81572df0286ff Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 11:24:52 +0800 Subject: [PATCH 125/167] fix: claude stream usage resp --- service/aiproxy/relay/adaptor/anthropic/main.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/service/aiproxy/relay/adaptor/anthropic/main.go b/service/aiproxy/relay/adaptor/anthropic/main.go index 6369be50756..54576245b87 100644 --- a/service/aiproxy/relay/adaptor/anthropic/main.go +++ b/service/aiproxy/relay/adaptor/anthropic/main.go @@ -285,6 +285,7 @@ func StreamHandler(m *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er var usage model.Usage var id string var lastToolCallChoice *openai.ChatCompletionsStreamResponseChoice + var usageWrited bool for scanner.Scan() { data := scanner.Bytes() @@ -317,6 +318,8 @@ func StreamHandler(m *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er continue } response.Usage = &usage + usageWrited = true + if lastToolCallChoice != nil && len(lastToolCallChoice.Delta.ToolCalls) > 0 { lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function if len(lastArgs.Arguments) == 0 { // compatible with OpenAI 
sending an empty object `{}` when no arguments. @@ -349,6 +352,16 @@ func StreamHandler(m *meta.Meta, c *gin.Context, resp *http.Response) (*model.Er usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens + if !usageWrited { + _ = render.ObjectData(c, &openai.ChatCompletionsStreamResponse{ + Model: m.OriginModel, + Object: "chat.completion.chunk", + Created: createdTime, + Choices: []*openai.ChatCompletionsStreamResponseChoice{}, + Usage: &usage, + }) + } + render.Done(c) return nil, &usage From a14a7eed9f920e3fb0c7ad5c2fa7a0e02c27c972 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 14:59:08 +0800 Subject: [PATCH 126/167] feat: auto create sqlite dir --- service/aiproxy/model/main.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/model/main.go b/service/aiproxy/model/main.go index c3151766b4a..6d5cbb109fd 100644 --- a/service/aiproxy/model/main.go +++ b/service/aiproxy/model/main.go @@ -3,6 +3,7 @@ package model import ( "fmt" "os" + "path/filepath" "strings" "time" @@ -88,8 +89,15 @@ func openMySQL(dsn string) (*gorm.DB, error) { } func openSQLite() (*gorm.DB, error) { - log.Info("SQL_DSN not set, using SQLite as database") + log.Info("SQL_DSN not set, using SQLite as database: ", common.SQLitePath) common.UsingSQLite = true + + baseDir := filepath.Dir(common.SQLitePath) + if err := os.MkdirAll(baseDir, 0o755); err != nil { + log.Fatal("failed to create base directory: " + err.Error()) + return nil, err + } + dsn := fmt.Sprintf("%s?_busy_timeout=%d", common.SQLitePath, common.SQLiteBusyTimeout) return gorm.Open(sqlite.Open(dsn), &gorm.Config{ PrepareStmt: true, // precompile SQL From 1e5f06886ab13038ce2be98e6892f93582491191 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 16:04:11 +0800 Subject: [PATCH 127/167] feat: log detail body truncated --- service/aiproxy/model/log.go | 12 +++++++----- service/aiproxy/relay/controller/dohelper.go | 16 +++++++++++++++- 
service/aiproxy/relay/controller/handle.go | 13 +++---------- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 649755f2bbb..a1fc1a7cbe3 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -18,11 +18,13 @@ import ( ) type RequestDetail struct { - CreatedAt time.Time `gorm:"autoCreateTime" json:"-"` - RequestBody string `gorm:"type:text" json:"request_body,omitempty"` - ResponseBody string `gorm:"type:text" json:"response_body,omitempty"` - ID int `json:"id"` - LogID int `json:"log_id"` + CreatedAt time.Time `gorm:"autoCreateTime" json:"-"` + RequestBody string `gorm:"type:text" json:"request_body,omitempty"` + ResponseBody string `gorm:"type:text" json:"response_body,omitempty"` + RequestBodyTruncated bool `json:"request_body_truncated,omitempty"` + ResponseBodyTruncated bool `json:"response_body_truncated,omitempty"` + ID int `json:"id"` + LogID int `json:"log_id"` } type Log struct { diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index 64e7184edd7..a7300056929 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -36,12 +36,15 @@ const ( type responseWriter struct { gin.ResponseWriter - body *bytes.Buffer + body *bytes.Buffer + truncated bool } func (rw *responseWriter) Write(b []byte) (int, error) { if rw.body.Len() <= storeResponseBodyMaxSize { rw.body.Write(b) + } else { + rw.truncated = true } return rw.ResponseWriter.Write(b) } @@ -49,6 +52,8 @@ func (rw *responseWriter) Write(b []byte) (int, error) { func (rw *responseWriter) WriteString(s string) (int, error) { if rw.body.Len() <= storeResponseBodyMaxSize { rw.body.WriteString(s) + } else { + rw.truncated = true } return rw.ResponseWriter.WriteString(s) } @@ -120,6 +125,10 @@ func DoHelper( return usage, &detail, nil } +const ( + requestBodyMaxSize = 2 * 1024 * 1024 // 2MB +) + func 
getRequestBody(meta *meta.Meta, c *gin.Context, detail *model.RequestDetail) *relaymodel.ErrorWithStatusCode { switch meta.Mode { case relaymode.AudioTranscription, relaymode.AudioTranslation: @@ -129,6 +138,10 @@ func getRequestBody(meta *meta.Meta, c *gin.Context, detail *model.RequestDetail if err != nil { return openai.ErrorWrapperWithMessage("get request body failed: "+err.Error(), "get_request_body_failed", http.StatusBadRequest) } + if len(reqBody) > requestBodyMaxSize { + reqBody = reqBody[:requestBodyMaxSize] + detail.RequestBodyTruncated = true + } detail.RequestBody = conv.BytesToString(reqBody) return nil } @@ -210,6 +223,7 @@ func handleSuccessResponse(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, r c.Header("Content-Type", resp.Header.Get("Content-Type")) usage, relayErr := a.DoResponse(meta, c, resp) detail.ResponseBody = rw.body.String() + detail.ResponseBodyTruncated = rw.truncated return usage, relayErr } diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index d2170d5082d..f1a5ff97fd0 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -7,10 +7,8 @@ import ( "strconv" "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/consume" - "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" @@ -61,14 +59,9 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa preCheckReq, err := preProcess() if err != nil { log.Errorf("pre-process request failed: %s", err.Error()) - var detail *model.RequestDetail - body, bodyErr := common.GetRequestBody(c.Request) - if bodyErr != nil { - log.Errorf("get request body failed: %s", 
bodyErr.Error()) - } else { - detail = &model.RequestDetail{ - RequestBody: conv.BytesToString(body), - } + detail := &model.RequestDetail{} + if err := getRequestBody(meta, c, detail); err != nil { + log.Errorf("get request body failed: %v", err.Error) } consume.AsyncConsume( nil, From 1838887e67a52150e16ba75836928e3514d92421 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 16:53:34 +0800 Subject: [PATCH 128/167] chore: add body conv comment --- service/aiproxy/relay/controller/dohelper.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index a7300056929..e6caf65dd8d 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -222,6 +222,8 @@ func handleSuccessResponse(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, r c.Header("Content-Type", resp.Header.Get("Content-Type")) usage, relayErr := a.DoResponse(meta, c, resp) + // copy body buffer + // do not use bytes conv detail.ResponseBody = rw.body.String() + detail.ResponseBodyTruncated = rw.truncated From b2a795996686c50f52129e866f624f2985fdc829 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 17:12:14 +0800 Subject: [PATCH 129/167] feat: monitor ignore error rate compute when is success request --- service/aiproxy/monitor/model.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/service/aiproxy/monitor/model.go b/service/aiproxy/monitor/model.go index 70b477f192b..fca8217efdb 100644 --- a/service/aiproxy/monitor/model.go +++ b/service/aiproxy/monitor/model.go @@ -296,6 +296,11 @@ local function calculate_error_rate() end update_current_slice() + +if is_error == 0 then + return 0 +end + local total_req, total_err = calculate_error_rate() if total_req >= 10 and (total_err / total_req) >= max_error_rate then From 0e7eb14bf0232be3ec7130e9bc945a1b72c1cdfe Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sat, 8 Feb 2025 22:27:21 +0800 
Subject: [PATCH 130/167] feat: ollama usage support --- .../fastJSONSerializer/fastJSONSerializer.go | 1 - .../aiproxy/relay/adaptor/ollama/adaptor.go | 19 +++----- service/aiproxy/relay/adaptor/ollama/main.go | 45 ++++++++++++------- service/aiproxy/relay/adaptor/ollama/model.go | 8 ++-- 4 files changed, 38 insertions(+), 35 deletions(-) diff --git a/service/aiproxy/common/fastJSONSerializer/fastJSONSerializer.go b/service/aiproxy/common/fastJSONSerializer/fastJSONSerializer.go index 98a55ae32cf..32fe8e48358 100644 --- a/service/aiproxy/common/fastJSONSerializer/fastJSONSerializer.go +++ b/service/aiproxy/common/fastJSONSerializer/fastJSONSerializer.go @@ -7,7 +7,6 @@ import ( json "github.com/json-iterator/go" "github.com/labring/sealos/service/aiproxy/common/conv" - "gorm.io/gorm/schema" ) diff --git a/service/aiproxy/relay/adaptor/ollama/adaptor.go b/service/aiproxy/relay/adaptor/ollama/adaptor.go index 8e3761a5821..e8a7b1dc3b0 100644 --- a/service/aiproxy/relay/adaptor/ollama/adaptor.go +++ b/service/aiproxy/relay/adaptor/ollama/adaptor.go @@ -6,13 +6,12 @@ import ( "io" "net/http" + "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/meta" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/labring/sealos/service/aiproxy/relay/utils" - - "github.com/gin-gonic/gin" - relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" ) type Adaptor struct{} @@ -58,23 +57,15 @@ func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*h return utils.DoRequest(req) } -func (a *Adaptor) ConvertSTTRequest(*http.Request) (io.Reader, error) { - return nil, errors.New("not implemented") -} - -func (a *Adaptor) ConvertTTSRequest(*relaymodel.TextToSpeechRequest) (any, error) { - return nil, errors.New("not implemented") -} - func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, 
resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { switch meta.Mode { case relaymode.Embeddings: - err, usage = EmbeddingHandler(c, resp) + err, usage = EmbeddingHandler(meta, c, resp) default: if utils.IsStreamResponse(resp) { - err, usage = StreamHandler(c, resp) + err, usage = StreamHandler(meta, c, resp) } else { - err, usage = Handler(c, resp) + err, usage = Handler(meta, c, resp) } } return diff --git a/service/aiproxy/relay/adaptor/ollama/main.go b/service/aiproxy/relay/adaptor/ollama/main.go index 27ed57c1ab0..54dfefe9464 100644 --- a/service/aiproxy/relay/adaptor/ollama/main.go +++ b/service/aiproxy/relay/adaptor/ollama/main.go @@ -75,7 +75,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io return http.MethodPost, nil, bytes.NewReader(data), nil } -func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse { +func responseOllama2OpenAI(meta *meta.Meta, response *ChatResponse) *openai.TextResponse { choice := openai.TextResponseChoice{ Index: 0, Message: relaymodel.Message{ @@ -88,7 +88,7 @@ func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse { } fullTextResponse := openai.TextResponse{ ID: "chatcmpl-" + random.GetUUID(), - Model: response.Model, + Model: meta.OriginModel, Object: "chat.completion", Created: time.Now().Unix(), Choices: []*openai.TextResponseChoice{&choice}, @@ -101,7 +101,7 @@ func responseOllama2OpenAI(response *ChatResponse) *openai.TextResponse { return &fullTextResponse } -func streamResponseOllama2OpenAI(ollamaResponse *ChatResponse) *openai.ChatCompletionsStreamResponse { +func streamResponseOllama2OpenAI(meta *meta.Meta, ollamaResponse *ChatResponse) *openai.ChatCompletionsStreamResponse { var choice openai.ChatCompletionsStreamResponseChoice choice.Delta.Role = ollamaResponse.Message.Role choice.Delta.Content = ollamaResponse.Message.Content @@ -112,13 +112,22 @@ func streamResponseOllama2OpenAI(ollamaResponse *ChatResponse) 
*openai.ChatCompl ID: "chatcmpl-" + random.GetUUID(), Object: "chat.completion.chunk", Created: time.Now().Unix(), - Model: ollamaResponse.Model, + Model: meta.OriginModel, Choices: []*openai.ChatCompletionsStreamResponseChoice{&choice}, } + + if ollamaResponse.EvalCount != 0 { + response.Usage = &relaymodel.Usage{ + PromptTokens: ollamaResponse.PromptEvalCount, + CompletionTokens: ollamaResponse.EvalCount, + TotalTokens: ollamaResponse.PromptEvalCount + ollamaResponse.EvalCount, + } + } + return &response } -func StreamHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { +func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { defer resp.Body.Close() log := middleware.GetLogger(c) @@ -153,13 +162,12 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithSt continue } - if ollamaResponse.EvalCount != 0 { - usage.PromptTokens = ollamaResponse.PromptEvalCount - usage.CompletionTokens = ollamaResponse.EvalCount - usage.TotalTokens = ollamaResponse.PromptEvalCount + ollamaResponse.EvalCount + response := streamResponseOllama2OpenAI(meta, &ollamaResponse) + + if response.Usage != nil { + usage = *response.Usage } - response := streamResponseOllama2OpenAI(&ollamaResponse) _ = render.ObjectData(c, response) } @@ -195,7 +203,7 @@ func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.H return http.MethodPost, nil, bytes.NewReader(data), nil } -func EmbeddingHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { +func EmbeddingHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { defer resp.Body.Close() var ollamaResponse EmbeddingResponse @@ -216,7 +224,7 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWit }, nil } - fullTextResponse := 
embeddingResponseOllama2OpenAI(&ollamaResponse) + fullTextResponse := embeddingResponseOllama2OpenAI(meta, &ollamaResponse) jsonResponse, err := json.Marshal(fullTextResponse) if err != nil { return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil @@ -227,12 +235,15 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWit return nil, &fullTextResponse.Usage } -func embeddingResponseOllama2OpenAI(response *EmbeddingResponse) *openai.EmbeddingResponse { +func embeddingResponseOllama2OpenAI(meta *meta.Meta, response *EmbeddingResponse) *openai.EmbeddingResponse { openAIEmbeddingResponse := openai.EmbeddingResponse{ Object: "list", Data: make([]*openai.EmbeddingResponseItem, 0, 1), - Model: response.Model, - Usage: relaymodel.Usage{TotalTokens: 0}, + Model: meta.OriginModel, + Usage: relaymodel.Usage{ + PromptTokens: response.PromptEvalCount, + TotalTokens: response.PromptEvalCount, + }, } for i, embedding := range response.Embeddings { @@ -245,7 +256,7 @@ func embeddingResponseOllama2OpenAI(response *EmbeddingResponse) *openai.Embeddi return &openAIEmbeddingResponse } -func Handler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { +func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) { defer resp.Body.Close() var ollamaResponse ChatResponse @@ -264,7 +275,7 @@ func Handler(c *gin.Context, resp *http.Response) (*relaymodel.ErrorWithStatusCo StatusCode: resp.StatusCode, }, nil } - fullTextResponse := responseOllama2OpenAI(&ollamaResponse) + fullTextResponse := responseOllama2OpenAI(meta, &ollamaResponse) jsonResponse, err := json.Marshal(fullTextResponse) if err != nil { return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil diff --git a/service/aiproxy/relay/adaptor/ollama/model.go b/service/aiproxy/relay/adaptor/ollama/model.go index 
7dc4c773c89..454af1823c8 100644 --- a/service/aiproxy/relay/adaptor/ollama/model.go +++ b/service/aiproxy/relay/adaptor/ollama/model.go @@ -22,6 +22,7 @@ type ChatRequest struct { Model string `json:"model,omitempty"` Messages []Message `json:"messages,omitempty"` Stream bool `json:"stream"` + Format any `json:"format,omitempty"` } type ChatResponse struct { @@ -45,7 +46,8 @@ type EmbeddingRequest struct { } type EmbeddingResponse struct { - Error string `json:"error,omitempty"` - Model string `json:"model"` - Embeddings [][]float64 `json:"embeddings"` + Error string `json:"error,omitempty"` + Model string `json:"model"` + Embeddings [][]float64 `json:"embeddings"` + PromptEvalCount int `json:"prompt_eval_count,omitempty"` } From f49ec83d5f86ff6f69f4652f1cbeb6f0b0360dc1 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 9 Feb 2025 01:15:00 +0800 Subject: [PATCH 131/167] feat: baseurl embed v1 prefix --- service/aiproxy/controller/channel-billing.go | 3 +- service/aiproxy/controller/misc.go | 3 +- service/aiproxy/go.mod | 2 +- service/aiproxy/model/channel.go | 1 - service/aiproxy/model/option.go | 1 - .../aiproxy/relay/adaptor/ai360/adaptor.go | 2 +- .../aiproxy/relay/adaptor/azure/constants.go | 9 ----- .../aiproxy/relay/adaptor/baichuan/adaptor.go | 2 +- .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 5 +-- .../aiproxy/relay/adaptor/deepseek/adaptor.go | 2 +- .../aiproxy/relay/adaptor/deepseek/balance.go | 1 - .../relay/adaptor/geminiopenai/adaptor.go | 1 - service/aiproxy/relay/adaptor/groq/adaptor.go | 2 +- .../relay/adaptor/lingyiwanwu/adaptor.go | 2 +- .../aiproxy/relay/adaptor/minimax/adaptor.go | 8 ++-- .../aiproxy/relay/adaptor/mistral/adaptor.go | 2 +- .../aiproxy/relay/adaptor/moonshot/adaptor.go | 2 +- .../aiproxy/relay/adaptor/openai/adaptor.go | 12 +----- service/aiproxy/relay/adaptor/openai/main.go | 8 ++-- .../relay/adaptor/siliconflow/adaptor.go | 2 +- .../relay/adaptor/siliconflow/image.go | 1 - .../aiproxy/relay/adaptor/stepfun/adaptor.go | 2 +- 
.../aiproxy/relay/adaptor/tencent/adaptor.go | 2 +- .../relay/adaptor/togetherai/constants.go | 40 ------------------- .../aiproxy/relay/adaptor/vertexai/token.go | 5 +-- .../aiproxy/relay/adaptor/xunfei/adaptor.go | 2 +- .../aiproxy/relay/adaptor/zhipu/adaptor.go | 19 ++------- service/aiproxy/relay/controller/dohelper.go | 2 + service/aiproxy/relay/controller/handle.go | 2 +- 29 files changed, 34 insertions(+), 111 deletions(-) delete mode 100644 service/aiproxy/relay/adaptor/togetherai/constants.go diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index e31a672a4c1..f569b60f1cf 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -6,6 +6,7 @@ import ( "strconv" "time" + "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common/balance" "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/model" @@ -13,8 +14,6 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/channeltype" log "github.com/sirupsen/logrus" - - "github.com/gin-gonic/gin" ) // https://github.com/labring/sealos/service/aiproxy/issues/79 diff --git a/service/aiproxy/controller/misc.go b/service/aiproxy/controller/misc.go index 83f10290ff2..21ed6f03b30 100644 --- a/service/aiproxy/controller/misc.go +++ b/service/aiproxy/controller/misc.go @@ -1,10 +1,9 @@ package controller import ( + "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/middleware" - - "github.com/gin-gonic/gin" ) type StatusData struct { diff --git a/service/aiproxy/go.mod b/service/aiproxy/go.mod index 24a10f8b386..927fee00171 100644 --- a/service/aiproxy/go.mod +++ b/service/aiproxy/go.mod @@ -32,6 +32,7 @@ require ( github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef github.com/stretchr/testify v1.10.0 
golang.org/x/image v0.23.0 + golang.org/x/sync v0.10.0 google.golang.org/api v0.214.0 gorm.io/driver/mysql v1.5.7 gorm.io/driver/postgres v1.5.11 @@ -100,7 +101,6 @@ require ( golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index c9c07bd3bad..401d2d4693a 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -7,7 +7,6 @@ import ( "time" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/config" "gorm.io/gorm" diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index c4c647bc5ba..f3aa912c900 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -11,7 +11,6 @@ import ( "time" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/config" "github.com/labring/sealos/service/aiproxy/common/conv" log "github.com/sirupsen/logrus" diff --git a/service/aiproxy/relay/adaptor/ai360/adaptor.go b/service/aiproxy/relay/adaptor/ai360/adaptor.go index e53cc50b10a..463fdb57a85 100644 --- a/service/aiproxy/relay/adaptor/ai360/adaptor.go +++ b/service/aiproxy/relay/adaptor/ai360/adaptor.go @@ -10,7 +10,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://ai.360.cn" +const baseURL = "https://ai.360.cn/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/azure/constants.go b/service/aiproxy/relay/adaptor/azure/constants.go index 257f94a9411..0eab4e89c88 100644 --- a/service/aiproxy/relay/adaptor/azure/constants.go +++ 
b/service/aiproxy/relay/adaptor/azure/constants.go @@ -40,15 +40,6 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { } } -func GetFullRequestURL(baseURL string, requestURL string) string { - fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL) - - if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") { - fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1")) - } - return fullRequestURL -} - func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { token, _, err := getTokenAndAPIVersion(meta.Channel.Key) if err != nil { diff --git a/service/aiproxy/relay/adaptor/baichuan/adaptor.go b/service/aiproxy/relay/adaptor/baichuan/adaptor.go index 9a7a88adbf2..4eba1883fe1 100644 --- a/service/aiproxy/relay/adaptor/baichuan/adaptor.go +++ b/service/aiproxy/relay/adaptor/baichuan/adaptor.go @@ -10,7 +10,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.baichuan-ai.com" +const baseURL = "https://api.baichuan-ai.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index 8a26c285bd6..d42ab6b67a3 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -7,14 +7,13 @@ import ( "net/http" "strings" + "github.com/gin-gonic/gin" "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" + relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/labring/sealos/service/aiproxy/relay/utils" - - "github.com/gin-gonic/gin" - relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" ) type Adaptor struct{} diff --git 
a/service/aiproxy/relay/adaptor/deepseek/adaptor.go b/service/aiproxy/relay/adaptor/deepseek/adaptor.go index 4acb32802b0..a9145bc5829 100644 --- a/service/aiproxy/relay/adaptor/deepseek/adaptor.go +++ b/service/aiproxy/relay/adaptor/deepseek/adaptor.go @@ -13,7 +13,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.deepseek.com" +const baseURL = "https://api.deepseek.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/deepseek/balance.go b/service/aiproxy/relay/adaptor/deepseek/balance.go index 9ba1a95413e..0d990adeb61 100644 --- a/service/aiproxy/relay/adaptor/deepseek/balance.go +++ b/service/aiproxy/relay/adaptor/deepseek/balance.go @@ -7,7 +7,6 @@ import ( "strconv" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor" ) diff --git a/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go b/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go index 749e5d50727..5e6d5a890ff 100644 --- a/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go +++ b/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go @@ -17,7 +17,6 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { meta.Channel.BaseURL = baseURL } - meta.Set(openai.MetaBaseURLNoV1, true) return a.Adaptor.GetRequestURL(meta) } diff --git a/service/aiproxy/relay/adaptor/groq/adaptor.go b/service/aiproxy/relay/adaptor/groq/adaptor.go index 2dfb7380a20..9a7ddc16259 100644 --- a/service/aiproxy/relay/adaptor/groq/adaptor.go +++ b/service/aiproxy/relay/adaptor/groq/adaptor.go @@ -10,7 +10,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.groq.com/openai" +const baseURL = "https://api.groq.com/openai/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git 
a/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go b/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go index d271ba594de..315c0ad15ed 100644 --- a/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go +++ b/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go @@ -10,7 +10,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.lingyiwanwu.com" +const baseURL = "https://api.lingyiwanwu.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/minimax/adaptor.go b/service/aiproxy/relay/adaptor/minimax/adaptor.go index 40050cc101c..7de3c8d3432 100644 --- a/service/aiproxy/relay/adaptor/minimax/adaptor.go +++ b/service/aiproxy/relay/adaptor/minimax/adaptor.go @@ -17,7 +17,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.minimax.chat" +const baseURL = "https://api.minimax.chat/v1" func (a *Adaptor) GetModelList() []*model.ModelConfig { return ModelList @@ -42,11 +42,11 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { } switch meta.Mode { case relaymode.ChatCompletions: - return meta.Channel.BaseURL + "/v1/text/chatcompletion_v2", nil + return meta.Channel.BaseURL + "/text/chatcompletion_v2", nil case relaymode.Embeddings: - return fmt.Sprintf("%s/v1/embeddings?GroupId=%s", meta.Channel.BaseURL, groupID), nil + return fmt.Sprintf("%s/embeddings?GroupId=%s", meta.Channel.BaseURL, groupID), nil case relaymode.AudioSpeech: - return fmt.Sprintf("%s/v1/t2a_v2?GroupId=%s", meta.Channel.BaseURL, groupID), nil + return fmt.Sprintf("%s/t2a_v2?GroupId=%s", meta.Channel.BaseURL, groupID), nil default: return a.Adaptor.GetRequestURL(meta) } diff --git a/service/aiproxy/relay/adaptor/mistral/adaptor.go b/service/aiproxy/relay/adaptor/mistral/adaptor.go index f27bcff71f9..db34757fd3a 100644 --- a/service/aiproxy/relay/adaptor/mistral/adaptor.go +++ b/service/aiproxy/relay/adaptor/mistral/adaptor.go @@ -10,7 +10,7 @@ type 
Adaptor struct { openai.Adaptor } -const baseURL = "https://api.mistral.ai" +const baseURL = "https://api.mistral.ai/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/moonshot/adaptor.go b/service/aiproxy/relay/adaptor/moonshot/adaptor.go index 0ad45bf456d..fdf5c9ab31e 100644 --- a/service/aiproxy/relay/adaptor/moonshot/adaptor.go +++ b/service/aiproxy/relay/adaptor/moonshot/adaptor.go @@ -10,7 +10,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.moonshot.cn" +const baseURL = "https://api.moonshot.cn/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index 8ff26c67703..ff35c9a3e48 100644 --- a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -3,10 +3,8 @@ package openai import ( "bytes" "errors" - "fmt" "io" "net/http" - "strings" "github.com/gin-gonic/gin" json "github.com/json-iterator/go" @@ -23,9 +21,7 @@ var _ adaptor.Adaptor = (*Adaptor)(nil) type Adaptor struct{} -const baseURL = "https://api.openai.com" - -const MetaBaseURLNoV1 = "base_url_no_v1" +const baseURL = "https://api.openai.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { u := meta.Channel.BaseURL @@ -59,11 +55,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return "", errors.New("unsupported mode") } - if meta.GetBool(MetaBaseURLNoV1) || - (strings.HasPrefix(u, "https://gateway.ai.cloudflare.com") && strings.HasSuffix(u, "/openai")) { - return u + path, nil - } - return fmt.Sprintf("%s/v1%s", u, path), nil + return u + path, nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { diff --git a/service/aiproxy/relay/adaptor/openai/main.go 
b/service/aiproxy/relay/adaptor/openai/main.go index 1882a153bdc..a2694bfdedc 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -6,15 +6,13 @@ import ( "net/http" "strings" + "github.com/gin-gonic/gin" json "github.com/json-iterator/go" - + "github.com/labring/sealos/service/aiproxy/common" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/common/render" "github.com/labring/sealos/service/aiproxy/common/splitter" "github.com/labring/sealos/service/aiproxy/middleware" - - "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/common" - "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/relay/meta" "github.com/labring/sealos/service/aiproxy/relay/model" "github.com/labring/sealos/service/aiproxy/relay/relaymode" diff --git a/service/aiproxy/relay/adaptor/siliconflow/adaptor.go b/service/aiproxy/relay/adaptor/siliconflow/adaptor.go index 002aa9fb7d8..5c890c79049 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/adaptor.go +++ b/service/aiproxy/relay/adaptor/siliconflow/adaptor.go @@ -19,7 +19,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.siliconflow.cn" +const baseURL = "https://api.siliconflow.cn/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/siliconflow/image.go b/service/aiproxy/relay/adaptor/siliconflow/image.go index 1992fccc6ba..517b1ac67c6 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/image.go +++ b/service/aiproxy/relay/adaptor/siliconflow/image.go @@ -6,7 +6,6 @@ import ( "net/http" json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" "github.com/labring/sealos/service/aiproxy/relay/meta" diff --git 
a/service/aiproxy/relay/adaptor/stepfun/adaptor.go b/service/aiproxy/relay/adaptor/stepfun/adaptor.go index 6359e3d95f5..e57c5c59288 100644 --- a/service/aiproxy/relay/adaptor/stepfun/adaptor.go +++ b/service/aiproxy/relay/adaptor/stepfun/adaptor.go @@ -10,7 +10,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.stepfun.com" +const baseURL = "https://api.stepfun.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/tencent/adaptor.go b/service/aiproxy/relay/adaptor/tencent/adaptor.go index 0b032d03240..030c003fb52 100644 --- a/service/aiproxy/relay/adaptor/tencent/adaptor.go +++ b/service/aiproxy/relay/adaptor/tencent/adaptor.go @@ -12,7 +12,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://api.hunyuan.cloud.tencent.com" +const baseURL = "https://api.hunyuan.cloud.tencent.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/togetherai/constants.go b/service/aiproxy/relay/adaptor/togetherai/constants.go deleted file mode 100644 index dd3db44f496..00000000000 --- a/service/aiproxy/relay/adaptor/togetherai/constants.go +++ /dev/null @@ -1,40 +0,0 @@ -package togetherai - -import ( - "github.com/labring/sealos/service/aiproxy/model" - "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/relaymode" -) - -// https://docs.together.ai/docs/inference-models - -var ModelList = []*model.ModelConfig{ - { - Model: "meta-llama/Llama-3-70b-chat-hf", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerMeta, - }, - { - Model: "deepseek-ai/deepseek-coder-33b-instruct", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerDeepSeek, - }, - { - Model: "mistralai/Mixtral-8x22B-Instruct-v0.1", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerMistral, - }, - { - Model: 
"Qwen/Qwen1.5-72B-Chat", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerAlibaba, - }, -} - -type Adaptor struct { - openai.Adaptor -} - -func (a *Adaptor) GetModelList() []*model.ModelConfig { - return ModelList -} diff --git a/service/aiproxy/relay/adaptor/vertexai/token.go b/service/aiproxy/relay/adaptor/vertexai/token.go index 77b64ba9db3..1f51295e7b9 100644 --- a/service/aiproxy/relay/adaptor/vertexai/token.go +++ b/service/aiproxy/relay/adaptor/vertexai/token.go @@ -5,11 +5,10 @@ import ( "fmt" "time" - json "github.com/json-iterator/go" - "github.com/labring/sealos/service/aiproxy/common/conv" - credentials "cloud.google.com/go/iam/credentials/apiv1" "cloud.google.com/go/iam/credentials/apiv1/credentialspb" + json "github.com/json-iterator/go" + "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/patrickmn/go-cache" "google.golang.org/api/option" ) diff --git a/service/aiproxy/relay/adaptor/xunfei/adaptor.go b/service/aiproxy/relay/adaptor/xunfei/adaptor.go index 0309a92f258..8d65d475c5e 100644 --- a/service/aiproxy/relay/adaptor/xunfei/adaptor.go +++ b/service/aiproxy/relay/adaptor/xunfei/adaptor.go @@ -13,7 +13,7 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://spark-api-open.xf-yun.com" +const baseURL = "https://spark-api-open.xf-yun.com/v1" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { if meta.Channel.BaseURL == "" { diff --git a/service/aiproxy/relay/adaptor/zhipu/adaptor.go b/service/aiproxy/relay/adaptor/zhipu/adaptor.go index 719e1791afe..c14a67b623f 100644 --- a/service/aiproxy/relay/adaptor/zhipu/adaptor.go +++ b/service/aiproxy/relay/adaptor/zhipu/adaptor.go @@ -1,7 +1,6 @@ package zhipu import ( - "errors" "net/http" "github.com/gin-gonic/gin" @@ -16,23 +15,13 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://open.bigmodel.cn" +const baseURL = "https://open.bigmodel.cn/api/paas/v4" func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - u := 
meta.Channel.BaseURL - if u == "" { - u = baseURL - } - switch meta.Mode { - case relaymode.ImagesGenerations: - return u + "/api/paas/v4/images/generations", nil - case relaymode.Embeddings: - return u + "/api/paas/v4/embeddings", nil - case relaymode.ChatCompletions: - return u + "/api/paas/v4/chat/completions", nil - default: - return "", errors.New("unsupported mode") + if meta.Channel.BaseURL == "" { + meta.Channel.BaseURL = baseURL } + return a.Adaptor.GetRequestURL(meta) } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index e6caf65dd8d..147afd5eb74 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -158,6 +158,8 @@ func prepareAndDoRequest(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*h return nil, openai.ErrorWrapperWithMessage("get request url failed: "+err.Error(), "get_request_url_failed", http.StatusBadRequest) } + log.Debugf("request url: %s %s", method, fullRequestURL) + ctx := context.Background() if timeout := config.GetTimeoutWithModelType()[meta.Mode]; timeout > 0 { // donot use c.Request.Context() because it will be canceled by the client diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index f1a5ff97fd0..1b732ad22f2 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -51,7 +51,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa ) } - if groupRemainBalance <= 0 { + if !meta.IsChannelTest && groupRemainBalance <= 0 { return openai.ErrorWrapperWithMessage("group balance not enough", "insufficient_group_balance", http.StatusForbidden) } From 0dd9742ad9bbc561fbd92b66484236707953fb75 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 9 Feb 
2025 02:24:25 +0800 Subject: [PATCH 132/167] feat: limit detail record size --- service/aiproxy/common/trunc.go | 31 +++++++++++++++++++ service/aiproxy/controller/channel-test.go | 9 +++++- service/aiproxy/model/log.go | 28 +++++++++++++++++ service/aiproxy/relay/controller/dohelper.go | 32 +++++--------------- 4 files changed, 75 insertions(+), 25 deletions(-) create mode 100644 service/aiproxy/common/trunc.go diff --git a/service/aiproxy/common/trunc.go b/service/aiproxy/common/trunc.go new file mode 100644 index 00000000000..e4d85bfe897 --- /dev/null +++ b/service/aiproxy/common/trunc.go @@ -0,0 +1,31 @@ +package common + +import ( + "unicode/utf8" + + "github.com/labring/sealos/service/aiproxy/common/conv" +) + +func TruncateByRune(s string, length int) string { + total := 0 + for _, r := range s { + runeLen := utf8.RuneLen(r) + if runeLen == -1 || total+runeLen > length { + return s[:total] + } + total += runeLen + } + return s[:total] +} + +func TruncateBytesByRune(b []byte, length int) []byte { + total := 0 + for _, r := range conv.BytesToString(b) { + runeLen := utf8.RuneLen(r) + if runeLen == -1 || total+runeLen > length { + return b[:total] + } + total += runeLen + } + return b[:total] +} diff --git a/service/aiproxy/controller/channel-test.go b/service/aiproxy/controller/channel-test.go index 7ae968d9769..c7656edf639 100644 --- a/service/aiproxy/controller/channel-test.go +++ b/service/aiproxy/controller/channel-test.go @@ -22,6 +22,7 @@ import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/monitor" "github.com/labring/sealos/service/aiproxy/relay/meta" + "github.com/labring/sealos/service/aiproxy/relay/relaymode" "github.com/labring/sealos/service/aiproxy/relay/utils" log "github.com/sirupsen/logrus" ) @@ -65,7 +66,13 @@ func testSingleModel(mc *model.ModelCaches, channel *model.Channel, modelName st var respStr string var code int if success { - respStr = w.Body.String() + switch meta.Mode { + case 
relaymode.AudioSpeech, + relaymode.ImagesGenerations: + respStr = "" + default: + respStr = w.Body.String() + } code = w.Code } else { respStr = bizErr.Error.JSONOrEmpty() diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index a1fc1a7cbe3..b2a29632c62 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -17,6 +17,11 @@ import ( "gorm.io/gorm" ) +const ( + requestBodyMaxSize = 128 * 1024 // 128KB + responseBodyMaxSize = 128 * 1024 // 128KB +) + type RequestDetail struct { CreatedAt time.Time `gorm:"autoCreateTime" json:"-"` RequestBody string `gorm:"type:text" json:"request_body,omitempty"` @@ -27,6 +32,18 @@ type RequestDetail struct { LogID int `json:"log_id"` } +func (d *RequestDetail) BeforeSave(tx *gorm.DB) (err error) { + if len(d.RequestBody) > requestBodyMaxSize { + d.RequestBody = common.TruncateByRune(d.RequestBody, requestBodyMaxSize) + "..." + d.RequestBodyTruncated = true + } + if len(d.ResponseBody) > responseBodyMaxSize { + d.ResponseBody = common.TruncateByRune(d.ResponseBody, responseBodyMaxSize) + "..." + d.ResponseBodyTruncated = true + } + return +} + type Log struct { RequestDetail *RequestDetail `gorm:"foreignKey:LogID;constraint:OnUpdate:CASCADE,OnDelete:CASCADE;" json:"request_detail,omitempty"` RequestAt time.Time `gorm:"index;index:idx_request_at_group_id,priority:2;index:idx_group_reqat_token,priority:2" json:"request_at"` @@ -50,6 +67,17 @@ type Log struct { IP string `json:"ip"` } +const ( + contentMaxSize = 2 * 1024 // 2KB +) + +func (l *Log) BeforeSave(tx *gorm.DB) (err error) { + if len(l.Content) > contentMaxSize { + l.Content = common.TruncateByRune(l.Content, contentMaxSize) + "..." 
+ } + return +} + func (l *Log) MarshalJSON() ([]byte, error) { type Alias Log return json.Marshal(&struct { diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index 147afd5eb74..884f40469b0 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -31,43 +31,36 @@ func isErrorHappened(resp *http.Response) bool { } const ( - storeResponseBodyMaxSize = 3 * 1024 * 1024 // 3MB + // 0.5MB + maxBufferSize = 512 * 1024 ) type responseWriter struct { gin.ResponseWriter - body *bytes.Buffer - truncated bool + body *bytes.Buffer } func (rw *responseWriter) Write(b []byte) (int, error) { - if rw.body.Len() <= storeResponseBodyMaxSize { + if total := rw.body.Len() + len(b); total <= maxBufferSize { rw.body.Write(b) } else { - rw.truncated = true + rw.body.Write(b[:maxBufferSize-rw.body.Len()]) } return rw.ResponseWriter.Write(b) } func (rw *responseWriter) WriteString(s string) (int, error) { - if rw.body.Len() <= storeResponseBodyMaxSize { + if total := rw.body.Len() + len(s); total <= maxBufferSize { rw.body.WriteString(s) } else { - rw.truncated = true + rw.body.WriteString(s[:maxBufferSize-rw.body.Len()]) } return rw.ResponseWriter.WriteString(s) } -const ( - // 0.5MB - defaultBufferSize = 512 * 1024 - // 2MB - maxBufferSize = 2 * 1024 * 1024 -) - var bufferPool = sync.Pool{ New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, defaultBufferSize)) + return bytes.NewBuffer(make([]byte, 0, maxBufferSize)) }, } @@ -125,10 +118,6 @@ func DoHelper( return usage, &detail, nil } -const ( - requestBodyMaxSize = 2 * 1024 * 1024 // 2MB -) - func getRequestBody(meta *meta.Meta, c *gin.Context, detail *model.RequestDetail) *relaymodel.ErrorWithStatusCode { switch meta.Mode { case relaymode.AudioTranscription, relaymode.AudioTranslation: @@ -138,10 +127,6 @@ func getRequestBody(meta *meta.Meta, c *gin.Context, detail *model.RequestDetail if err != nil { return 
openai.ErrorWrapperWithMessage("get request body failed: "+err.Error(), "get_request_body_failed", http.StatusBadRequest) } - if len(reqBody) > requestBodyMaxSize { - reqBody = reqBody[:requestBodyMaxSize] - detail.RequestBodyTruncated = true - } detail.RequestBody = conv.BytesToString(reqBody) return nil } @@ -227,7 +212,6 @@ func handleSuccessResponse(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta, r // copy body buffer // do not use bytes conv detail.ResponseBody = rw.body.String() - detail.ResponseBodyTruncated = rw.truncated return usage, relayErr } From daaf2f13185e8db28e45e79dac960fab6df0d6ef Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 9 Feb 2025 20:03:55 +0800 Subject: [PATCH 133/167] feat: split think config --- service/aiproxy/controller/channel.go | 1 + service/aiproxy/model/channel.go | 15 +++----- service/aiproxy/model/log.go | 4 +- .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 3 -- .../aiproxy/relay/adaptor/openai/adaptor.go | 4 +- service/aiproxy/relay/adaptor/openai/main.go | 38 ++++++++++++++++--- .../relay/adaptor/openaithink/adaptor.go | 26 ------------- service/aiproxy/relay/channeltype/define.go | 2 - service/aiproxy/relay/meta/meta.go | 12 +++--- 9 files changed, 49 insertions(+), 56 deletions(-) delete mode 100644 service/aiproxy/relay/adaptor/openaithink/adaptor.go diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index 644e54551bf..d3dca68dd72 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -160,6 +160,7 @@ func (r *AddChannelRequest) ToChannel() (*model.Channel, error) { ModelMapping: maps.Clone(r.ModelMapping), Priority: r.Priority, Status: r.Status, + Config: r.Config, }, nil } diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 401d2d4693a..612460ca518 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -24,6 +24,10 @@ const ( ChannelStatusFail = 3 ) +type 
ChannelConfig struct { + SplitThink bool `json:"split_think"` +} + type Channel struct { CreatedAt time.Time `gorm:"index" json:"created_at"` LastTestErrorAt time.Time `json:"last_test_error_at"` @@ -41,6 +45,7 @@ type Channel struct { Status int `gorm:"default:1;index" json:"status"` Type int `gorm:"default:0;index" json:"type"` Priority int32 `json:"priority"` + Config ChannelConfig `gorm:"serializer:fastjson;type:text" json:"config"` } func (c *Channel) BeforeDelete(tx *gorm.DB) (err error) { @@ -130,16 +135,6 @@ func getChannelOrder(order string) string { } } -type ChannelConfig struct { - Region string `json:"region,omitempty"` - SK string `json:"sk,omitempty"` - AK string `json:"ak,omitempty"` - UserID string `json:"user_id,omitempty"` - Plugin string `json:"plugin,omitempty"` - VertexAIProjectID string `json:"vertex_ai_project_id,omitempty"` - VertexAIADC string `json:"vertex_ai_adc,omitempty"` -} - func GetAllChannels() (channels []*Channel, err error) { tx := DB.Model(&Channel{}) err = tx.Order("id desc").Find(&channels).Error diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index b2a29632c62..6b3be10e2c5 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -32,7 +32,7 @@ type RequestDetail struct { LogID int `json:"log_id"` } -func (d *RequestDetail) BeforeSave(tx *gorm.DB) (err error) { +func (d *RequestDetail) BeforeSave(_ *gorm.DB) (err error) { if len(d.RequestBody) > requestBodyMaxSize { d.RequestBody = common.TruncateByRune(d.RequestBody, requestBodyMaxSize) + "..." d.RequestBodyTruncated = true @@ -71,7 +71,7 @@ const ( contentMaxSize = 2 * 1024 // 2KB ) -func (l *Log) BeforeSave(tx *gorm.DB) (err error) { +func (l *Log) BeforeSave(_ *gorm.DB) (err error) { if len(l.Content) > contentMaxSize { l.Content = common.TruncateByRune(l.Content, contentMaxSize) + "..." 
} diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index d42ab6b67a3..1345b10176c 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -66,9 +66,6 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht meta.ActualModel = v2Model defer func() { meta.ActualModel = actModel }() } - if meta.ActualModel == "deepseek-r1" { - meta.Set(openai.SplitThinkMetaKey, true) - } return openai.ConvertRequest(meta, req) default: return "", nil, nil, fmt.Errorf("unsupported mode: %d", meta.Mode) diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index ff35c9a3e48..74cf6297f29 100644 --- a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -92,8 +92,6 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io } } -const SplitThinkMetaKey = "split_think" - func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { switch meta.Mode { case relaymode.ImagesGenerations: @@ -110,7 +108,7 @@ func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *re fallthrough case relaymode.ChatCompletions: if utils.IsStreamResponse(resp) { - usage, err = StreamHandler(meta, c, resp, meta.GetBool(SplitThinkMetaKey)) + usage, err = StreamHandler(meta, c, resp) } else { usage, err = Handler(meta, c, resp) } diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index a2694bfdedc..96118730202 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -31,7 +31,7 @@ type UsageAndChoicesResponse struct { Choices []*ChatCompletionsStreamResponseChoice } -func StreamHandler(meta *meta.Meta, c *gin.Context, resp 
*http.Response, splitThink bool) (*model.Usage, *model.ErrorWithStatusCode) { +func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { defer resp.Body.Close() log := middleware.GetLogger(c) @@ -46,7 +46,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, splitTh hasReasoningContent := false var thinkSplitter *splitter.Splitter - if splitThink { + if meta.ChannelConfig.SplitThink { thinkSplitter = splitter.NewThinkSplitter() } @@ -88,8 +88,8 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, splitTh if _, ok := respMap["model"]; ok && meta.OriginModel != "" { respMap["model"] = meta.OriginModel } - if splitThink && !hasReasoningContent { - SplitThink(respMap, thinkSplitter, func(data map[string]any) { + if meta.ChannelConfig.SplitThink && !hasReasoningContent { + StreamSplitThink(respMap, thinkSplitter, func(data map[string]any) { _ = render.ObjectData(c, data) }) continue @@ -128,7 +128,7 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, splitTh } // renderCallback maybe reuse data, so don't modify data -func SplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCallback func(data map[string]any)) { +func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCallback func(data map[string]any)) { choices, ok := data["choices"].([]any) if !ok { return @@ -160,6 +160,30 @@ func SplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCal } } +func SplitThink(data map[string]any) { + choices, ok := data["choices"].([]any) + if !ok { + return + } + for _, choice := range choices { + choiceMap, ok := choice.(map[string]any) + if !ok { + continue + } + delta, ok := choiceMap["delta"].(map[string]any) + if !ok { + continue + } + content, ok := delta["content"].(string) + if !ok { + continue + } + think, remaining := splitter.NewThinkSplitter().Process(conv.StringToBytes(content)) 
+ delta["reasoning_content"] = conv.BytesToString(think) + delta["content"] = conv.BytesToString(remaining) + } +} + func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { defer resp.Body.Close() @@ -200,6 +224,10 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage respMap["model"] = meta.OriginModel } + if meta.ChannelConfig.SplitThink { + SplitThink(respMap) + } + newData, err := stdjson.Marshal(respMap) if err != nil { return &textResponse.Usage, ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError) diff --git a/service/aiproxy/relay/adaptor/openaithink/adaptor.go b/service/aiproxy/relay/adaptor/openaithink/adaptor.go deleted file mode 100644 index 7c16df46070..00000000000 --- a/service/aiproxy/relay/adaptor/openaithink/adaptor.go +++ /dev/null @@ -1,26 +0,0 @@ -package openaithink - -import ( - "net/http" - - "github.com/gin-gonic/gin" - "github.com/labring/sealos/service/aiproxy/relay/adaptor" - "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" - "github.com/labring/sealos/service/aiproxy/relay/model" -) - -var _ adaptor.Adaptor = (*Adaptor)(nil) - -type Adaptor struct { - openai.Adaptor -} - -func (a *Adaptor) GetChannelName() string { - return "openai-think" -} - -func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { - meta.Set(openai.SplitThinkMetaKey, true) - return a.Adaptor.DoResponse(meta, c, resp) -} diff --git a/service/aiproxy/relay/channeltype/define.go b/service/aiproxy/relay/channeltype/define.go index 6f8e613276f..4a41be3e855 100644 --- a/service/aiproxy/relay/channeltype/define.go +++ b/service/aiproxy/relay/channeltype/define.go @@ -26,7 +26,6 @@ import ( "github.com/labring/sealos/service/aiproxy/relay/adaptor/novita" "github.com/labring/sealos/service/aiproxy/relay/adaptor/ollama" 
"github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/adaptor/openaithink" "github.com/labring/sealos/service/aiproxy/relay/adaptor/siliconflow" "github.com/labring/sealos/service/aiproxy/relay/adaptor/stepfun" "github.com/labring/sealos/service/aiproxy/relay/adaptor/tencent" @@ -37,7 +36,6 @@ import ( var ChannelAdaptor = map[int]adaptor.Adaptor{ 1: &openai.Adaptor{}, - 2: &openaithink.Adaptor{}, 3: &azure.Adaptor{}, 12: &geminiopenai.Adaptor{}, 13: &baiduv2.Adaptor{}, diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index 9787c0e43c0..a707af773d4 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -16,11 +16,12 @@ type ChannelMeta struct { } type Meta struct { - values map[string]any - Channel *ChannelMeta - Group *model.GroupCache - Token *model.TokenCache - ModelConfig *model.ModelConfig + values map[string]any + Channel *ChannelMeta + ChannelConfig model.ChannelConfig + Group *model.GroupCache + Token *model.TokenCache + ModelConfig *model.ModelConfig Endpoint string RequestAt time.Time @@ -105,6 +106,7 @@ func (m *Meta) Reset(channel *model.Channel) { ID: channel.ID, Type: channel.Type, } + m.ChannelConfig = channel.Config m.ActualModel, _ = GetMappedModelName(m.OriginModel, channel.ModelMapping) m.ClearValues() } From 0a4e7b6cbdcfb4fd4bd0d0b19883b65e206c151e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Sun, 9 Feb 2025 21:52:09 +0800 Subject: [PATCH 134/167] feat: channel default priority --- service/aiproxy/model/cache.go | 6 +++--- service/aiproxy/model/channel.go | 11 +++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/service/aiproxy/model/cache.go b/service/aiproxy/model/cache.go index acad8ed56a6..927509c31d5 100644 --- a/service/aiproxy/model/cache.go +++ b/service/aiproxy/model/cache.go @@ -610,7 +610,7 @@ func buildModelToChannelsMap(channels []*Channel) map[string][]*Channel { func 
sortChannelsByPriority(modelMap map[string][]*Channel) { for _, channels := range modelMap { sort.Slice(channels, func(i, j int) bool { - return channels[i].Priority > channels[j].Priority + return channels[i].GetPriority() > channels[j].GetPriority() }) } } @@ -743,7 +743,7 @@ func (c *ModelCaches) GetRandomSatisfiedChannel(model string, ignoreChannel ...i var totalWeight int32 for _, ch := range channels { - totalWeight += ch.Priority + totalWeight += ch.GetPriority() } if totalWeight == 0 { @@ -752,7 +752,7 @@ func (c *ModelCaches) GetRandomSatisfiedChannel(model string, ignoreChannel ...i r := rand.Int32N(totalWeight) for _, ch := range channels { - r -= ch.Priority + r -= ch.GetPriority() if r < 0 { return ch, nil } diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 612460ca518..277f3c25ed4 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -52,6 +52,17 @@ func (c *Channel) BeforeDelete(tx *gorm.DB) (err error) { return tx.Model(&ChannelTest{}).Where("channel_id = ?", c.ID).Delete(&ChannelTest{}).Error } +const ( + DefaultPriority = 100 +) + +func (c *Channel) GetPriority() int32 { + if c.Priority == 0 { + return DefaultPriority + } + return c.Priority +} + func GetModelConfigWithModels(models []string) ([]string, []string, error) { if len(models) == 0 || config.GetDisableModelConfig() { return models, nil, nil From 30896c6809745c7b69d1c41870a665a2b8d6c908 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 10:16:11 +0800 Subject: [PATCH 135/167] fix: rate limit message --- service/aiproxy/middleware/distributor.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 34ed28b66b3..3d522b35874 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -1,6 +1,7 @@ package middleware import ( + "errors" "fmt" 
"net/http" "slices" @@ -76,6 +77,11 @@ func GetGroupAdjustedModelConfig(group *model.GroupCache, mc *model.ModelConfig) return mc } +var ( + ErrRequestRateLimitExceeded = errors.New("request rate limit exceeded, please try again later") + ErrRequestTpmLimitExceeded = errors.New("request tpm limit exceeded, please try again later") +) + func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model.ModelConfig) error { adjustedModelConfig := GetGroupAdjustedModelConfig(group, mc) @@ -88,7 +94,7 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model time.Minute, ) if !ok { - return fmt.Errorf("group (%s) is requesting too frequently", group.ID) + return ErrRequestRateLimitExceeded } } else if common.RedisEnabled { _, err := rpmlimit.PushRequest(c.Request.Context(), group.ID, mc.Model, time.Minute) @@ -106,7 +112,7 @@ func checkGroupModelRPMAndTPM(c *gin.Context, group *model.GroupCache, mc *model } if tpm >= adjustedModelConfig.TPM { - return fmt.Errorf("group (%s) tpm is too high", group.ID) + return ErrRequestTpmLimitExceeded } } return nil From 7a99c802c2c95d8d7773b97854d4d01678177609 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 11:11:49 +0800 Subject: [PATCH 136/167] feat: channel meta api --- service/aiproxy/controller/channel.go | 4 ++++ .../aiproxy/relay/adaptor/ai360/adaptor.go | 8 ++----- service/aiproxy/relay/adaptor/ali/adaptor.go | 4 ++++ .../relay/adaptor/anthropic/adaptor.go | 12 +++++----- service/aiproxy/relay/adaptor/aws/adaptor.go | 4 ++++ service/aiproxy/relay/adaptor/aws/key.go | 4 ++++ .../aiproxy/relay/adaptor/azure/constants.go | 4 ++++ service/aiproxy/relay/adaptor/azure/key.go | 4 ++++ .../aiproxy/relay/adaptor/baichuan/adaptor.go | 8 ++----- .../aiproxy/relay/adaptor/baidu/adaptor.go | 4 ++++ .../aiproxy/relay/adaptor/baiduv2/adaptor.go | 12 +++++----- .../relay/adaptor/cloudflare/adaptor.go | 7 +++--- .../aiproxy/relay/adaptor/cohere/adaptor.go | 10 ++++---- 
service/aiproxy/relay/adaptor/coze/adaptor.go | 10 ++++---- service/aiproxy/relay/adaptor/coze/key.go | 4 ++++ .../aiproxy/relay/adaptor/deepseek/adaptor.go | 8 ++----- service/aiproxy/relay/adaptor/doubao/main.go | 7 +++--- .../aiproxy/relay/adaptor/doubaoaudio/key.go | 4 ++++ .../aiproxy/relay/adaptor/doubaoaudio/main.go | 7 +++--- .../aiproxy/relay/adaptor/gemini/adaptor.go | 4 ++++ .../relay/adaptor/geminiopenai/adaptor.go | 8 ++----- service/aiproxy/relay/adaptor/groq/adaptor.go | 8 ++----- service/aiproxy/relay/adaptor/interface.go | 2 ++ .../relay/adaptor/lingyiwanwu/adaptor.go | 8 ++----- .../aiproxy/relay/adaptor/minimax/adaptor.go | 7 +++--- service/aiproxy/relay/adaptor/minimax/key.go | 4 ++++ .../aiproxy/relay/adaptor/mistral/adaptor.go | 8 ++----- .../aiproxy/relay/adaptor/moonshot/adaptor.go | 8 ++----- .../aiproxy/relay/adaptor/novita/adaptor.go | 23 ++++--------------- .../aiproxy/relay/adaptor/ollama/adaptor.go | 7 +++--- .../aiproxy/relay/adaptor/openai/adaptor.go | 7 +++--- .../relay/adaptor/siliconflow/adaptor.go | 12 ++-------- .../aiproxy/relay/adaptor/stepfun/adaptor.go | 8 ++----- .../aiproxy/relay/adaptor/tencent/adaptor.go | 8 ++----- .../aiproxy/relay/adaptor/vertexai/adaptor.go | 4 ++++ service/aiproxy/relay/adaptor/vertexai/key.go | 4 ++++ .../aiproxy/relay/adaptor/xunfei/adaptor.go | 11 ++++----- service/aiproxy/relay/adaptor/xunfei/key.go | 4 ++++ .../aiproxy/relay/adaptor/zhipu/adaptor.go | 7 ++---- service/aiproxy/relay/channeltype/define.go | 23 ++++++++++++++++++- service/aiproxy/relay/controller/dohelper.go | 4 ++++ service/aiproxy/router/api.go | 1 + 42 files changed, 163 insertions(+), 142 deletions(-) diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index d3dca68dd72..29108bc2cf4 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -21,6 +21,10 @@ func ChannelTypeNames(c *gin.Context) { middleware.SuccessResponse(c, channeltype.ChannelNames) 
} +func ChannelTypeMetas(c *gin.Context) { + middleware.SuccessResponse(c, channeltype.ChannelMetas) +} + func GetChannels(c *gin.Context) { p, _ := strconv.Atoi(c.Query("p")) p-- diff --git a/service/aiproxy/relay/adaptor/ai360/adaptor.go b/service/aiproxy/relay/adaptor/ai360/adaptor.go index 463fdb57a85..5ce25e6e02a 100644 --- a/service/aiproxy/relay/adaptor/ai360/adaptor.go +++ b/service/aiproxy/relay/adaptor/ai360/adaptor.go @@ -3,7 +3,6 @@ package ai360 import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://ai.360.cn/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/ali/adaptor.go b/service/aiproxy/relay/adaptor/ali/adaptor.go index 76ebfa8fa68..ce050f2a349 100644 --- a/service/aiproxy/relay/adaptor/ali/adaptor.go +++ b/service/aiproxy/relay/adaptor/ali/adaptor.go @@ -22,6 +22,10 @@ type Adaptor struct{} const baseURL = "https://dashscope.aliyuncs.com" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { u := meta.Channel.BaseURL if u == "" { diff --git a/service/aiproxy/relay/adaptor/anthropic/adaptor.go b/service/aiproxy/relay/adaptor/anthropic/adaptor.go index e4f035793a5..3d892a419e1 100644 --- a/service/aiproxy/relay/adaptor/anthropic/adaptor.go +++ b/service/aiproxy/relay/adaptor/anthropic/adaptor.go @@ -16,14 +16,14 @@ import ( type Adaptor struct{} -const baseURL = "https://api.anthropic.com" +const baseURL = "https://api.anthropic.com/v1" + +func (a *Adaptor) GetBaseURL() string 
{ + return baseURL +} func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } - return u + "/v1/messages", nil + return meta.Channel.BaseURL + "/v1/messages", nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, c *gin.Context, req *http.Request) error { diff --git a/service/aiproxy/relay/adaptor/aws/adaptor.go b/service/aiproxy/relay/adaptor/aws/adaptor.go index 2c53495b039..09fa2c103f9 100644 --- a/service/aiproxy/relay/adaptor/aws/adaptor.go +++ b/service/aiproxy/relay/adaptor/aws/adaptor.go @@ -17,6 +17,10 @@ var _ adaptor.Adaptor = new(Adaptor) type Adaptor struct{} +func (a *Adaptor) GetBaseURL() string { + return "" +} + func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { adaptor := GetAdaptor(meta.ActualModel) if adaptor == nil { diff --git a/service/aiproxy/relay/adaptor/aws/key.go b/service/aiproxy/relay/adaptor/aws/key.go index e60517beaa0..e2ef5f461b3 100644 --- a/service/aiproxy/relay/adaptor/aws/key.go +++ b/service/aiproxy/relay/adaptor/aws/key.go @@ -14,3 +14,7 @@ func (a *Adaptor) ValidateKey(key string) error { } return nil } + +func (a *Adaptor) KeyHelp() string { + return "region|ak|sk" +} diff --git a/service/aiproxy/relay/adaptor/azure/constants.go b/service/aiproxy/relay/adaptor/azure/constants.go index 0eab4e89c88..1f0ac7c38b8 100644 --- a/service/aiproxy/relay/adaptor/azure/constants.go +++ b/service/aiproxy/relay/adaptor/azure/constants.go @@ -15,6 +15,10 @@ type Adaptor struct { openai.Adaptor } +func (a *Adaptor) GetBaseURL() string { + return "https://{resource_name}.openai.azure.com" +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { _, apiVersion, err := getTokenAndAPIVersion(meta.Channel.Key) if err != nil { diff --git a/service/aiproxy/relay/adaptor/azure/key.go b/service/aiproxy/relay/adaptor/azure/key.go index 8de63b6fa07..7d4596b82d5 100644 --- 
a/service/aiproxy/relay/adaptor/azure/key.go +++ b/service/aiproxy/relay/adaptor/azure/key.go @@ -17,6 +17,10 @@ func (a *Adaptor) ValidateKey(key string) error { return nil } +func (a *Adaptor) KeyHelp() string { + return "key or key|api-version" +} + func getTokenAndAPIVersion(key string) (string, string, error) { split := strings.Split(key, "|") if len(split) == 1 { diff --git a/service/aiproxy/relay/adaptor/baichuan/adaptor.go b/service/aiproxy/relay/adaptor/baichuan/adaptor.go index 4eba1883fe1..481ec07923e 100644 --- a/service/aiproxy/relay/adaptor/baichuan/adaptor.go +++ b/service/aiproxy/relay/adaptor/baichuan/adaptor.go @@ -3,7 +3,6 @@ package baichuan import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://api.baichuan-ai.com/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/baidu/adaptor.go b/service/aiproxy/relay/adaptor/baidu/adaptor.go index 0605eef7cc7..f2db94be4ae 100644 --- a/service/aiproxy/relay/adaptor/baidu/adaptor.go +++ b/service/aiproxy/relay/adaptor/baidu/adaptor.go @@ -23,6 +23,10 @@ const ( baseURL = "https://aip.baidubce.com" ) +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + // Get model-specific endpoint using map var modelEndpointMap = map[string]string{ "ERNIE-4.0-8K": "completions_pro", diff --git a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go index 1345b10176c..59b54981758 100644 --- a/service/aiproxy/relay/adaptor/baiduv2/adaptor.go +++ 
b/service/aiproxy/relay/adaptor/baiduv2/adaptor.go @@ -19,9 +19,13 @@ import ( type Adaptor struct{} const ( - baseURL = "https://qianfan.baidubce.com" + baseURL = "https://qianfan.baidubce.com/v2" ) +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Fm2vrveyu var v2ModelMap = map[string]string{ "ERNIE-Character-8K": "ernie-char-8k", @@ -36,13 +40,9 @@ func toV2ModelName(modelName string) string { } func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - switch meta.Mode { case relaymode.ChatCompletions: - return meta.Channel.BaseURL + "/v2/chat/completions", nil + return meta.Channel.BaseURL + "/chat/completions", nil default: return "", fmt.Errorf("unsupported mode: %d", meta.Mode) } diff --git a/service/aiproxy/relay/adaptor/cloudflare/adaptor.go b/service/aiproxy/relay/adaptor/cloudflare/adaptor.go index 7680155fa76..47a80e8e66d 100644 --- a/service/aiproxy/relay/adaptor/cloudflare/adaptor.go +++ b/service/aiproxy/relay/adaptor/cloudflare/adaptor.go @@ -16,6 +16,10 @@ type Adaptor struct { const baseURL = "https://api.cloudflare.com" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + // WorkerAI cannot be used across accounts with AIGateWay // https://developers.cloudflare.com/ai-gateway/providers/workersai/#openai-compatible-endpoints // https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/workers-ai @@ -25,9 +29,6 @@ func isAIGateWay(baseURL string) bool { func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } isAIGateWay := isAIGateWay(u) var urlPrefix string if isAIGateWay { diff --git a/service/aiproxy/relay/adaptor/cohere/adaptor.go b/service/aiproxy/relay/adaptor/cohere/adaptor.go index df545466358..f3691c3436f 100644 --- a/service/aiproxy/relay/adaptor/cohere/adaptor.go +++ 
b/service/aiproxy/relay/adaptor/cohere/adaptor.go @@ -20,12 +20,12 @@ type Adaptor struct{} const baseURL = "https://api.cohere.ai" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } - return u + "/v1/chat", nil + return meta.Channel.BaseURL + "/v1/chat", nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { diff --git a/service/aiproxy/relay/adaptor/coze/adaptor.go b/service/aiproxy/relay/adaptor/coze/adaptor.go index 528c5e62522..50687f4f880 100644 --- a/service/aiproxy/relay/adaptor/coze/adaptor.go +++ b/service/aiproxy/relay/adaptor/coze/adaptor.go @@ -21,12 +21,12 @@ type Adaptor struct{} const baseURL = "https://api.coze.com" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } - return u + "/open_api/v2/chat", nil + return meta.Channel.BaseURL + "/open_api/v2/chat", nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error { diff --git a/service/aiproxy/relay/adaptor/coze/key.go b/service/aiproxy/relay/adaptor/coze/key.go index 1e7aef59a8d..5440e4f0ab5 100644 --- a/service/aiproxy/relay/adaptor/coze/key.go +++ b/service/aiproxy/relay/adaptor/coze/key.go @@ -17,6 +17,10 @@ func (a *Adaptor) ValidateKey(key string) error { return nil } +func (a *Adaptor) KeyHelp() string { + return "token|user_id" +} + func getTokenAndUserID(key string) (string, string, error) { split := strings.Split(key, "|") if len(split) != 2 { diff --git a/service/aiproxy/relay/adaptor/deepseek/adaptor.go b/service/aiproxy/relay/adaptor/deepseek/adaptor.go index a9145bc5829..5ed5cd32be2 100644 --- a/service/aiproxy/relay/adaptor/deepseek/adaptor.go +++ b/service/aiproxy/relay/adaptor/deepseek/adaptor.go @@ -4,7 +4,6 @@ import 
( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) var _ adaptor.Adaptor = (*Adaptor)(nil) @@ -15,11 +14,8 @@ type Adaptor struct { const baseURL = "https://api.deepseek.com/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/doubao/main.go b/service/aiproxy/relay/adaptor/doubao/main.go index 724b5a8d0f5..debe1bac0fd 100644 --- a/service/aiproxy/relay/adaptor/doubao/main.go +++ b/service/aiproxy/relay/adaptor/doubao/main.go @@ -18,9 +18,6 @@ import ( func GetRequestURL(meta *meta.Meta) (string, error) { u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } switch meta.Mode { case relaymode.ChatCompletions: if strings.HasPrefix(meta.ActualModel, "bot-") { @@ -40,6 +37,10 @@ type Adaptor struct { const baseURL = "https://ark.cn-beijing.volces.com" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetModelList() []*model.ModelConfig { return ModelList } diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/key.go b/service/aiproxy/relay/adaptor/doubaoaudio/key.go index e48cda17aff..46a6b99efb5 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/key.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/key.go @@ -14,6 +14,10 @@ func (a *Adaptor) ValidateKey(key string) error { return err } +func (a *Adaptor) KeyHelp() string { + return "app_id|app_token" +} + // key格式: app_id|app_token func getAppIDAndToken(key string) (string, string, error) { parts := strings.Split(key, "|") diff --git a/service/aiproxy/relay/adaptor/doubaoaudio/main.go 
b/service/aiproxy/relay/adaptor/doubaoaudio/main.go index 8c76d3db4da..f653a821a1e 100644 --- a/service/aiproxy/relay/adaptor/doubaoaudio/main.go +++ b/service/aiproxy/relay/adaptor/doubaoaudio/main.go @@ -15,9 +15,6 @@ import ( func GetRequestURL(meta *meta.Meta) (string, error) { u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } switch meta.Mode { case relaymode.AudioSpeech: return u + "/api/v1/tts/ws_binary", nil @@ -30,6 +27,10 @@ type Adaptor struct{} const baseURL = "https://openspeech.bytedance.com" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetModelList() []*model.ModelConfig { return ModelList } diff --git a/service/aiproxy/relay/adaptor/gemini/adaptor.go b/service/aiproxy/relay/adaptor/gemini/adaptor.go index d5cb42928f4..18ca3b398d7 100644 --- a/service/aiproxy/relay/adaptor/gemini/adaptor.go +++ b/service/aiproxy/relay/adaptor/gemini/adaptor.go @@ -19,6 +19,10 @@ type Adaptor struct{} const baseURL = "https://generativelanguage.googleapis.com" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + var v1ModelMap = map[string]struct{}{} func getRequestURL(meta *meta.Meta, action string) string { diff --git a/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go b/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go index 5e6d5a890ff..b6c20ae2992 100644 --- a/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go +++ b/service/aiproxy/relay/adaptor/geminiopenai/adaptor.go @@ -4,7 +4,6 @@ import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/gemini" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -13,11 +12,8 @@ type Adaptor struct { const baseURL = "https://generativelanguage.googleapis.com/v1beta/openai" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } 
- return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/groq/adaptor.go b/service/aiproxy/relay/adaptor/groq/adaptor.go index 9a7ddc16259..0db752eb2eb 100644 --- a/service/aiproxy/relay/adaptor/groq/adaptor.go +++ b/service/aiproxy/relay/adaptor/groq/adaptor.go @@ -3,7 +3,6 @@ package groq import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://api.groq.com/openai/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/interface.go b/service/aiproxy/relay/adaptor/interface.go index 8fa9b5e992d..239cd818463 100644 --- a/service/aiproxy/relay/adaptor/interface.go +++ b/service/aiproxy/relay/adaptor/interface.go @@ -12,6 +12,7 @@ import ( type Adaptor interface { GetChannelName() string + GetBaseURL() string GetRequestURL(meta *meta.Meta) (string, error) SetupRequestHeader(meta *meta.Meta, c *gin.Context, req *http.Request) error ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) @@ -26,4 +27,5 @@ type Balancer interface { type KeyValidator interface { ValidateKey(key string) error + KeyHelp() string } diff --git a/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go b/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go index 315c0ad15ed..3f529b46196 100644 --- a/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go +++ b/service/aiproxy/relay/adaptor/lingyiwanwu/adaptor.go @@ -3,7 +3,6 @@ package 
lingyiwanwu import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://api.lingyiwanwu.com/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/minimax/adaptor.go b/service/aiproxy/relay/adaptor/minimax/adaptor.go index 7de3c8d3432..54b8a408040 100644 --- a/service/aiproxy/relay/adaptor/minimax/adaptor.go +++ b/service/aiproxy/relay/adaptor/minimax/adaptor.go @@ -19,6 +19,10 @@ type Adaptor struct { const baseURL = "https://api.minimax.chat/v1" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetModelList() []*model.ModelConfig { return ModelList } @@ -33,9 +37,6 @@ func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http. 
} func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } _, groupID, err := GetAPIKeyAndGroupID(meta.Channel.Key) if err != nil { return "", err diff --git a/service/aiproxy/relay/adaptor/minimax/key.go b/service/aiproxy/relay/adaptor/minimax/key.go index 59e77483acb..b352343e446 100644 --- a/service/aiproxy/relay/adaptor/minimax/key.go +++ b/service/aiproxy/relay/adaptor/minimax/key.go @@ -17,6 +17,10 @@ func (a *Adaptor) ValidateKey(key string) error { return nil } +func (a *Adaptor) KeyHelp() string { + return "api_key|group_id" +} + func GetAPIKeyAndGroupID(key string) (string, string, error) { keys := strings.Split(key, "|") if len(keys) != 2 { diff --git a/service/aiproxy/relay/adaptor/mistral/adaptor.go b/service/aiproxy/relay/adaptor/mistral/adaptor.go index db34757fd3a..413cbc1797f 100644 --- a/service/aiproxy/relay/adaptor/mistral/adaptor.go +++ b/service/aiproxy/relay/adaptor/mistral/adaptor.go @@ -3,7 +3,6 @@ package mistral import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://api.mistral.ai/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/moonshot/adaptor.go b/service/aiproxy/relay/adaptor/moonshot/adaptor.go index fdf5c9ab31e..0f109a1057d 100644 --- a/service/aiproxy/relay/adaptor/moonshot/adaptor.go +++ b/service/aiproxy/relay/adaptor/moonshot/adaptor.go @@ -3,7 +3,6 @@ package moonshot import ( "github.com/labring/sealos/service/aiproxy/model" 
"github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://api.moonshot.cn/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/novita/adaptor.go b/service/aiproxy/relay/adaptor/novita/adaptor.go index 1ab848ad4e4..a1b45a0d36a 100644 --- a/service/aiproxy/relay/adaptor/novita/adaptor.go +++ b/service/aiproxy/relay/adaptor/novita/adaptor.go @@ -1,37 +1,22 @@ package novita import ( - "fmt" - "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" - "github.com/labring/sealos/service/aiproxy/relay/relaymode" ) -func GetRequestURL(meta *meta.Meta) (string, error) { - u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } - if meta.Mode == relaymode.ChatCompletions { - return u + "/chat/completions", nil - } - return "", fmt.Errorf("unsupported relay mode %d for novita", meta.Mode) -} - type Adaptor struct { openai.Adaptor } const baseURL = "https://api.novita.ai/v3/openai" -func (a *Adaptor) GetModelList() []*model.ModelConfig { - return ModelList +func (a *Adaptor) GetBaseURL() string { + return baseURL } -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - return GetRequestURL(meta) +func (a *Adaptor) GetModelList() []*model.ModelConfig { + return ModelList } func (a *Adaptor) GetChannelName() string { diff --git a/service/aiproxy/relay/adaptor/ollama/adaptor.go b/service/aiproxy/relay/adaptor/ollama/adaptor.go index e8a7b1dc3b0..554f34de4a2 100644 --- 
a/service/aiproxy/relay/adaptor/ollama/adaptor.go +++ b/service/aiproxy/relay/adaptor/ollama/adaptor.go @@ -18,12 +18,13 @@ type Adaptor struct{} const baseURL = "http://localhost:11434" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { // https://github.com/ollama/ollama/blob/main/docs/api.md u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } switch meta.Mode { case relaymode.Embeddings: return u + "/api/embed", nil diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index 74cf6297f29..e549ec28041 100644 --- a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -23,11 +23,12 @@ type Adaptor struct{} const baseURL = "https://api.openai.com/v1" +func (a *Adaptor) GetBaseURL() string { + return baseURL +} + func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { u := meta.Channel.BaseURL - if u == "" { - u = baseURL - } var path string switch meta.Mode { diff --git a/service/aiproxy/relay/adaptor/siliconflow/adaptor.go b/service/aiproxy/relay/adaptor/siliconflow/adaptor.go index 5c890c79049..c814250f52f 100644 --- a/service/aiproxy/relay/adaptor/siliconflow/adaptor.go +++ b/service/aiproxy/relay/adaptor/siliconflow/adaptor.go @@ -1,7 +1,6 @@ package siliconflow import ( - "io" "net/http" "github.com/gin-gonic/gin" @@ -21,11 +20,8 @@ type Adaptor struct { const baseURL = "https://api.siliconflow.cn/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { @@ -36,10 +32,6 @@ func (a *Adaptor) GetChannelName() string { return "siliconflow" } -func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, 
http.Header, io.Reader, error) { - return a.Adaptor.ConvertRequest(meta, req) -} - //nolint:gocritic func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) { usage, err := a.Adaptor.DoResponse(meta, c, resp) diff --git a/service/aiproxy/relay/adaptor/stepfun/adaptor.go b/service/aiproxy/relay/adaptor/stepfun/adaptor.go index e57c5c59288..e2c01b31bf3 100644 --- a/service/aiproxy/relay/adaptor/stepfun/adaptor.go +++ b/service/aiproxy/relay/adaptor/stepfun/adaptor.go @@ -3,7 +3,6 @@ package stepfun import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) type Adaptor struct { @@ -12,11 +11,8 @@ type Adaptor struct { const baseURL = "https://api.stepfun.com/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/tencent/adaptor.go b/service/aiproxy/relay/adaptor/tencent/adaptor.go index 030c003fb52..4b3109ed0cc 100644 --- a/service/aiproxy/relay/adaptor/tencent/adaptor.go +++ b/service/aiproxy/relay/adaptor/tencent/adaptor.go @@ -3,7 +3,6 @@ package tencent import ( "github.com/labring/sealos/service/aiproxy/model" "github.com/labring/sealos/service/aiproxy/relay/adaptor/openai" - "github.com/labring/sealos/service/aiproxy/relay/meta" ) // https://cloud.tencent.com/document/api/1729/101837 @@ -14,11 +13,8 @@ type Adaptor struct { const baseURL = "https://api.hunyuan.cloud.tencent.com/v1" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) 
GetBaseURL() string { + return baseURL } func (a *Adaptor) GetModelList() []*model.ModelConfig { diff --git a/service/aiproxy/relay/adaptor/vertexai/adaptor.go b/service/aiproxy/relay/adaptor/vertexai/adaptor.go index 075d532cc18..64882637b39 100644 --- a/service/aiproxy/relay/adaptor/vertexai/adaptor.go +++ b/service/aiproxy/relay/adaptor/vertexai/adaptor.go @@ -23,6 +23,10 @@ const channelName = "vertexai" type Adaptor struct{} +func (a *Adaptor) GetBaseURL() string { + return "" +} + type Config struct { Region string ProjectID string diff --git a/service/aiproxy/relay/adaptor/vertexai/key.go b/service/aiproxy/relay/adaptor/vertexai/key.go index 17f2324b042..955c9b9ca9c 100644 --- a/service/aiproxy/relay/adaptor/vertexai/key.go +++ b/service/aiproxy/relay/adaptor/vertexai/key.go @@ -17,6 +17,10 @@ func (a *Adaptor) ValidateKey(key string) error { return nil } +func (a *Adaptor) KeyHelp() string { + return "region|projectID|adcJSON" +} + // region|projectID|adcJSON func getConfigFromKey(key string) (Config, error) { region, after, ok := strings.Cut(key, "|") diff --git a/service/aiproxy/relay/adaptor/xunfei/adaptor.go b/service/aiproxy/relay/adaptor/xunfei/adaptor.go index 8d65d475c5e..d5e59771ce7 100644 --- a/service/aiproxy/relay/adaptor/xunfei/adaptor.go +++ b/service/aiproxy/relay/adaptor/xunfei/adaptor.go @@ -13,15 +13,12 @@ type Adaptor struct { openai.Adaptor } -const baseURL = "https://spark-api-open.xf-yun.com/v1" - -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } +const baseURL = "https://spark-api-open.xf-yun.com/v1" + func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error) { domain, err := getXunfeiDomain(meta.ActualModel) if err != nil { diff --git a/service/aiproxy/relay/adaptor/xunfei/key.go 
b/service/aiproxy/relay/adaptor/xunfei/key.go index fba354f99c9..55ce0deb40d 100644 --- a/service/aiproxy/relay/adaptor/xunfei/key.go +++ b/service/aiproxy/relay/adaptor/xunfei/key.go @@ -15,3 +15,7 @@ func (a *Adaptor) ValidateKey(key string) error { } return errors.New("invalid key format") } + +func (a *Adaptor) KeyHelp() string { + return "xxx:xxx" +} diff --git a/service/aiproxy/relay/adaptor/zhipu/adaptor.go b/service/aiproxy/relay/adaptor/zhipu/adaptor.go index c14a67b623f..7d01f8138a5 100644 --- a/service/aiproxy/relay/adaptor/zhipu/adaptor.go +++ b/service/aiproxy/relay/adaptor/zhipu/adaptor.go @@ -17,11 +17,8 @@ type Adaptor struct { const baseURL = "https://open.bigmodel.cn/api/paas/v4" -func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - if meta.Channel.BaseURL == "" { - meta.Channel.BaseURL = baseURL - } - return a.Adaptor.GetRequestURL(meta) +func (a *Adaptor) GetBaseURL() string { + return baseURL } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { diff --git a/service/aiproxy/relay/channeltype/define.go b/service/aiproxy/relay/channeltype/define.go index 4a41be3e855..b91fbf64ad2 100644 --- a/service/aiproxy/relay/channeltype/define.go +++ b/service/aiproxy/relay/channeltype/define.go @@ -72,7 +72,16 @@ func GetAdaptor(channel int) (adaptor.Adaptor, bool) { return a, ok } -var ChannelNames = map[int]string{} +type AdaptorMeta struct { + Name string `json:"name"` + KeyHelp string `json:"keyHelp"` + DefaultBaseURL string `json:"defaultBaseUrl"` +} + +var ( + ChannelNames = map[int]string{} + ChannelMetas = map[int]AdaptorMeta{} +) func init() { names := make(map[string]struct{}) @@ -82,6 +91,18 @@ func init() { panic("duplicate channel name: " + name) } names[name] = struct{}{} + ChannelMetas[i] = AdaptorMeta{ + Name: name, + KeyHelp: getAdaptorKeyHelp(adaptor), + DefaultBaseURL: adaptor.GetBaseURL(), + } ChannelNames[i] = name } } + +func 
getAdaptorKeyHelp(a adaptor.Adaptor) string { + if keyValidator, ok := a.(adaptor.KeyValidator); ok { + return keyValidator.KeyHelp() + } + return "" +} diff --git a/service/aiproxy/relay/controller/dohelper.go b/service/aiproxy/relay/controller/dohelper.go index 884f40469b0..b0a288bd6d0 100644 --- a/service/aiproxy/relay/controller/dohelper.go +++ b/service/aiproxy/relay/controller/dohelper.go @@ -138,6 +138,10 @@ func prepareAndDoRequest(a adaptor.Adaptor, c *gin.Context, meta *meta.Meta) (*h return nil, openai.ErrorWrapperWithMessage("convert request failed: "+err.Error(), "convert_request_failed", http.StatusBadRequest) } + if meta.Channel.BaseURL == "" { + meta.Channel.BaseURL = a.GetBaseURL() + } + fullRequestURL, err := a.GetRequestURL(meta) if err != nil { return nil, openai.ErrorWrapperWithMessage("get request url failed: "+err.Error(), "get_request_url_failed", http.StatusBadRequest) diff --git a/service/aiproxy/router/api.go b/service/aiproxy/router/api.go index 4e15b74bf2e..7006a085c0f 100644 --- a/service/aiproxy/router/api.go +++ b/service/aiproxy/router/api.go @@ -71,6 +71,7 @@ func SetAPIRouter(router *gin.Engine) { channelsRoute.GET("/", controller.GetChannels) channelsRoute.GET("/all", controller.GetAllChannels) channelsRoute.GET("/type_names", controller.ChannelTypeNames) + channelsRoute.GET("/type_metas", controller.ChannelTypeMetas) channelsRoute.POST("/", controller.AddChannels) channelsRoute.GET("/search", controller.SearchChannels) channelsRoute.GET("/update_balance", controller.UpdateAllChannelsBalance) From 2ed0e0ddaa83c0d9444c2a6bb4d53edb8dc5343e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 11:19:46 +0800 Subject: [PATCH 137/167] feat: add channel key validate help message --- service/aiproxy/controller/channel.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index 29108bc2cf4..cdea695c4b7 100644 --- 
a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -152,7 +152,11 @@ func (r *AddChannelRequest) ToChannel() (*model.Channel, error) { if validator, ok := channelType.(adaptor.KeyValidator); ok { err := validator.ValidateKey(r.Key) if err != nil { - return nil, fmt.Errorf("%s [%s(%d)] invalid key: %w", r.Name, channeltype.ChannelNames[r.Type], r.Type, err) + keyHelp := validator.KeyHelp() + if keyHelp == "" { + return nil, fmt.Errorf("%s [%s(%d)] invalid key: %w", r.Name, channeltype.ChannelNames[r.Type], r.Type, err) + } + return nil, fmt.Errorf("%s [%s(%d)] invalid key: %w, %s", r.Name, channeltype.ChannelNames[r.Type], r.Type, err, keyHelp) } } return &model.Channel{ From 8b3c23e38d571d4a2162d6256c630f919dfe097c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 16:36:56 +0800 Subject: [PATCH 138/167] fix: channel config update --- service/aiproxy/controller/channel.go | 20 +++++++++---------- service/aiproxy/model/channel.go | 28 +++++++++++++++++---------- service/aiproxy/relay/meta/meta.go | 4 +++- 3 files changed, 31 insertions(+), 21 deletions(-) diff --git a/service/aiproxy/controller/channel.go b/service/aiproxy/controller/channel.go index cdea695c4b7..c8f194716ac 100644 --- a/service/aiproxy/controller/channel.go +++ b/service/aiproxy/controller/channel.go @@ -132,16 +132,16 @@ func GetChannel(c *gin.Context) { } type AddChannelRequest struct { - ModelMapping map[string]string `json:"model_mapping"` - Config model.ChannelConfig `json:"config"` - Name string `json:"name"` - Key string `json:"key"` - BaseURL string `json:"base_url"` - Other string `json:"other"` - Models []string `json:"models"` - Type int `json:"type"` - Priority int32 `json:"priority"` - Status int `json:"status"` + ModelMapping map[string]string `json:"model_mapping"` + Config *model.ChannelConfig `json:"config"` + Name string `json:"name"` + Key string `json:"key"` + BaseURL string `json:"base_url"` + Other string `json:"other"` + 
Models []string `json:"models"` + Type int `json:"type"` + Priority int32 `json:"priority"` + Status int `json:"status"` } func (r *AddChannelRequest) ToChannel() (*model.Channel, error) { diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 277f3c25ed4..6ba0cb827f4 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -45,7 +45,7 @@ type Channel struct { Status int `gorm:"default:1;index" json:"status"` Type int `gorm:"default:0;index" json:"type"` Priority int32 `json:"priority"` - Config ChannelConfig `gorm:"serializer:fastjson;type:text" json:"config"` + Config *ChannelConfig `gorm:"serializer:fastjson;type:text" json:"config,omitempty"` } func (c *Channel) BeforeDelete(tx *gorm.DB) (err error) { @@ -279,6 +279,7 @@ func UpdateChannel(channel *Channel) error { Model(channel). Omit("used_amount", "request_count", "created_at", "balance_updated_at", "balance"). Clauses(clause.Returning{}). + Where("id = ?", channel.ID). Updates(channel) return HandleUpdateResult(result, ErrChannelNotFound) } @@ -325,10 +326,13 @@ func (c *Channel) UpdateModelTest(testAt time.Time, model, actualModel string, m } func (c *Channel) UpdateBalance(balance float64) error { - result := DB.Model(c).Select("balance_updated_at", "balance").Updates(Channel{ - BalanceUpdatedAt: time.Now(), - Balance: balance, - }) + result := DB.Model(&Channel{}). + Select("balance_updated_at", "balance"). + Where("id = ?", c.ID). + Updates(Channel{ + BalanceUpdatedAt: time.Now(), + Balance: balance, + }) return HandleUpdateResult(result, ErrChannelNotFound) } @@ -347,15 +351,19 @@ func DeleteChannelsByIDs(ids []int) error { } func UpdateChannelStatusByID(id int, status int) error { - result := DB.Model(&Channel{}).Where("id = ?", id).Update("status", status) + result := DB.Model(&Channel{}). + Where("id = ?", id). 
+ Update("status", status) return HandleUpdateResult(result, ErrChannelNotFound) } func UpdateChannelUsedAmount(id int, amount float64, requestCount int) error { - result := DB.Model(&Channel{}).Where("id = ?", id).Updates(map[string]interface{}{ - "used_amount": gorm.Expr("used_amount + ?", amount), - "request_count": gorm.Expr("request_count + ?", requestCount), - }) + result := DB.Model(&Channel{}). + Where("id = ?", id). + Updates(map[string]interface{}{ + "used_amount": gorm.Expr("used_amount + ?", amount), + "request_count": gorm.Expr("request_count + ?", requestCount), + }) return HandleUpdateResult(result, ErrChannelNotFound) } diff --git a/service/aiproxy/relay/meta/meta.go b/service/aiproxy/relay/meta/meta.go index a707af773d4..bde574e3ab0 100644 --- a/service/aiproxy/relay/meta/meta.go +++ b/service/aiproxy/relay/meta/meta.go @@ -106,7 +106,9 @@ func (m *Meta) Reset(channel *model.Channel) { ID: channel.ID, Type: channel.Type, } - m.ChannelConfig = channel.Config + if channel.Config != nil { + m.ChannelConfig = *channel.Config + } m.ActualModel, _ = GetMappedModelName(m.OriginModel, channel.ModelMapping) m.ClearValues() } From 24703307cae9a0dfe4ecd028f5f8f1534e1914c7 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 16:49:46 +0800 Subject: [PATCH 139/167] fix: split think --- service/aiproxy/common/splitter/splitter.go | 8 +++++--- service/aiproxy/model/channel.go | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/common/splitter/splitter.go b/service/aiproxy/common/splitter/splitter.go index 152c888ee50..18ac4b9c283 100644 --- a/service/aiproxy/common/splitter/splitter.go +++ b/service/aiproxy/common/splitter/splitter.go @@ -44,6 +44,9 @@ func computeKMPNext(pattern []byte) []int { } func (s *Splitter) Process(data []byte) ([]byte, []byte) { + if len(data) == 0 { + return nil, nil + } switch s.state { case 0: s.buffer = append(s.buffer, data...) 
@@ -66,11 +69,10 @@ func (s *Splitter) Process(data []byte) ([]byte, []byte) { } s.state = 1 - if s.headLen == len(s.buffer) { + s.buffer = s.buffer[s.headLen:] + if len(s.buffer) == 0 { return nil, nil } - tailData := s.buffer[s.headLen:] - s.buffer = tailData return s.processSeekTail() case 1: s.buffer = append(s.buffer, data...) diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 6ba0cb827f4..0e04c1d3b0d 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -31,7 +31,7 @@ type ChannelConfig struct { type Channel struct { CreatedAt time.Time `gorm:"index" json:"created_at"` LastTestErrorAt time.Time `json:"last_test_error_at"` - ChannelTests []*ChannelTest `gorm:"foreignKey:ChannelID;references:ID" json:"channel_tests"` + ChannelTests []*ChannelTest `gorm:"foreignKey:ChannelID;references:ID" json:"channel_tests,omitempty"` BalanceUpdatedAt time.Time `json:"balance_updated_at"` ModelMapping map[string]string `gorm:"serializer:fastjson;type:text" json:"model_mapping"` Key string `gorm:"type:text;index" json:"key"` From 113900a62f5bfd29e731ab3d82cffc49a70455a6 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 17:31:36 +0800 Subject: [PATCH 140/167] fix: claude api --- service/aiproxy/controller/dashboard.go | 14 ++++---- service/aiproxy/model/log.go | 36 ++++++++++--------- .../relay/adaptor/anthropic/adaptor.go | 2 +- 3 files changed, 27 insertions(+), 25 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 86294f85c73..31a1aeed67d 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -35,7 +35,7 @@ func getDashboardTime(t string) (time.Time, time.Time, time.Duration) { return start, end, timeSpan } -func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time.Duration) []*model.HourlyChartData { +func fillGaps(data []*model.ChartData, start, end 
time.Time, timeSpan time.Duration) []*model.ChartData { if len(data) == 0 { return data } @@ -48,7 +48,7 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time } var firstIsZero bool if !firstAlignedTime.Equal(firstPoint) { - data = append([]*model.HourlyChartData{ + data = append([]*model.ChartData{ { Timestamp: firstAlignedTime.Unix(), }, @@ -64,13 +64,13 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time } var lastIsZero bool if !lastAlignedTime.Equal(lastPoint) { - data = append(data, &model.HourlyChartData{ + data = append(data, &model.ChartData{ Timestamp: lastAlignedTime.Unix(), }) lastIsZero = true } - result := make([]*model.HourlyChartData, 0, len(data)) + result := make([]*model.ChartData, 0, len(data)) result = append(result, data[0]) for i := 1; i < len(data); i++ { @@ -88,13 +88,13 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time if hourDiff > 3 { // Add point for hour after prev if i != 1 || (i == 1 && !firstIsZero) { - result = append(result, &model.HourlyChartData{ + result = append(result, &model.ChartData{ Timestamp: prev.Timestamp + int64(timeSpan.Seconds()), }) } // Add point for hour before curr if i != len(data)-1 || (i == len(data)-1 && !lastIsZero) { - result = append(result, &model.HourlyChartData{ + result = append(result, &model.ChartData{ Timestamp: curr.Timestamp - int64(timeSpan.Seconds()), }) } @@ -104,7 +104,7 @@ func fillGaps(data []*model.HourlyChartData, start, end time.Time, timeSpan time // Fill gaps of 2-3 hours with zero points for j := prev.Timestamp + int64(timeSpan.Seconds()); j < curr.Timestamp; j += int64(timeSpan.Seconds()) { - result = append(result, &model.HourlyChartData{ + result = append(result, &model.ChartData{ Timestamp: j, }) } diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 6b3be10e2c5..49ab769831c 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -61,6 
+61,7 @@ type Log struct { UsedAmount float64 `gorm:"index" json:"used_amount"` PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` + TotalTokens int `json:"total_tokens"` ChannelID int `gorm:"index" json:"channel"` Code int `gorm:"index" json:"code"` Mode int `json:"mode"` @@ -825,7 +826,7 @@ func DeleteGroupLogs(groupID string) (int64, error) { return result.RowsAffected, result.Error } -type HourlyChartData struct { +type ChartData struct { Timestamp int64 `json:"timestamp"` RequestCount int64 `json:"request_count"` UsedAmount float64 `json:"used_amount"` @@ -833,13 +834,13 @@ type HourlyChartData struct { } type DashboardResponse struct { - ChartData []*HourlyChartData `json:"chart_data"` - Models []string `json:"models"` - TotalCount int64 `json:"total_count"` - ExceptionCount int64 `json:"exception_count"` - UsedAmount float64 `json:"used_amount"` - RPM int64 `json:"rpm"` - TPM int64 `json:"tpm"` + ChartData []*ChartData `json:"chart_data"` + Models []string `json:"models"` + TotalCount int64 `json:"total_count"` + ExceptionCount int64 `json:"exception_count"` + UsedAmount float64 `json:"used_amount"` + RPM int64 `json:"rpm"` + TPM int64 `json:"tpm"` } type GroupDashboardResponse struct { @@ -860,8 +861,8 @@ func getTimeSpanFormat(timeSpan time.Duration) string { } } -func getChartData(group string, start, end time.Time, tokenName, modelName string, timeSpan time.Duration) ([]*HourlyChartData, error) { - var chartData []*HourlyChartData +func getChartData(group string, start, end time.Time, tokenName, modelName string, timeSpan time.Duration) ([]*ChartData, error) { + var chartData []*ChartData timeSpanFormat := getTimeSpanFormat(timeSpan) if timeSpanFormat == "" { @@ -891,6 +892,7 @@ func getChartData(group string, start, end time.Time, tokenName, modelName strin } err := query.Scan(&chartData).Error + return chartData, err } @@ -919,7 +921,7 @@ func getLogDistinctValues[T cmp.Ordered](field string, group string, start, 
end return values, nil } -func sumTotalCount(chartData []*HourlyChartData) int64 { +func sumTotalCount(chartData []*ChartData) int64 { var count int64 for _, data := range chartData { count += data.RequestCount @@ -927,7 +929,7 @@ func sumTotalCount(chartData []*HourlyChartData) int64 { return count } -func sumExceptionCount(chartData []*HourlyChartData) int64 { +func sumExceptionCount(chartData []*ChartData) int64 { var count int64 for _, data := range chartData { count += data.ExceptionCount @@ -935,7 +937,7 @@ func sumExceptionCount(chartData []*HourlyChartData) int64 { return count } -func sumUsedAmount(chartData []*HourlyChartData) float64 { +func sumUsedAmount(chartData []*ChartData) float64 { var amount decimal.Decimal for _, data := range chartData { amount = amount.Add(decimal.NewFromFloat(data.UsedAmount)) @@ -964,7 +966,7 @@ func getRPM(group string, end time.Time, tokenName, modelName string) (int64, er func getTPM(group string, end time.Time, tokenName, modelName string) (int64, error) { query := LogDB.Model(&Log{}). - Select("COALESCE(SUM(prompt_tokens + completion_tokens), 0)"). + Select("COALESCE(SUM(total_tokens), 0)"). Where("request_at >= ? AND request_at <= ?", end.Add(-time.Minute), end) if group != "" { @@ -990,7 +992,7 @@ func GetDashboardData(start, end time.Time, modelName string, timeSpan time.Dura } var ( - chartData []*HourlyChartData + chartData []*ChartData models []string rpm int64 tpm int64 @@ -1053,7 +1055,7 @@ func GetGroupDashboardData(group string, start, end time.Time, tokenName string, } var ( - chartData []*HourlyChartData + chartData []*ChartData tokenNames []string models []string rpm int64 @@ -1137,7 +1139,7 @@ func GetGroupModelTPM(group string, model string) (int64, error) { err := LogDB. Model(&Log{}). Where("group_id = ? AND request_at >= ? AND request_at <= ? AND model = ?", group, start, end, model). - Select("COALESCE(SUM(prompt_tokens + completion_tokens), 0)"). + Select("COALESCE(SUM(total_tokens), 0)"). 
Scan(&tpm).Error return tpm, err } diff --git a/service/aiproxy/relay/adaptor/anthropic/adaptor.go b/service/aiproxy/relay/adaptor/anthropic/adaptor.go index 3d892a419e1..6fbf59bc798 100644 --- a/service/aiproxy/relay/adaptor/anthropic/adaptor.go +++ b/service/aiproxy/relay/adaptor/anthropic/adaptor.go @@ -23,7 +23,7 @@ func (a *Adaptor) GetBaseURL() string { } func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { - return meta.Channel.BaseURL + "/v1/messages", nil + return meta.Channel.BaseURL + "/messages", nil } func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, c *gin.Context, req *http.Request) error { From 904edf2d2d44f1b3846461a7e0b2c8b6514b57f4 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 10 Feb 2025 17:36:33 +0800 Subject: [PATCH 141/167] fix: record total tokens --- service/aiproxy/model/log.go | 1 + 1 file changed, 1 insertion(+) diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 49ab769831c..0e359972508 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -171,6 +171,7 @@ func RecordConsumeLog( Code: code, PromptTokens: promptTokens, CompletionTokens: completionTokens, + TotalTokens: promptTokens + completionTokens, TokenID: tokenID, TokenName: tokenName, Model: modelName, From 93ff481f8069d415ae0f6cfbbc2836e3c11fb0ef Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 11 Feb 2025 11:15:16 +0800 Subject: [PATCH 142/167] chore: bump go mod --- service/aiproxy/go.mod | 85 ++++++++--------- service/aiproxy/go.sum | 211 +++++++++++++++++++++-------------------- 2 files changed, 148 insertions(+), 148 deletions(-) diff --git a/service/aiproxy/go.mod b/service/aiproxy/go.mod index 927fee00171..31d6aab73e7 100644 --- a/service/aiproxy/go.mod +++ b/service/aiproxy/go.mod @@ -5,12 +5,12 @@ go 1.22.7 replace github.com/labring/sealos/service/aiproxy => ../aiproxy require ( - cloud.google.com/go/iam v1.3.0 - github.com/aws/aws-sdk-go-v2 v1.32.7 - 
github.com/aws/aws-sdk-go-v2/credentials v1.17.48 - github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.1 + cloud.google.com/go/iam v1.3.1 + github.com/aws/aws-sdk-go-v2 v1.36.1 + github.com/aws/aws-sdk-go-v2/credentials v1.17.59 + github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.24.4 github.com/gin-contrib/cors v1.7.3 - github.com/gin-contrib/gzip v1.1.0 + github.com/gin-contrib/gzip v1.2.2 github.com/gin-gonic/gin v1.10.0 github.com/glebarez/sqlite v1.11.0 github.com/golang-jwt/jwt/v5 v5.2.1 @@ -31,44 +31,43 @@ require ( github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef github.com/stretchr/testify v1.10.0 - golang.org/x/image v0.23.0 - golang.org/x/sync v0.10.0 - google.golang.org/api v0.214.0 + golang.org/x/image v0.24.0 + golang.org/x/sync v0.11.0 + google.golang.org/api v0.220.0 gorm.io/driver/mysql v1.5.7 gorm.io/driver/postgres v1.5.11 gorm.io/gorm v1.25.12 ) require ( - cloud.google.com/go/auth v0.13.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/auth v0.14.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect - github.com/aws/smithy-go v1.22.1 // indirect - github.com/bytedance/sonic v1.12.6 // indirect - github.com/bytedance/sonic/loader v0.2.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect + github.com/aws/smithy-go v1.22.2 // indirect + github.com/bytedance/sonic v1.12.8 // indirect + 
github.com/bytedance/sonic/loader v0.2.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.4 // indirect - github.com/cloudwego/iasm v0.2.0 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dlclark/regexp2 v1.11.4 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/gabriel-vasile/mimetype v1.4.7 // indirect - github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/gin-contrib/sse v1.0.0 // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.23.0 // indirect + github.com/go-playground/validator/v10 v10.24.0 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect - github.com/goccy/go-json v0.10.4 // indirect - github.com/google/s2a-go v0.1.8 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect @@ -91,26 +90,26 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - 
go.opentelemetry.io/otel/trace v1.33.0 // indirect - golang.org/x/arch v0.12.0 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241230172942-26aa7a208def // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241230172942-26aa7a208def // indirect - google.golang.org/grpc v1.69.2 // indirect - google.golang.org/protobuf v1.36.1 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + golang.org/x/arch v0.14.0 // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/oauth2 v0.26.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect + golang.org/x/time v0.10.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect + google.golang.org/grpc v1.70.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.61.6 // indirect + modernc.org/libc v1.61.12 // indirect modernc.org/mathutil v1.7.1 // indirect - modernc.org/memory v1.8.0 // indirect - modernc.org/sqlite v1.34.4 // indirect + modernc.org/memory v1.8.2 // indirect + modernc.org/sqlite v1.34.5 // indirect ) diff --git a/service/aiproxy/go.sum 
b/service/aiproxy/go.sum index d6474617571..e203358904b 100644 --- a/service/aiproxy/go.sum +++ b/service/aiproxy/go.sum @@ -1,41 +1,40 @@ -cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= -cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= -cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= -cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0= +cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM= +cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= +cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.3.0 h1:4Wo2qTaGKFtajbLpF6I4mywg900u3TLlHDb6mriLDPU= -cloud.google.com/go/iam v1.3.0/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/iam v1.3.1 h1:KFf8SaT71yYq+sQtRISn90Gyhyf4X8RGgeAVC8XGf3E= +cloud.google.com/go/iam v1.3.1/go.mod h1:3wMtuyT4NcbnYNPLMBzYRFiEfjKfJlLVLrisE7bwm34= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= -github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= 
-github.com/aws/aws-sdk-go-v2/credentials v1.17.48 h1:IYdLD1qTJ0zanRavulofmqut4afs45mOWEI+MzZtTfQ= -github.com/aws/aws-sdk-go-v2/credentials v1.17.48/go.mod h1:tOscxHN3CGmuX9idQ3+qbkzrjVIx32lqDSU1/0d/qXs= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8= -github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.1 h1:rqrvjFScEwD7VfP4L0hhnrXyTkgUkpQWAdwOrW2slOo= -github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.23.1/go.mod h1:Vn5GopXsOAC6kbwzjfM6V37dxc4mo4J4xCRiF27pSZA= -github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= -github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= +github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.24.4 h1:NYHDOBe0ZIeQfaPSPRaQym2NePzA+QYM3O/Oh4IznKg= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.24.4/go.mod h1:AD+JAcEr9fNzFcfKs3CINKBdWGFK7R+/uZ+VdJRhK2U= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/bytedance/sonic v1.12.6 h1:/isNmCUF2x3Sh8RAp/4mh4ZGkcFAX/hLrzrK3AvpRzk= -github.com/bytedance/sonic v1.12.6/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= +github.com/bytedance/sonic v1.12.8 h1:4xYRVRlXIgvSZ4e8iVTlMF5szgpXd4AfvuWgA8I8lgs= +github.com/bytedance/sonic v1.12.8/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E= -github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0= +github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= -github.com/cloudwego/base64x v0.1.4/go.mod 
h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= -github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -48,14 +47,14 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= -github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/gin-contrib/cors v1.7.3 h1:hV+a5xp8hwJoTw7OY+a70FsL8JkVVFTXw9EcfrYUdns= github.com/gin-contrib/cors v1.7.3/go.mod h1:M3bcKZhxzsvI+rlRSkkxHyljJt1ESd93COUvemZ79j4= -github.com/gin-contrib/gzip v1.1.0 h1:kVw7Nr9M+Z6Ch4qo7aGMbiqxDeyQFru+07MgAcUF62M= -github.com/gin-contrib/gzip v1.1.0/go.mod h1:iHJXCup4CWiKyPUEl+GwkHjchl+YyYuMKbOCiXujPIA= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-contrib/gzip v1.2.2 h1:iUU/EYCM8ENfkjmZaVrxbjF/ZC267Iqv5S0MMCMEliI= +github.com/gin-contrib/gzip v1.2.2/go.mod 
h1:C1a5cacjlDsS20cKnHlZRCPUu57D3qH6B2pV0rl+Y/s= +github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= +github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= @@ -73,13 +72,13 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= -github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg= +github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= -github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= 
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -89,8 +88,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= -github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= @@ -173,11 +172,13 @@ github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef/go.mod h1:nXTWP6+g github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= @@ -186,56 +187,56 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= 
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg= -golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= -golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68= -golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4= +golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs= +golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo= +golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ= +golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod 
h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= -google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= -google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= -google.golang.org/genproto/googleapis/api v0.0.0-20241230172942-26aa7a208def h1:0Km0hi+g2KXbXL0+riZzSCKz23f4MmwicuEb00JeonI= -google.golang.org/genproto/googleapis/api v0.0.0-20241230172942-26aa7a208def/go.mod h1:u2DoMSpCXjrzqLdobRccQMc9wrnMAJ1DLng0a2yqM2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241230172942-26aa7a208def h1:4P81qv5JXI/sDNae2ClVx88cgDDA6DPilADkG9tYKz8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241230172942-26aa7a208def/go.mod h1:bdAgzvd4kFrpykc5/AC2eLUiegK9T/qxZHD4hXYf/ho= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod 
h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns= +google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY= +google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 h1:L9JNMl/plZH9wmzQUHleO/ZZDSN+9Gh41wPczNy+5Fk= +google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -249,28 +250,28 @@ gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSk gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= -modernc.org/cc/v4 v4.24.2 h1:uektamHbSXU7egelXcyVpMaaAsrRH4/+uMKUQAQUdOw= -modernc.org/cc/v4 v4.24.2/go.mod h1:T1lKJZhXIi2VSqGBiB4LIbKs9NsKTbUXj4IDrmGqtTI= -modernc.org/ccgo/v4 v4.23.5 h1:6uAwu8u3pnla3l/+UVUrDDO1HIGxHTYmFH6w+X9nsyw= -modernc.org/ccgo/v4 v4.23.5/go.mod h1:FogrWfBdzqLWm1ku6cfr4IzEFouq2fSAPf6aSAHdAJQ= +modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0= +modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo= +modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo= modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= -modernc.org/gc/v2 v2.6.0 h1:Tiw3pezQj7PfV8k4Dzyu/vhRHR2e92kOXtTFU8pbCl4= -modernc.org/gc/v2 v2.6.0/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= -modernc.org/libc v1.61.6 h1:L2jW0wxHPCyHK0YSHaGaVlY0WxjpG/TTVdg6gRJOPqw= -modernc.org/libc v1.61.6/go.mod h1:G+DzuaCcReUYYg4nNSfigIfTDCENdj9EByglvaRx53A= +modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw= +modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/libc v1.61.12 h1:Fsnh0A7XLXylYNwIOJmKux9PhnfrIvMaMnjuyJ1t/f4= +modernc.org/libc v1.61.12/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E= modernc.org/mathutil v1.7.1 
h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= -modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= -modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= -modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= -modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= -modernc.org/sqlite v1.34.4 h1:sjdARozcL5KJBvYQvLlZEmctRgW9xqIZc2ncN7PU0P8= -modernc.org/sqlite v1.34.4/go.mod h1:3QQFCG2SEMtc2nv+Wq4cQCH7Hjcg+p/RMlS1XK+zwbk= -modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= -modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI= +modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g= +modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= From 257ca6ca9b1952f51d99bd9b21c09698c505b89f Mon 
Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 11 Feb 2025 11:15:24 +0800 Subject: [PATCH 143/167] chore: bump go mod --- service/go.work.sum | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/service/go.work.sum b/service/go.work.sum index 792537d060c..05bf1d455c4 100644 --- a/service/go.work.sum +++ b/service/go.work.sum @@ -1,6 +1,7 @@ cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= +cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -693,6 +694,7 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.12.0-rc.0 h1:wX/F5huJxH9APBkhKSEAqaiZsuBvbbDnyBROZAqsSaY= @@ -787,7 +789,10 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bytedance/sonic v1.12.6/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.2/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= @@ -925,6 +930,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= @@ -945,6 +951,7 @@ github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNa github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-playground/validator/v10 v10.23.0/go.mod 
h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= @@ -952,6 +959,7 @@ github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -965,6 +973,7 @@ github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1497,6 +1506,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= 
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/contrib/detectors/gcp v1.32.0/go.mod h1:TVqo0Sda4Cv8gCIixd7LuLwW4EylumVWfhjZJjDD4DU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= @@ -1510,6 +1520,7 @@ go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9 go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= @@ -1525,17 +1536,20 @@ go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOa go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.10.0 
h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= @@ -1553,6 +1567,7 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1565,6 +1580,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1655,6 +1672,8 @@ golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1678,6 +1697,7 @@ golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2 golang.org/x/oauth2 v0.18.0 
h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1689,6 +1709,7 @@ golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1743,6 +1764,8 @@ golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808 h1:+Kc94D8UVEVxJnLXp/+FMfqQARZtWHfVrcRtcG8aT3g= golang.org/x/telemetry 
v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk= @@ -1751,6 +1774,8 @@ golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1760,6 +1785,7 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1945,6 +1971,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1: google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= 
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc h1:g3hIDl0jRNd9PPTs2uBzYuaD5mQuwOkZY0vSc0LR32o= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405 h1:o4S3HvTUEXgRsNSUQsALDVog0O9F/U1JJlHmmUN8Uas= @@ -1970,8 +1997,10 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2022,6 +2051,9 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From b11b4c6e2c1397573ebb5e579cf19874c440a5b8 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 11 Feb 2025 15:38:43 +0800 Subject: [PATCH 144/167] feat: qwen open source vl models --- .../aiproxy/relay/adaptor/ali/constants.go | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index baca8474d9d..78c76a92536 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -379,6 +379,48 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigToolChoice(true), ), }, + { + Model: "qwen2.5-vl-72b-instruct", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.016, + OutputPrice: 0.048, + RPM: 60, + Config: model.NewModelConfig( + 
model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigVision(true), + ), + }, + { + Model: "qwen2.5-vl-7b-instruct", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.002, + OutputPrice: 0.005, + RPM: 1200, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigVision(true), + ), + }, + { + Model: "qwen2.5-vl-3b-instruct", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.0012, + OutputPrice: 0.0036, + RPM: 1200, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(131072), + model.WithModelConfigMaxInputTokens(129024), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigVision(true), + ), + }, // 通义千问2 { @@ -423,6 +465,20 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigToolChoice(true), ), }, + { + Model: "qwen2-vl-72b-instruct", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.016, + OutputPrice: 0.048, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(32768), + model.WithModelConfigMaxInputTokens(30720), + model.WithModelConfigMaxOutputTokens(2048), + model.WithModelConfigVision(true), + ), + }, // 通义千问1.5 { From 9f10f790547772bf2b0ab5e87557b71da61e7298 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 12 Feb 2025 11:26:08 +0800 Subject: [PATCH 145/167] fix: qwen2.5 vl tool choice --- service/aiproxy/relay/adaptor/ali/constants.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index 78c76a92536..8d96969e1e4 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -391,6 
+391,7 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigMaxInputTokens(129024), model.WithModelConfigMaxOutputTokens(8192), model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), ), }, { @@ -405,6 +406,7 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigMaxInputTokens(129024), model.WithModelConfigMaxOutputTokens(8192), model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), ), }, { @@ -419,6 +421,7 @@ var ModelList = []*model.ModelConfig{ model.WithModelConfigMaxInputTokens(129024), model.WithModelConfigMaxOutputTokens(8192), model.WithModelConfigVision(true), + model.WithModelConfigToolChoice(true), ), }, From 6e9c51b101c50e54681903d12d58c57594980525 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 12 Feb 2025 17:05:29 +0800 Subject: [PATCH 146/167] feat: stt audio duration --- service/aiproxy/Dockerfile | 16 +++- service/aiproxy/common/audio/audio.go | 76 ++++++++++++++++++ service/aiproxy/common/config/config.go | 1 + .../aiproxy/relay/adaptor/ali/stt-realtime.go | 25 ++---- service/aiproxy/relay/controller/stt.go | 78 +++++++++++++++++++ 5 files changed, 174 insertions(+), 22 deletions(-) create mode 100644 service/aiproxy/common/audio/audio.go diff --git a/service/aiproxy/Dockerfile b/service/aiproxy/Dockerfile index f078274204e..8602bc0814b 100644 --- a/service/aiproxy/Dockerfile +++ b/service/aiproxy/Dockerfile @@ -1,7 +1,15 @@ -FROM gcr.io/distroless/static:nonroot +FROM alpine:latest + ARG TARGETARCH -COPY bin/service-aiproxy-$TARGETARCH /manager +COPY bin/service-aiproxy-$TARGETARCH /aiproxy + +ENV PUID=0 PGID=0 UMASK=022 + +ENV FFPROBE_ENABLED=true + EXPOSE 3000 -USER 65532:65532 -ENTRYPOINT ["/manager"] \ No newline at end of file +RUN apk add --no-cache ca-certificates tzdata ffmpeg && \ + rm -rf /var/cache/apk/* + +ENTRYPOINT ["/aiproxy"] diff --git a/service/aiproxy/common/audio/audio.go b/service/aiproxy/common/audio/audio.go new file mode 100644 index 00000000000..d7b2d416dee 
--- /dev/null +++ b/service/aiproxy/common/audio/audio.go @@ -0,0 +1,76 @@ +package audio + +import ( + "errors" + "io" + "os/exec" + "strconv" + "strings" + + "github.com/labring/sealos/service/aiproxy/common/config" +) + +var ErrAudioDurationNAN = errors.New("audio duration is N/A") + +func GetAudioDuration(audio io.Reader) (float64, error) { + if !config.FfprobeEnabled { + return 0, nil + } + + ffprobeCmd := exec.Command( + "ffprobe", + "-v", "error", + "-select_streams", "a:0", + "-show_entries", "stream=duration", + "-of", "default=noprint_wrappers=1:nokey=1", + "-i", "-", + ) + ffprobeCmd.Stdin = audio + output, err := ffprobeCmd.Output() + if err != nil { + return 0, err + } + + str := strings.TrimSpace(string(output)) + + if str == "" || str == "N/A" { + return 0, ErrAudioDurationNAN + } + + duration, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0, err + } + return duration, nil +} + +func GetAudioDurationFromFilePath(filePath string) (float64, error) { + if !config.FfprobeEnabled { + return 0, nil + } + + ffprobeCmd := exec.Command( + "ffprobe", + "-v", "error", + "-select_streams", "a:0", + "-show_entries", "format=duration", + "-of", "default=noprint_wrappers=1:nokey=1", + "-i", filePath, + ) + output, err := ffprobeCmd.Output() + if err != nil { + return 0, err + } + + str := strings.TrimSpace(string(output)) + + if str == "" || str == "N/A" { + return 0, ErrAudioDurationNAN + } + + duration, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0, err + } + return duration, nil +} diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index 4716d8f6919..1e53220c2e4 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -18,6 +18,7 @@ var ( DisableAutoMigrateDB = env.Bool("DISABLE_AUTO_MIGRATE_DB", false) OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false) AdminKey = os.Getenv("ADMIN_KEY") + FfprobeEnabled = env.Bool("FFPROBE_ENABLED", 
false) ) var ( diff --git a/service/aiproxy/relay/adaptor/ali/stt-realtime.go b/service/aiproxy/relay/adaptor/ali/stt-realtime.go index 46ca83892ae..4c735c1621d 100644 --- a/service/aiproxy/relay/adaptor/ali/stt-realtime.go +++ b/service/aiproxy/relay/adaptor/ali/stt-realtime.go @@ -2,7 +2,6 @@ package ali import ( "bytes" - "errors" "io" "net/http" @@ -64,24 +63,14 @@ func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Hea if err != nil { return "", nil, nil, err } - - var audioData []byte - if files, ok := request.MultipartForm.File["file"]; !ok { - return "", nil, nil, errors.New("audio file is required") - } else if len(files) == 1 { - file, err := files[0].Open() - if err != nil { - return "", nil, nil, err - } - audioData, err = io.ReadAll(file) - file.Close() - if err != nil { - return "", nil, nil, err - } - } else { - return "", nil, nil, errors.New("audio file is required") + audioFile, _, err := request.FormFile("file") + if err != nil { + return "", nil, nil, err + } + audioData, err := io.ReadAll(audioFile) + if err != nil { + return "", nil, nil, err } - sttRequest := STTMessage{ Header: STTHeader{ Action: "run-task", diff --git a/service/aiproxy/relay/controller/stt.go b/service/aiproxy/relay/controller/stt.go index 6b570d4547c..c30d9dc0384 100644 --- a/service/aiproxy/relay/controller/stt.go +++ b/service/aiproxy/relay/controller/stt.go @@ -1,9 +1,15 @@ package controller import ( + "errors" "fmt" + "math" + "mime/multipart" + "os" "github.com/gin-gonic/gin" + "github.com/labring/sealos/service/aiproxy/common/audio" + "github.com/labring/sealos/service/aiproxy/middleware" "github.com/labring/sealos/service/aiproxy/relay/meta" relaymodel "github.com/labring/sealos/service/aiproxy/relay/model" ) @@ -15,9 +21,81 @@ func RelaySTTHelper(meta *meta.Meta, c *gin.Context) *relaymodel.ErrorWithStatus return nil, fmt.Errorf("model price not found: %s", meta.OriginModel) } + audioFile, err := c.FormFile("file") + if err != nil { + 
return nil, fmt.Errorf("failed to get audio file: %w", err) + } + + duration, err := getAudioDuration(audioFile) + if err != nil { + return nil, err + } + + durationInt := int(math.Ceil(duration)) + log := middleware.GetLogger(c) + log.Data["duration"] = durationInt + return &PreCheckGroupBalanceReq{ + InputTokens: durationInt, InputPrice: price, OutputPrice: completionPrice, }, nil }) } + +func getAudioDuration(audioFile *multipart.FileHeader) (float64, error) { + // Try to get duration directly from audio data + audioData, err := audioFile.Open() + if err != nil { + return 0, fmt.Errorf("failed to open audio file: %w", err) + } + defer audioData.Close() + + // If it's already an os.File, use file path method + if osFile, ok := audioData.(*os.File); ok { + duration, err := audio.GetAudioDurationFromFilePath(osFile.Name()) + if err != nil { + return 0, fmt.Errorf("failed to get audio duration from temp file: %w", err) + } + return duration, nil + } + + // Try to get duration from audio data + duration, err := audio.GetAudioDuration(audioData) + if err == nil { + return duration, nil + } + + // If duration is NaN, create temp file and try again + if errors.Is(err, audio.ErrAudioDurationNAN) { + return getDurationFromTempFile(audioFile) + } + + return 0, fmt.Errorf("failed to get audio duration: %w", err) +} + +func getDurationFromTempFile(audioFile *multipart.FileHeader) (float64, error) { + tempFile, err := os.CreateTemp("", "audio.wav") + if err != nil { + return 0, fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(tempFile.Name()) + defer tempFile.Close() + + newAudioData, err := audioFile.Open() + if err != nil { + return 0, fmt.Errorf("failed to open audio file: %w", err) + } + defer newAudioData.Close() + + if _, err = tempFile.ReadFrom(newAudioData); err != nil { + return 0, fmt.Errorf("failed to read from temp file: %w", err) + } + + duration, err := audio.GetAudioDurationFromFilePath(tempFile.Name()) + if err != nil { + return 0, 
fmt.Errorf("failed to get audio duration from temp file: %w", err) + } + + return duration, nil +} From 7568984b0903887f18192222903e88afc7f5b2fc Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 12 Feb 2025 17:35:47 +0800 Subject: [PATCH 147/167] feat: ali paraformer price --- service/aiproxy/relay/adaptor/ali/constants.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index 8d96969e1e4..f36ca21415c 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -810,10 +810,11 @@ var ModelList = []*model.ModelConfig{ }, { - Model: "paraformer-realtime-v2", - Type: relaymode.AudioTranscription, - Owner: model.ModelOwnerAlibaba, - RPM: 20, + Model: "paraformer-realtime-v2", + Type: relaymode.AudioTranscription, + Owner: model.ModelOwnerAlibaba, + RPM: 20, + InputPrice: 0.24, Config: model.NewModelConfig( model.WithModelConfigMaxInputTokens(10000), model.WithModelConfigSupportFormats([]string{"pcm", "wav", "opus", "speex", "aac", "amr"}), From 68b482d4941c032cc5368d32bca3633c4fa094c7 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 13 Feb 2025 10:56:16 +0800 Subject: [PATCH 148/167] fix: stt usage --- service/aiproxy/relay/adaptor/openai/stt.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/relay/adaptor/openai/stt.go b/service/aiproxy/relay/adaptor/openai/stt.go index dcd158fb333..42ffb783650 100644 --- a/service/aiproxy/relay/adaptor/openai/stt.go +++ b/service/aiproxy/relay/adaptor/openai/stt.go @@ -113,7 +113,12 @@ func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Us if err != nil { return nil, ErrorWrapper(err, "get_text_from_body_err", http.StatusInternalServerError) } - completionTokens := CountTokenText(text, meta.ActualModel) + var promptTokens int + if meta.InputTokens > 0 { + promptTokens = 
meta.InputTokens + } else { + promptTokens = CountTokenText(text, meta.ActualModel) + } for k, v := range resp.Header { c.Writer.Header().Set(k, v[0]) @@ -124,9 +129,9 @@ func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Us } return &model.Usage{ - PromptTokens: 0, - CompletionTokens: completionTokens, - TotalTokens: completionTokens, + PromptTokens: promptTokens, + CompletionTokens: 0, + TotalTokens: promptTokens, }, nil } From 12fcba64bcefec99c6f53e7f3fb3058938799fc6 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 13 Feb 2025 20:11:23 +0800 Subject: [PATCH 149/167] feat: qwen mt --- service/aiproxy/common/balance/balance.go | 8 ++++-- service/aiproxy/common/balance/mock.go | 8 ++++-- service/aiproxy/common/balance/sealos.go | 5 ++-- service/aiproxy/controller/channel-billing.go | 2 +- service/aiproxy/middleware/distributor.go | 2 +- service/aiproxy/relay/adaptor/ali/adaptor.go | 24 ++++++++++------- .../aiproxy/relay/adaptor/ali/constants.go | 27 +++++++++++++++++++ service/aiproxy/relay/controller/consume.go | 2 +- 8 files changed, 60 insertions(+), 18 deletions(-) diff --git a/service/aiproxy/common/balance/balance.go b/service/aiproxy/common/balance/balance.go index b1eac62231a..a1cce170953 100644 --- a/service/aiproxy/common/balance/balance.go +++ b/service/aiproxy/common/balance/balance.go @@ -1,9 +1,13 @@ package balance -import "context" +import ( + "context" + + "github.com/labring/sealos/service/aiproxy/model" +) type GroupBalance interface { - GetGroupRemainBalance(ctx context.Context, group string) (float64, PostGroupConsumer, error) + GetGroupRemainBalance(ctx context.Context, group model.GroupCache) (float64, PostGroupConsumer, error) } type PostGroupConsumer interface { diff --git a/service/aiproxy/common/balance/mock.go b/service/aiproxy/common/balance/mock.go index 8cb2ddc7e86..f38c9d257a4 100644 --- a/service/aiproxy/common/balance/mock.go +++ b/service/aiproxy/common/balance/mock.go @@ -1,6 +1,10 @@ package 
balance -import "context" +import ( + "context" + + "github.com/labring/sealos/service/aiproxy/model" +) var _ GroupBalance = (*MockGroupBalance)(nil) @@ -14,7 +18,7 @@ func NewMockGroupBalance() *MockGroupBalance { return &MockGroupBalance{} } -func (q *MockGroupBalance) GetGroupRemainBalance(_ context.Context, _ string) (float64, PostGroupConsumer, error) { +func (q *MockGroupBalance) GetGroupRemainBalance(_ context.Context, _ model.GroupCache) (float64, PostGroupConsumer, error) { return mockBalance, q, nil } diff --git a/service/aiproxy/common/balance/sealos.go b/service/aiproxy/common/balance/sealos.go index 872f69fb995..c49894cf300 100644 --- a/service/aiproxy/common/balance/sealos.go +++ b/service/aiproxy/common/balance/sealos.go @@ -14,6 +14,7 @@ import ( "github.com/labring/sealos/service/aiproxy/common" "github.com/labring/sealos/service/aiproxy/common/conv" "github.com/labring/sealos/service/aiproxy/common/env" + "github.com/labring/sealos/service/aiproxy/model" "github.com/redis/go-redis/v9" "github.com/shopspring/decimal" log "github.com/sirupsen/logrus" @@ -145,10 +146,10 @@ func cacheDecreaseGroupBalance(ctx context.Context, group string, amount int64) return decreaseGroupBalanceScript.Run(ctx, common.RDB, []string{fmt.Sprintf(sealosGroupBalanceKey, group)}, amount).Err() } -func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group string) (float64, PostGroupConsumer, error) { +func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group model.GroupCache) (float64, PostGroupConsumer, error) { var errs []error for i := 0; ; i++ { - balance, consumer, err := s.getGroupRemainBalance(ctx, group) + balance, consumer, err := s.getGroupRemainBalance(ctx, group.ID) if err == nil { return balance, consumer, nil } diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index f569b60f1cf..00f6c34a4e5 100644 --- a/service/aiproxy/controller/channel-billing.go +++ 
b/service/aiproxy/controller/channel-billing.go @@ -104,7 +104,7 @@ func AutomaticallyUpdateChannels(frequency int) { // subscription func GetSubscription(c *gin.Context) { group := middleware.GetGroup(c) - b, _, err := balance.Default.GetGroupRemainBalance(c, group.ID) + b, _, err := balance.Default.GetGroupRemainBalance(c, *group) if err != nil { log.Errorf("get group (%s) balance failed: %s", group.ID, err) c.JSON(http.StatusOK, middleware.APIResponse{ diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 3d522b35874..0134d0fd14c 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -125,7 +125,7 @@ type GroupBalanceConsumer struct { func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { log := GetLogger(c) - groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), group.ID) + groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), *group) if err != nil { log.Errorf("get group (%s) balance error: %v", group.ID, err) abortWithMessage(c, http.StatusInternalServerError, "get group balance error") diff --git a/service/aiproxy/relay/adaptor/ali/adaptor.go b/service/aiproxy/relay/adaptor/ali/adaptor.go index ce050f2a349..7ac0f4c9a62 100644 --- a/service/aiproxy/relay/adaptor/ali/adaptor.go +++ b/service/aiproxy/relay/adaptor/ali/adaptor.go @@ -73,19 +73,25 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht } } +func ignoreTest(meta *meta.Meta) bool { + return meta.IsChannelTest && + (strings.Contains(meta.ActualModel, "-ocr") || + strings.HasPrefix(meta.ActualModel, "qwen-mt-")) +} + func (a *Adaptor) DoRequest(meta *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error) { + if ignoreTest(meta) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(nil)), + }, nil + } switch meta.Mode { case 
relaymode.AudioSpeech: return TTSDoRequest(meta, req) case relaymode.AudioTranscription: return STTDoRequest(meta, req) case relaymode.ChatCompletions: - if meta.IsChannelTest && strings.Contains(meta.ActualModel, "-ocr") { - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(bytes.NewReader(nil)), - }, nil - } fallthrough default: return utils.DoRequest(req) @@ -93,15 +99,15 @@ func (a *Adaptor) DoRequest(meta *meta.Meta, _ *gin.Context, req *http.Request) } func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode) { + if ignoreTest(meta) { + return &relaymodel.Usage{}, nil + } switch meta.Mode { case relaymode.Embeddings: usage, err = EmbeddingsHandler(meta, c, resp) case relaymode.ImagesGenerations: usage, err = ImageHandler(meta, c, resp) case relaymode.ChatCompletions: - if meta.IsChannelTest && strings.Contains(meta.ActualModel, "-ocr") { - return nil, nil - } usage, err = openai.DoResponse(meta, c, resp) case relaymode.Rerank: usage, err = RerankHandler(meta, c, resp) diff --git a/service/aiproxy/relay/adaptor/ali/constants.go b/service/aiproxy/relay/adaptor/ali/constants.go index f36ca21415c..d68f360e18c 100644 --- a/service/aiproxy/relay/adaptor/ali/constants.go +++ b/service/aiproxy/relay/adaptor/ali/constants.go @@ -728,6 +728,33 @@ var ModelList = []*model.ModelConfig{ ), }, + { + Model: "qwen-mt-plus", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.015, + OutputPrice: 0.045, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(2048), + model.WithModelConfigMaxInputTokens(1024), + model.WithModelConfigMaxOutputTokens(1024), + ), + }, + { + Model: "qwen-mt-turbo", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerAlibaba, + InputPrice: 0.001, + OutputPrice: 0.003, + RPM: 60, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(2048), + 
model.WithModelConfigMaxInputTokens(1024), + model.WithModelConfigMaxOutputTokens(1024), + ), + }, + // stable-diffusion { Model: "stable-diffusion-xl", diff --git a/service/aiproxy/relay/controller/consume.go b/service/aiproxy/relay/controller/consume.go index ce1714cb77a..f2bfb86ced4 100644 --- a/service/aiproxy/relay/controller/consume.go +++ b/service/aiproxy/relay/controller/consume.go @@ -49,7 +49,7 @@ func getGroupBalance(ctx *gin.Context, meta *meta.Meta) (float64, balance.PostGr groupBalance, ok := ctx.Get(ctxkey.GroupBalance) if !ok { - return balance.Default.GetGroupRemainBalance(ctx.Request.Context(), meta.Group.ID) + return balance.Default.GetGroupRemainBalance(ctx.Request.Context(), *meta.Group) } groupBalanceConsumer := groupBalance.(*middleware.GroupBalanceConsumer) From 942bb2546a14ac551ab2a01894bd3a35bf83369b Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 14 Feb 2025 14:12:32 +0800 Subject: [PATCH 150/167] fix: render when split skip --- service/aiproxy/relay/adaptor/openai/main.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index 96118730202..7d81d9f754f 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -136,17 +136,24 @@ func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, ren for _, choice := range choices { choiceMap, ok := choice.(map[string]any) if !ok { + renderCallback(data) continue } delta, ok := choiceMap["delta"].(map[string]any) if !ok { + renderCallback(data) continue } content, ok := delta["content"].(string) if !ok { + renderCallback(data) continue } think, remaining := thinkSplitter.Process(conv.StringToBytes(content)) + if len(think) == 0 && len(remaining) == 0 { + renderCallback(data) + continue + } if len(think) > 0 { delta["content"] = "" delta["reasoning_content"] = conv.BytesToString(think) From 
ee088149a1979ea3fe053d94fb81521a70f4ca46 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Fri, 14 Feb 2025 18:06:14 +0800 Subject: [PATCH 151/167] feat: sealos realname check --- service/aiproxy/common/balance/balance.go | 1 - service/aiproxy/common/balance/mock.go | 4 - service/aiproxy/common/balance/sealos.go | 117 +++++++++++++++--- service/aiproxy/controller/channel-billing.go | 10 +- service/aiproxy/middleware/distributor.go | 4 + 5 files changed, 112 insertions(+), 24 deletions(-) diff --git a/service/aiproxy/common/balance/balance.go b/service/aiproxy/common/balance/balance.go index a1cce170953..faf7172638e 100644 --- a/service/aiproxy/common/balance/balance.go +++ b/service/aiproxy/common/balance/balance.go @@ -12,7 +12,6 @@ type GroupBalance interface { type PostGroupConsumer interface { PostGroupConsume(ctx context.Context, tokenName string, usage float64) (float64, error) - GetBalance(ctx context.Context) (float64, error) } var Default GroupBalance = NewMockGroupBalance() diff --git a/service/aiproxy/common/balance/mock.go b/service/aiproxy/common/balance/mock.go index f38c9d257a4..fe94e2247ca 100644 --- a/service/aiproxy/common/balance/mock.go +++ b/service/aiproxy/common/balance/mock.go @@ -25,7 +25,3 @@ func (q *MockGroupBalance) GetGroupRemainBalance(_ context.Context, _ model.Grou func (q *MockGroupBalance) PostGroupConsume(_ context.Context, _ string, usage float64) (float64, error) { return usage, nil } - -func (q *MockGroupBalance) GetBalance(_ context.Context) (float64, error) { - return mockBalance, nil -} diff --git a/service/aiproxy/common/balance/sealos.go b/service/aiproxy/common/balance/sealos.go index c49894cf300..9f7f2f316c7 100644 --- a/service/aiproxy/common/balance/sealos.go +++ b/service/aiproxy/common/balance/sealos.go @@ -26,6 +26,7 @@ const ( appType = "LLM-TOKEN" sealosRequester = "sealos-admin" sealosGroupBalanceKey = "sealos:balance:%s" + sealosUserRealNameKey = "sealos:realName:%s" getBalanceRetry = 3 ) @@ -39,6 +40,11 @@ var ( 
sealosCacheExpire = 3 * time.Minute ) +var ( + sealosCheckRealNameEnable = env.Bool("BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE", false) + sealosNoRealNameUsedAmountLimit = env.Float64("BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT", 1) +) + type Sealos struct { accountURL string } @@ -146,12 +152,20 @@ func cacheDecreaseGroupBalance(ctx context.Context, group string, amount int64) return decreaseGroupBalanceScript.Run(ctx, common.RDB, []string{fmt.Sprintf(sealosGroupBalanceKey, group)}, amount).Err() } +var ErrRealNameUsedAmountLimit = errors.New("real name used amount limit reached") + func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group model.GroupCache) (float64, PostGroupConsumer, error) { var errs []error for i := 0; ; i++ { - balance, consumer, err := s.getGroupRemainBalance(ctx, group.ID) + balance, userUID, err := s.getGroupRemainBalance(ctx, group.ID) if err == nil { - return balance, consumer, nil + if sealosCheckRealNameEnable && + !s.checkRealName(ctx, userUID) && + group.UsedAmount > sealosNoRealNameUsedAmountLimit { + return 0, nil, ErrRealNameUsedAmountLimit + } + return decimal.NewFromInt(balance).Div(decimalBalancePrecision).InexactFloat64(), + newSealosPostGroupConsumer(s.accountURL, group.ID, userUID), nil } errs = append(errs, err) if i == getBalanceRetry-1 { @@ -161,26 +175,105 @@ func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group model.GroupCac } } +func cacheGetUserRealName(ctx context.Context, userUID string) (bool, error) { + if !common.RedisEnabled || !sealosRedisCacheEnable { + return true, redis.Nil + } + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + realName, err := common.RDB.Get(ctx, fmt.Sprintf(sealosUserRealNameKey, userUID)).Bool() + if err != nil { + return false, err + } + return realName, nil +} + +func cacheSetUserRealName(ctx context.Context, userUID string, realName bool) error { + if !common.RedisEnabled || !sealosRedisCacheEnable { + return nil + } + ctx, cancel := 
context.WithTimeout(ctx, 5*time.Second) + defer cancel() + var expireTime time.Duration + if realName { + expireTime = time.Hour * 12 + } else { + expireTime = time.Minute * 1 + } + return common.RDB.Set(ctx, fmt.Sprintf(sealosUserRealNameKey, userUID), realName, expireTime).Err() +} + +func (s *Sealos) checkRealName(ctx context.Context, userUID string) bool { + if cache, err := cacheGetUserRealName(ctx, userUID); err == nil { + return cache + } else if err != nil && !errors.Is(err, redis.Nil) { + log.Errorf("get user (%s) real name cache failed: %s", userUID, err) + } + + realName, err := s.fetchRealNameFromAPI(ctx, userUID) + if err != nil { + log.Errorf("fetch user (%s) real name failed: %s", userUID, err) + return true + } + + if err := cacheSetUserRealName(ctx, userUID, realName); err != nil { + log.Errorf("set user (%s) real name cache failed: %s", userUID, err) + } + + return realName +} + +type sealosGetRealNameInfoResp struct { + IsRealName bool `json:"isRealName"` + Error string `json:"error"` +} + +func (s *Sealos) fetchRealNameFromAPI(ctx context.Context, userUID string) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, + fmt.Sprintf("%s/admin/v1alpha1/real-name-info?userUID=%s", s.accountURL, userUID), nil) + if err != nil { + return false, err + } + + req.Header.Set("Authorization", "Bearer "+jwtToken) + resp, err := sealosHTTPClient.Do(req) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var sealosResp sealosGetRealNameInfoResp + if err := json.NewDecoder(resp.Body).Decode(&sealosResp); err != nil { + return false, err + } + + if resp.StatusCode != http.StatusOK || sealosResp.Error != "" { + return false, fmt.Errorf("get user (%s) real name failed with status code %d, error: %s", userUID, resp.StatusCode, sealosResp.Error) + } + + return sealosResp.IsRealName, nil +} + // GroupBalance interface implementation -func (s *Sealos) 
getGroupRemainBalance(ctx context.Context, group string) (float64, PostGroupConsumer, error) { +func (s *Sealos) getGroupRemainBalance(ctx context.Context, group string) (int64, string, error) { if cache, err := cacheGetGroupBalance(ctx, group); err == nil && cache.UserUID != "" { - return decimal.NewFromInt(cache.Balance).Div(decimalBalancePrecision).InexactFloat64(), - newSealosPostGroupConsumer(s.accountURL, group, cache.UserUID, cache.Balance), nil + return cache.Balance, cache.UserUID, nil } else if err != nil && !errors.Is(err, redis.Nil) { log.Errorf("get group (%s) balance cache failed: %s", group, err) } balance, userUID, err := s.fetchBalanceFromAPI(ctx, group) if err != nil { - return 0, nil, err + return 0, "", err } if err := cacheSetGroupBalance(ctx, group, balance, userUID); err != nil { log.Errorf("set group (%s) balance cache failed: %s", group, err) } - return decimal.NewFromInt(balance).Div(decimalBalancePrecision).InexactFloat64(), - newSealosPostGroupConsumer(s.accountURL, group, userUID, balance), nil + return balance, userUID, nil } func (s *Sealos) fetchBalanceFromAPI(ctx context.Context, group string) (balance int64, userUID string, err error) { @@ -219,22 +312,16 @@ type SealosPostGroupConsumer struct { accountURL string group string uid string - balance int64 } -func newSealosPostGroupConsumer(accountURL, group, uid string, balance int64) *SealosPostGroupConsumer { +func newSealosPostGroupConsumer(accountURL, group, uid string) *SealosPostGroupConsumer { return &SealosPostGroupConsumer{ accountURL: accountURL, group: group, uid: uid, - balance: balance, } } -func (s *SealosPostGroupConsumer) GetBalance(_ context.Context) (float64, error) { - return decimal.NewFromInt(s.balance).Div(decimalBalancePrecision).InexactFloat64(), nil -} - func (s *SealosPostGroupConsumer) PostGroupConsume(ctx context.Context, tokenName string, usage float64) (float64, error) { amount := s.calculateAmount(usage) diff --git 
a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index 00f6c34a4e5..7879b5c10ea 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -1,6 +1,7 @@ package controller import ( + "errors" "fmt" "net/http" "strconv" @@ -106,11 +107,12 @@ func GetSubscription(c *gin.Context) { group := middleware.GetGroup(c) b, _, err := balance.Default.GetGroupRemainBalance(c, *group) if err != nil { + if errors.Is(err, balance.ErrRealNameUsedAmountLimit) { + middleware.ErrorResponse(c, http.StatusForbidden, err.Error()) + return + } log.Errorf("get group (%s) balance failed: %s", group.ID, err) - c.JSON(http.StatusOK, middleware.APIResponse{ - Success: false, - Message: fmt.Sprintf("get group (%s) balance failed", group.ID), - }) + middleware.ErrorResponse(c, http.StatusInternalServerError, fmt.Sprintf("get group (%s) balance failed", group.ID)) return } token := middleware.GetToken(c) diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 0134d0fd14c..5679c7901bd 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -127,6 +127,10 @@ func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { log := GetLogger(c) groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), *group) if err != nil { + if errors.Is(err, balance.ErrRealNameUsedAmountLimit) { + abortLogWithMessage(c, http.StatusForbidden, balance.ErrRealNameUsedAmountLimit.Error()) + return false + } log.Errorf("get group (%s) balance error: %v", group.ID, err) abortWithMessage(c, http.StatusInternalServerError, "get group balance error") return false From 2fa905078e4d4e6fd034f34b0167bddede4c1c73 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 17 Feb 2025 14:41:03 +0800 Subject: [PATCH 152/167] feat: gemini usage support --- service/aiproxy/middleware/utils.go | 2 +- 
.../aiproxy/relay/adaptor/gemini/constants.go | 73 ++++++---- service/aiproxy/relay/adaptor/gemini/main.go | 125 ++++++++---------- service/aiproxy/relay/utils/error.go | 11 +- 4 files changed, 114 insertions(+), 97 deletions(-) diff --git a/service/aiproxy/middleware/utils.go b/service/aiproxy/middleware/utils.go index 778e54d2b67..0d37992adec 100644 --- a/service/aiproxy/middleware/utils.go +++ b/service/aiproxy/middleware/utils.go @@ -12,7 +12,7 @@ const ( ) func MessageWithRequestID(message string, id string) string { - return fmt.Sprintf("%s (request id: %s)", message, id) + return fmt.Sprintf("%s (aiproxy: %s)", message, id) } func abortLogWithMessage(c *gin.Context, statusCode int, message string) { diff --git a/service/aiproxy/relay/adaptor/gemini/constants.go b/service/aiproxy/relay/adaptor/gemini/constants.go index 5e89798b914..896ebef15be 100644 --- a/service/aiproxy/relay/adaptor/gemini/constants.go +++ b/service/aiproxy/relay/adaptor/gemini/constants.go @@ -6,43 +6,70 @@ import ( ) // https://ai.google.dev/models/gemini - +// https://ai.google.dev/gemini-api/docs/pricing var ModelList = []*model.ModelConfig{ { - Model: "gemini-pro", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerGoogle, + Model: "gemini-1.5-pro", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.0025, + OutputPrice: 0.01, + RPM: 120, }, { - Model: "gemini-1.5-flash", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerGoogle, + Model: "gemini-1.5-flash", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.00015, + OutputPrice: 0.0006, + RPM: 120, }, { - Model: "gemini-1.5-pro", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerGoogle, + Model: "gemini-1.5-flash-8b", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.000075, + OutputPrice: 0.0003, + RPM: 120, }, { - Model: "gemini-2.0-flash-exp", - Type: relaymode.ChatCompletions, - Owner: 
model.ModelOwnerGoogle, + Model: "gemini-2.0-flash", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.0001, + OutputPrice: 0.0004, + RPM: 120, }, { - Model: "gemini-2.0-flash-thinking-exp", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerGoogle, + Model: "gemini-2.0-flash-lite-preview", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.000075, + OutputPrice: 0.0003, + RPM: 120, + }, + { + Model: "gemini-2.0-flash-thinking-exp", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.0001, + OutputPrice: 0.0004, + RPM: 120, }, - { - Model: "text-embedding-004", - Type: relaymode.Embeddings, - Owner: model.ModelOwnerGoogle, + Model: "gemini-2.0-pro-exp", + Type: relaymode.ChatCompletions, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.0025, + OutputPrice: 0.01, + RPM: 120, }, { - Model: "aqa", - Type: relaymode.ChatCompletions, - Owner: model.ModelOwnerGoogle, + Model: "text-embedding-004", + Type: relaymode.Embeddings, + Owner: model.ModelOwnerGoogle, + InputPrice: 0.0001, + RPM: 300, }, } diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index e155055b391..eaadafca63c 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "context" - "fmt" "io" "net/http" "strings" @@ -198,13 +197,6 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io return "", nil, nil, err } - tokenCount, err := CountTokens(req.Context(), meta, contents) - if err != nil { - log.Error("count tokens failed: " + err.Error()) - } else { - meta.InputTokens = tokenCount - } - // Build actual request geminiRequest := ChatRequest{ Contents: contents, @@ -223,40 +215,48 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io return http.MethodPost, nil, bytes.NewReader(data), nil } -func 
CountTokens(ctx context.Context, meta *meta.Meta, chat []*ChatContent) (int, error) { - countReq := ChatRequest{ - Contents: chat, - } - countData, err := json.Marshal(countReq) - if err != nil { - return 0, err - } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, getRequestURL(meta, "countTokens"), bytes.NewReader(countData)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("X-Goog-Api-Key", meta.Channel.Key) - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - var tokenCount CountTokensResponse - if err := json.NewDecoder(resp.Body).Decode(&tokenCount); err != nil { - return 0, err - } - if tokenCount.Error != nil { - return 0, fmt.Errorf("count tokens error: %s, code: %d, status: %s", tokenCount.Error.Message, tokenCount.Error.Code, resp.Status) - } - return tokenCount.TotalTokens, nil -} +// func CountTokens(ctx context.Context, meta *meta.Meta, chat []*ChatContent) (int, error) { +// countReq := ChatRequest{ +// Contents: chat, +// } +// countData, err := json.Marshal(countReq) +// if err != nil { +// return 0, err +// } +// req, err := http.NewRequestWithContext(ctx, http.MethodPost, getRequestURL(meta, "countTokens"), bytes.NewReader(countData)) +// if err != nil { +// return 0, err +// } +// req.Header.Set("Content-Type", "application/json") +// req.Header.Set("X-Goog-Api-Key", meta.Channel.Key) + +// resp, err := http.DefaultClient.Do(req) +// if err != nil { +// return 0, err +// } +// defer resp.Body.Close() + +// var tokenCount CountTokensResponse +// if err := json.NewDecoder(resp.Body).Decode(&tokenCount); err != nil { +// return 0, err +// } +// if tokenCount.Error != nil { +// return 0, fmt.Errorf("count tokens error: %s, code: %d, status: %s", tokenCount.Error.Message, tokenCount.Error.Code, resp.Status) +// } +// return tokenCount.TotalTokens, nil +// } type ChatResponse struct { Candidates []*ChatCandidate 
`json:"candidates"` PromptFeedback ChatPromptFeedback `json:"promptFeedback"` + UsageMetadata *UsageMetadata `json:"usageMetadata"` + ModelVersion string `json:"modelVersion"` +} + +type UsageMetadata struct { + PromptTokenCount int `json:"promptTokenCount"` + CandidatesTokenCount int `json:"candidatesTokenCount"` + TotalTokenCount int `json:"totalTokenCount"` } func (g *ChatResponse) GetResponseText() string { @@ -361,6 +361,13 @@ func streamResponseGeminiChat2OpenAI(meta *meta.Meta, geminiResponse *ChatRespon Object: "chat.completion.chunk", Choices: make([]*openai.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates)), } + if geminiResponse.UsageMetadata != nil { + response.Usage = &model.Usage{ + PromptTokens: geminiResponse.UsageMetadata.PromptTokenCount, + CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount, + TotalTokens: geminiResponse.UsageMetadata.TotalTokenCount, + } + } for i, candidate := range geminiResponse.Candidates { choice := openai.ChatCompletionsStreamResponseChoice{ Index: i, @@ -393,12 +400,15 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model log := middleware.GetLogger(c) responseText := strings.Builder{} - respContent := []*ChatContent{} scanner := bufio.NewScanner(resp.Body) scanner.Split(bufio.ScanLines) common.SetEventStreamHeaders(c) + usage := model.Usage{ + PromptTokens: meta.InputTokens, + } + for scanner.Scan() { data := scanner.Bytes() if len(data) < 6 || conv.BytesToString(data[:6]) != "data: " { @@ -416,12 +426,9 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model log.Error("error unmarshalling stream response: " + err.Error()) continue } - for _, candidate := range geminiResponse.Candidates { - respContent = append(respContent, &candidate.Content) - } response := streamResponseGeminiChat2OpenAI(meta, &geminiResponse) - if response == nil { - continue + if response.Usage != nil { + usage = *response.Usage } 
responseText.WriteString(response.Choices[0].Delta.StringContent()) @@ -435,26 +442,12 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model render.Done(c) - usage := model.Usage{ - PromptTokens: meta.InputTokens, - } - - tokenCount, err := CountTokens(c.Request.Context(), meta, respContent) - if err != nil { - log.Error("count tokens failed: " + err.Error()) - usage.CompletionTokens = openai.CountTokenText(responseText.String(), meta.ActualModel) - } else { - usage.CompletionTokens = tokenCount - } - usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens return &usage, nil } func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode) { defer resp.Body.Close() - log := middleware.GetLogger(c) - var geminiResponse ChatResponse err := json.NewDecoder(resp.Body).Decode(&geminiResponse) if err != nil { @@ -465,22 +458,12 @@ func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage } fullTextResponse := responseGeminiChat2OpenAI(meta, &geminiResponse) fullTextResponse.Model = meta.OriginModel - respContent := []*ChatContent{} - for _, candidate := range geminiResponse.Candidates { - respContent = append(respContent, &candidate.Content) - } usage := model.Usage{ - PromptTokens: meta.InputTokens, - } - tokenCount, err := CountTokens(c.Request.Context(), meta, respContent) - if err != nil { - log.Error("count tokens failed: " + err.Error()) - usage.CompletionTokens = openai.CountTokenText(geminiResponse.GetResponseText(), meta.ActualModel) - } else { - usage.CompletionTokens = tokenCount + PromptTokens: geminiResponse.UsageMetadata.PromptTokenCount, + CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount, + TotalTokens: geminiResponse.UsageMetadata.TotalTokenCount, } - usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens fullTextResponse.Usage = usage jsonResponse, err := json.Marshal(fullTextResponse) if err != nil { diff --git 
a/service/aiproxy/relay/utils/error.go b/service/aiproxy/relay/utils/error.go index 5f41c330e94..0843cdf0f9e 100644 --- a/service/aiproxy/relay/utils/error.go +++ b/service/aiproxy/relay/utils/error.go @@ -99,8 +99,15 @@ func RelayDefaultErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode { var errResponse GeneralErrorResponse err = json.Unmarshal(respBody, &errResponse) if err != nil { - ErrorWithStatusCode.Error.Message = conv.BytesToString(respBody) - return ErrorWithStatusCode + var errsResp []GeneralErrorResponse + err = json.Unmarshal(respBody, &errsResp) + if err != nil { + ErrorWithStatusCode.Error.Message = conv.BytesToString(respBody) + return ErrorWithStatusCode + } + if len(errsResp) > 0 { + errResponse = errsResp[0] + } } if errResponse.Error.Message != "" { From 32d96180501d48546002e2c200fed9f95a874b66 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 17 Feb 2025 15:33:10 +0800 Subject: [PATCH 153/167] fix: lint --- service/aiproxy/common/balance/sealos.go | 6 +++--- service/aiproxy/relay/adaptor/gemini/constants.go | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/service/aiproxy/common/balance/sealos.go b/service/aiproxy/common/balance/sealos.go index 9f7f2f316c7..e8d4ba7319a 100644 --- a/service/aiproxy/common/balance/sealos.go +++ b/service/aiproxy/common/balance/sealos.go @@ -160,8 +160,8 @@ func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group model.GroupCac balance, userUID, err := s.getGroupRemainBalance(ctx, group.ID) if err == nil { if sealosCheckRealNameEnable && - !s.checkRealName(ctx, userUID) && - group.UsedAmount > sealosNoRealNameUsedAmountLimit { + group.UsedAmount > sealosNoRealNameUsedAmountLimit && + !s.checkRealName(ctx, userUID) { return 0, nil, ErrRealNameUsedAmountLimit } return decimal.NewFromInt(balance).Div(decimalBalancePrecision).InexactFloat64(), @@ -206,7 +206,7 @@ func cacheSetUserRealName(ctx context.Context, userUID string, realName bool) er func (s *Sealos) checkRealName(ctx 
context.Context, userUID string) bool { if cache, err := cacheGetUserRealName(ctx, userUID); err == nil { return cache - } else if err != nil && !errors.Is(err, redis.Nil) { + } else if !errors.Is(err, redis.Nil) { log.Errorf("get user (%s) real name cache failed: %s", userUID, err) } diff --git a/service/aiproxy/relay/adaptor/gemini/constants.go b/service/aiproxy/relay/adaptor/gemini/constants.go index 896ebef15be..9de5e7c7563 100644 --- a/service/aiproxy/relay/adaptor/gemini/constants.go +++ b/service/aiproxy/relay/adaptor/gemini/constants.go @@ -7,6 +7,7 @@ import ( // https://ai.google.dev/models/gemini // https://ai.google.dev/gemini-api/docs/pricing + var ModelList = []*model.ModelConfig{ { Model: "gemini-1.5-pro", From d5263ca42c20d2b34b49cbf3096e9e390aeafe9c Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 17 Feb 2025 16:04:53 +0800 Subject: [PATCH 154/167] fix: error message --- service/aiproxy/common/balance/sealos.go | 4 ++-- service/aiproxy/controller/channel-billing.go | 2 +- service/aiproxy/middleware/distributor.go | 8 ++++---- service/aiproxy/relay/controller/handle.go | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/service/aiproxy/common/balance/sealos.go b/service/aiproxy/common/balance/sealos.go index e8d4ba7319a..163d60f743b 100644 --- a/service/aiproxy/common/balance/sealos.go +++ b/service/aiproxy/common/balance/sealos.go @@ -152,7 +152,7 @@ func cacheDecreaseGroupBalance(ctx context.Context, group string, amount int64) return decreaseGroupBalanceScript.Run(ctx, common.RDB, []string{fmt.Sprintf(sealosGroupBalanceKey, group)}, amount).Err() } -var ErrRealNameUsedAmountLimit = errors.New("real name used amount limit reached") +var ErrNoRealNameUsedAmountLimit = errors.New("no real name used amount limit reached") func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group model.GroupCache) (float64, PostGroupConsumer, error) { var errs []error @@ -162,7 +162,7 @@ func (s *Sealos) GetGroupRemainBalance(ctx 
context.Context, group model.GroupCac if sealosCheckRealNameEnable && group.UsedAmount > sealosNoRealNameUsedAmountLimit && !s.checkRealName(ctx, userUID) { - return 0, nil, ErrRealNameUsedAmountLimit + return 0, nil, ErrNoRealNameUsedAmountLimit } return decimal.NewFromInt(balance).Div(decimalBalancePrecision).InexactFloat64(), newSealosPostGroupConsumer(s.accountURL, group.ID, userUID), nil diff --git a/service/aiproxy/controller/channel-billing.go b/service/aiproxy/controller/channel-billing.go index 7879b5c10ea..cb6cf9d0504 100644 --- a/service/aiproxy/controller/channel-billing.go +++ b/service/aiproxy/controller/channel-billing.go @@ -107,7 +107,7 @@ func GetSubscription(c *gin.Context) { group := middleware.GetGroup(c) b, _, err := balance.Default.GetGroupRemainBalance(c, *group) if err != nil { - if errors.Is(err, balance.ErrRealNameUsedAmountLimit) { + if errors.Is(err, balance.ErrNoRealNameUsedAmountLimit) { middleware.ErrorResponse(c, http.StatusForbidden, err.Error()) return } diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index 5679c7901bd..b5ebcd5f6ed 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -127,18 +127,18 @@ func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { log := GetLogger(c) groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), *group) if err != nil { - if errors.Is(err, balance.ErrRealNameUsedAmountLimit) { - abortLogWithMessage(c, http.StatusForbidden, balance.ErrRealNameUsedAmountLimit.Error()) + if errors.Is(err, balance.ErrNoRealNameUsedAmountLimit) { + abortLogWithMessage(c, http.StatusForbidden, balance.ErrNoRealNameUsedAmountLimit.Error()) return false } log.Errorf("get group (%s) balance error: %v", group.ID, err) - abortWithMessage(c, http.StatusInternalServerError, "get group balance error") + abortWithMessage(c, http.StatusInternalServerError, fmt.Sprintf("get 
group (%s) balance error", group.ID)) return false } log.Data["balance"] = strconv.FormatFloat(groupBalance, 'f', -1, 64) if groupBalance <= 0 { - abortLogWithMessage(c, http.StatusForbidden, "group balance not enough") + abortLogWithMessage(c, http.StatusForbidden, fmt.Sprintf("group (%s) balance not enough", group.ID)) return false } c.Set(ctxkey.GroupBalance, &GroupBalanceConsumer{ diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index 1b732ad22f2..94f9b806b3d 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -52,7 +52,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa } if !meta.IsChannelTest && groupRemainBalance <= 0 { - return openai.ErrorWrapperWithMessage("group balance not enough", "insufficient_group_balance", http.StatusForbidden) + return openai.ErrorWrapperWithMessage(fmt.Sprintf("group (%s) balance not enough", meta.Group.ID), "insufficient_group_balance", http.StatusForbidden) } // 3. Pre-process request @@ -80,7 +80,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa // 4. 
Pre-check balance ok = checkGroupBalance(preCheckReq, meta, groupRemainBalance) if !ok { - return openai.ErrorWrapper(errors.New("group balance is not enough"), "insufficient_group_balance", http.StatusForbidden) + return openai.ErrorWrapper(errors.New(fmt.Sprintf("group (%s) balance is not enough", meta.Group.ID)), "insufficient_group_balance", http.StatusForbidden) } meta.InputTokens = preCheckReq.InputTokens From 5019b036b06892baa7590cdd8fd14e2d8ba28fff Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 17 Feb 2025 16:08:29 +0800 Subject: [PATCH 155/167] fix: lint --- service/aiproxy/relay/controller/handle.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index 94f9b806b3d..aae7f7ad17b 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -1,7 +1,6 @@ package controller import ( - "errors" "fmt" "net/http" "strconv" @@ -80,7 +79,7 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa // 4. 
Pre-check balance ok = checkGroupBalance(preCheckReq, meta, groupRemainBalance) if !ok { - return openai.ErrorWrapper(errors.New(fmt.Sprintf("group (%s) balance is not enough", meta.Group.ID)), "insufficient_group_balance", http.StatusForbidden) + return openai.ErrorWrapperWithMessage(fmt.Sprintf("group (%s) balance is not enough", meta.Group.ID), "insufficient_group_balance", http.StatusForbidden) } meta.InputTokens = preCheckReq.InputTokens From 6325e0cfbfd6248afbe2ad18a0b89be0f4633b51 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Mon, 17 Feb 2025 22:23:11 +0800 Subject: [PATCH 156/167] fix: search token --- service/aiproxy/model/token.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index 1312741d92b..b49fb3505d6 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -131,15 +131,15 @@ func SearchTokens(group string, keyword string, startIdx int, num int, order str if group == "" { if common.UsingPostgreSQL { - conditions = append(conditions, "name ILIKE ?") + conditions = append(conditions, "group_id ILIKE ?") } else { - conditions = append(conditions, "name LIKE ?") + conditions = append(conditions, "group_id LIKE ?") } values = append(values, "%"+keyword+"%") } if status == 0 { conditions = append(conditions, "status = ?") - values = append(values, 1) + values = append(values, String2Int(keyword)) } if name == "" { if common.UsingPostgreSQL { @@ -155,7 +155,7 @@ func SearchTokens(group string, keyword string, startIdx int, num int, order str } else { conditions = append(conditions, "key LIKE ?") } - values = append(values, keyword) + values = append(values, keyword+"%") } if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) 
From 7d5d014ceef659f95fc8d929c6bbf50a855dc786 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 18 Feb 2025 10:07:24 +0800 Subject: [PATCH 157/167] fix: no real name limit han message --- service/aiproxy/common/balance/sealos.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/service/aiproxy/common/balance/sealos.go b/service/aiproxy/common/balance/sealos.go index 163d60f743b..f7c8a386d65 100644 --- a/service/aiproxy/common/balance/sealos.go +++ b/service/aiproxy/common/balance/sealos.go @@ -152,7 +152,7 @@ func cacheDecreaseGroupBalance(ctx context.Context, group string, amount int64) return decreaseGroupBalanceScript.Run(ctx, common.RDB, []string{fmt.Sprintf(sealosGroupBalanceKey, group)}, amount).Err() } -var ErrNoRealNameUsedAmountLimit = errors.New("no real name used amount limit reached") +var ErrNoRealNameUsedAmountLimit = errors.New("达到未实名用户使用额度限制,请实名认证") func (s *Sealos) GetGroupRemainBalance(ctx context.Context, group model.GroupCache) (float64, PostGroupConsumer, error) { var errs []error From ee404548a286430f08eb3bdfbe91b5bbdb23591f Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 18 Feb 2025 14:18:06 +0800 Subject: [PATCH 158/167] feat: gemini model config --- service/aiproxy/controller/dashboard.go | 3 +- service/aiproxy/middleware/auth.go | 2 +- .../aiproxy/relay/adaptor/gemini/constants.go | 61 ++++++++++++++++--- 3 files changed, 56 insertions(+), 10 deletions(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 31a1aeed67d..87ede4ebe65 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -1,6 +1,7 @@ package controller import ( + "fmt" "net/http" "strconv" "time" @@ -198,7 +199,7 @@ func GetGroupDashboardModels(c *gin.Context) { } groupCache, err := model.CacheGetGroup(group) if err != nil { - middleware.ErrorResponse(c, http.StatusOK, "failed to get group") + middleware.ErrorResponse(c, http.StatusOK, fmt.Sprintf("failed to 
get group: %v", err)) return } diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 710e4fe6818..53c19082d7c 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -103,7 +103,7 @@ func TokenAuth(c *gin.Context) { var err error group, err = model.CacheGetGroup(token.Group) if err != nil { - abortLogWithMessage(c, http.StatusInternalServerError, err.Error()) + abortLogWithMessage(c, http.StatusInternalServerError, fmt.Sprintf("failed to get group: %v", err)) return } if group.Status != model.GroupStatusEnabled { diff --git a/service/aiproxy/relay/adaptor/gemini/constants.go b/service/aiproxy/relay/adaptor/gemini/constants.go index 9de5e7c7563..bdf2eab28db 100644 --- a/service/aiproxy/relay/adaptor/gemini/constants.go +++ b/service/aiproxy/relay/adaptor/gemini/constants.go @@ -15,7 +15,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.0025, OutputPrice: 0.01, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(2097152), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { Model: "gemini-1.5-flash", @@ -23,7 +29,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.00015, OutputPrice: 0.0006, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1048576), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { Model: "gemini-1.5-flash-8b", @@ -31,7 +43,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.000075, OutputPrice: 0.0003, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1048576), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + 
model.WithModelConfigVision(true), + ), }, { Model: "gemini-2.0-flash", @@ -39,7 +57,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.0001, OutputPrice: 0.0004, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1048576), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { Model: "gemini-2.0-flash-lite-preview", @@ -47,7 +71,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.000075, OutputPrice: 0.0003, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1048576), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { Model: "gemini-2.0-flash-thinking-exp", @@ -55,7 +85,12 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.0001, OutputPrice: 0.0004, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(1048576), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigVision(true), + ), }, { Model: "gemini-2.0-pro-exp", @@ -63,7 +98,13 @@ var ModelList = []*model.ModelConfig{ Owner: model.ModelOwnerGoogle, InputPrice: 0.0025, OutputPrice: 0.01, - RPM: 120, + RPM: 600, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(2097152), + model.WithModelConfigMaxOutputTokens(8192), + model.WithModelConfigToolChoice(true), + model.WithModelConfigVision(true), + ), }, { @@ -71,6 +112,10 @@ var ModelList = []*model.ModelConfig{ Type: relaymode.Embeddings, Owner: model.ModelOwnerGoogle, InputPrice: 0.0001, - RPM: 300, + RPM: 1500, + Config: model.NewModelConfig( + model.WithModelConfigMaxContextTokens(2048), + model.WithModelConfigMaxOutputTokens(768), + ), }, } From b02d23a4c325c25ce47dc1f717a57bac93be10f5 Mon Sep 17 00:00:00 2001 From: zijiren233 
Date: Tue, 18 Feb 2025 14:22:37 +0800 Subject: [PATCH 159/167] fix: get group error hans message --- service/aiproxy/model/group.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index 5f5e343e56c..9f1e63ec0d6 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -76,7 +76,7 @@ func GetGroups(startIdx int, num int, order string, onlyDisabled bool) (groups [ func GetGroupByID(id string) (*Group, error) { if id == "" { - return nil, errors.New("id 为空!") + return nil, errors.New("group id is empty") } group := Group{ID: id} err := DB.First(&group, "id = ?", id).Error @@ -85,7 +85,7 @@ func GetGroupByID(id string) (*Group, error) { func DeleteGroupByID(id string) (err error) { if id == "" { - return errors.New("id 为空!") + return errors.New("group id is empty") } defer func() { if err == nil { From b4bc1a5ce286292bd6273ca413cac773fc6b47c2 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 18 Feb 2025 14:28:59 +0800 Subject: [PATCH 160/167] fix: get group dashboard models --- service/aiproxy/controller/dashboard.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/service/aiproxy/controller/dashboard.go b/service/aiproxy/controller/dashboard.go index 87ede4ebe65..54c95ad7914 100644 --- a/service/aiproxy/controller/dashboard.go +++ b/service/aiproxy/controller/dashboard.go @@ -1,6 +1,7 @@ package controller import ( + "errors" "fmt" "net/http" "strconv" @@ -199,7 +200,11 @@ func GetGroupDashboardModels(c *gin.Context) { } groupCache, err := model.CacheGetGroup(group) if err != nil { - middleware.ErrorResponse(c, http.StatusOK, fmt.Sprintf("failed to get group: %v", err)) + if errors.Is(err, model.NotFoundError(model.ErrGroupNotFound)) { + middleware.SuccessResponse(c, model.LoadModelCaches().EnabledModelConfigs) + } else { + middleware.ErrorResponse(c, http.StatusOK, fmt.Sprintf("failed to get group: %v", err)) + } return } From 
5434c4741066fa2d21ebfaf441e7d914546a28af Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 18 Feb 2025 16:12:57 +0800 Subject: [PATCH 161/167] feat: channel and token model search --- service/aiproxy/model/channel.go | 3 +++ service/aiproxy/model/token.go | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 0e04c1d3b0d..4ff72811c39 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -238,6 +238,9 @@ func SearchChannels(keyword string, startIdx int, num int, id int, name string, values = append(values, "%"+keyword+"%") } + conditions = append(conditions, "models LIKE ?") + values = append(values, "%"+keyword+"%") + if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) } diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index b49fb3505d6..31a6b950ff7 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -157,6 +157,10 @@ func SearchTokens(group string, keyword string, startIdx int, num int, order str } values = append(values, keyword+"%") } + + conditions = append(conditions, "models LIKE ?") + values = append(values, "%"+keyword+"%") + if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) 
} From 0a9453b9fc0c37d643f6b391df8a79fa72bad608 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Tue, 18 Feb 2025 17:49:01 +0800 Subject: [PATCH 162/167] feat: support ali completions --- service/aiproxy/common/config/config.go | 9 ++------- service/aiproxy/model/option.go | 3 --- service/aiproxy/relay/adaptor/ali/adaptor.go | 6 ++++-- service/aiproxy/relay/adaptor/openai/adaptor.go | 4 ++-- service/aiproxy/relay/adaptor/openai/main.go | 3 +++ service/aiproxy/relay/adaptor/openai/model.go | 1 + 6 files changed, 12 insertions(+), 14 deletions(-) diff --git a/service/aiproxy/common/config/config.go b/service/aiproxy/common/config/config.go index 1e53220c2e4..4ec33efa157 100644 --- a/service/aiproxy/common/config/config.go +++ b/service/aiproxy/common/config/config.go @@ -32,7 +32,7 @@ var ( enableModelErrorAutoBan atomic.Bool modelErrorAutoBanRate = math.Float64bits(0.5) timeoutWithModelType atomic.Value - disableModelConfig atomic.Bool + disableModelConfig = env.Bool("DISABLE_MODEL_CONFIG", false) ) var ( @@ -57,12 +57,7 @@ func init() { } func GetDisableModelConfig() bool { - return disableModelConfig.Load() -} - -func SetDisableModelConfig(disabled bool) { - disabled = env.Bool("DISABLE_MODEL_CONFIG", disabled) - disableModelConfig.Store(disabled) + return disableModelConfig } func GetRetryTimes() int64 { diff --git a/service/aiproxy/model/option.go b/service/aiproxy/model/option.go index f3aa912c900..f74fa3ea019 100644 --- a/service/aiproxy/model/option.go +++ b/service/aiproxy/model/option.go @@ -60,7 +60,6 @@ func initOptionMap() error { optionMap["DisableServe"] = strconv.FormatBool(config.GetDisableServe()) optionMap["BillingEnabled"] = strconv.FormatBool(config.GetBillingEnabled()) optionMap["RetryTimes"] = strconv.FormatInt(config.GetRetryTimes(), 10) - optionMap["DisableModelConfig"] = strconv.FormatBool(config.GetDisableModelConfig()) optionMap["ModelErrorAutoBanRate"] = strconv.FormatFloat(config.GetModelErrorAutoBanRate(), 'f', -1, 64) 
optionMap["EnableModelErrorAutoBan"] = strconv.FormatBool(config.GetEnableModelErrorAutoBan()) timeoutWithModelTypeJSON, err := json.Marshal(config.GetTimeoutWithModelType()) @@ -187,8 +186,6 @@ func updateOption(key string, value string, isInit bool) (err error) { switch key { case "InternalToken": config.SetInternalToken(value) - case "DisableModelConfig": - config.SetDisableModelConfig(isTrue(value)) case "LogDetailStorageHours": logDetailStorageHours, err := strconv.ParseInt(value, 10, 64) if err != nil { diff --git a/service/aiproxy/relay/adaptor/ali/adaptor.go b/service/aiproxy/relay/adaptor/ali/adaptor.go index 7ac0f4c9a62..ab48abb942e 100644 --- a/service/aiproxy/relay/adaptor/ali/adaptor.go +++ b/service/aiproxy/relay/adaptor/ali/adaptor.go @@ -38,6 +38,8 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) { return u + "/api/v1/services/aigc/text2image/image-synthesis", nil case relaymode.ChatCompletions: return u + "/compatible-mode/v1/chat/completions", nil + case relaymode.Completions: + return u + "/compatible-mode/v1/completions", nil case relaymode.AudioSpeech, relaymode.AudioTranscription: return u + "/api-ws/v1/inference", nil case relaymode.Rerank: @@ -62,7 +64,7 @@ func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, ht return ConvertRerankRequest(meta, req) case relaymode.Embeddings: return ConvertEmbeddingsRequest(meta, req) - case relaymode.ChatCompletions: + case relaymode.ChatCompletions, relaymode.Completions: return openai.ConvertRequest(meta, req) case relaymode.AudioSpeech: return ConvertTTSRequest(meta, req) @@ -107,7 +109,7 @@ func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Respons usage, err = EmbeddingsHandler(meta, c, resp) case relaymode.ImagesGenerations: usage, err = ImageHandler(meta, c, resp) - case relaymode.ChatCompletions: + case relaymode.ChatCompletions, relaymode.Completions: usage, err = openai.DoResponse(meta, c, resp) case relaymode.Rerank: usage, err = 
RerankHandler(meta, c, resp) diff --git a/service/aiproxy/relay/adaptor/openai/adaptor.go b/service/aiproxy/relay/adaptor/openai/adaptor.go index e549ec28041..6288124a932 100644 --- a/service/aiproxy/relay/adaptor/openai/adaptor.go +++ b/service/aiproxy/relay/adaptor/openai/adaptor.go @@ -76,7 +76,7 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io case relaymode.Moderations: meta.Set(MetaEmbeddingsPatchInputToSlices, true) return ConvertEmbeddingsRequest(meta, req) - case relaymode.Embeddings: + case relaymode.Embeddings, relaymode.Completions: return ConvertEmbeddingsRequest(meta, req) case relaymode.ChatCompletions: return ConvertTextRequest(meta, req, meta.GetBool(DoNotPatchStreamOptionsIncludeUsageMetaKey)) @@ -105,7 +105,7 @@ func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *re usage, err = RerankHandler(meta, c, resp) case relaymode.Moderations: usage, err = ModerationsHandler(meta, c, resp) - case relaymode.Embeddings: + case relaymode.Embeddings, relaymode.Completions: fallthrough case relaymode.ChatCompletions: if utils.IsStreamResponse(resp) { diff --git a/service/aiproxy/relay/adaptor/openai/main.go b/service/aiproxy/relay/adaptor/openai/main.go index 7d81d9f754f..ab9229d5597 100644 --- a/service/aiproxy/relay/adaptor/openai/main.go +++ b/service/aiproxy/relay/adaptor/openai/main.go @@ -105,6 +105,9 @@ func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model for _, choice := range streamResponse.Choices { responseText += choice.Text } + if streamResponse.Usage != nil { + usage = streamResponse.Usage + } render.StringData(c, data) } } diff --git a/service/aiproxy/relay/adaptor/openai/model.go b/service/aiproxy/relay/adaptor/openai/model.go index b83898c2daa..ad0d579bf56 100644 --- a/service/aiproxy/relay/adaptor/openai/model.go +++ b/service/aiproxy/relay/adaptor/openai/model.go @@ -138,6 +138,7 @@ type CompletionsStreamResponse struct { Text string `json:"text"` 
FinishReason string `json:"finish_reason"` } `json:"choices"` + Usage *model.Usage `json:"usage"` } type SubscriptionResponse struct { From 3a389bebda90210565e65b67cb39b9d6f654c53d Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 19 Feb 2025 11:18:13 +0800 Subject: [PATCH 163/167] feat: internal group and search optimize --- service/aiproxy/common/balance/balance.go | 9 +++- service/aiproxy/middleware/auth.go | 15 +++--- service/aiproxy/middleware/distributor.go | 26 ++++++---- service/aiproxy/model/channel.go | 14 ++++-- service/aiproxy/model/group.go | 15 +----- service/aiproxy/model/log.go | 60 +++++++++-------------- service/aiproxy/model/token.go | 30 +++++------- 7 files changed, 78 insertions(+), 91 deletions(-) diff --git a/service/aiproxy/common/balance/balance.go b/service/aiproxy/common/balance/balance.go index faf7172638e..934448d706b 100644 --- a/service/aiproxy/common/balance/balance.go +++ b/service/aiproxy/common/balance/balance.go @@ -14,4 +14,11 @@ type PostGroupConsumer interface { PostGroupConsume(ctx context.Context, tokenName string, usage float64) (float64, error) } -var Default GroupBalance = NewMockGroupBalance() +var ( + mock GroupBalance = NewMockGroupBalance() + Default = mock +) + +func MockGetGroupRemainBalance(ctx context.Context, group model.GroupCache) (float64, PostGroupConsumer, error) { + return mock.GetGroupRemainBalance(ctx, group) +} diff --git a/service/aiproxy/middleware/auth.go b/service/aiproxy/middleware/auth.go index 53c19082d7c..2390d11e881 100644 --- a/service/aiproxy/middleware/auth.go +++ b/service/aiproxy/middleware/auth.go @@ -63,7 +63,7 @@ func TokenAuth(c *gin.Context) { var token *model.TokenCache var useInternalToken bool - if config.GetInternalToken() != "" && config.GetInternalToken() == key { + if config.GetInternalToken() != "" && config.GetInternalToken() == key || config.AdminKey != "" && config.AdminKey == key { token = &model.TokenCache{} useInternalToken = true } else { @@ -75,7 +75,7 @@ func 
TokenAuth(c *gin.Context) { } } - SetLogTokenFields(log.Data, token) + SetLogTokenFields(log.Data, token, useInternalToken) if token.Subnet != "" { if ok, err := network.IsIPInSubnets(c.ClientIP(), token.Subnet); err != nil { @@ -97,7 +97,7 @@ func TokenAuth(c *gin.Context) { var group *model.GroupCache if useInternalToken { group = &model.GroupCache{ - Status: model.GroupStatusEnabled, + Status: model.GroupStatusInternal, } } else { var err error @@ -106,7 +106,7 @@ func TokenAuth(c *gin.Context) { abortLogWithMessage(c, http.StatusInternalServerError, fmt.Sprintf("failed to get group: %v", err)) return } - if group.Status != model.GroupStatusEnabled { + if group.Status != model.GroupStatusEnabled && group.Status != model.GroupStatusInternal { abortLogWithMessage(c, http.StatusForbidden, "group is disabled") return } @@ -172,7 +172,7 @@ func SetLogFieldsFromMeta(m *meta.Meta, fields logrus.Fields) { } SetLogGroupFields(fields, m.Group) - SetLogTokenFields(fields, m.Token) + SetLogTokenFields(fields, m.Token, false) SetLogChannelFields(fields, m.Channel) } @@ -213,7 +213,7 @@ func SetLogGroupFields(fields logrus.Fields, group *model.GroupCache) { } } -func SetLogTokenFields(fields logrus.Fields, token *model.TokenCache) { +func SetLogTokenFields(fields logrus.Fields, token *model.TokenCache, internal bool) { if token == nil { return } @@ -226,6 +226,9 @@ func SetLogTokenFields(fields logrus.Fields, token *model.TokenCache) { if token.Key != "" { fields["key"] = maskTokenKey(token.Key) } + if internal { + fields["internal"] = "true" + } } func maskTokenKey(key string) string { diff --git a/service/aiproxy/middleware/distributor.go b/service/aiproxy/middleware/distributor.go index b5ebcd5f6ed..86d0e71ed6d 100644 --- a/service/aiproxy/middleware/distributor.go +++ b/service/aiproxy/middleware/distributor.go @@ -124,18 +124,26 @@ type GroupBalanceConsumer struct { } func checkGroupBalance(c *gin.Context, group *model.GroupCache) bool { - log := GetLogger(c) - 
groupBalance, consumer, err := balance.Default.GetGroupRemainBalance(c.Request.Context(), *group) - if err != nil { - if errors.Is(err, balance.ErrNoRealNameUsedAmountLimit) { - abortLogWithMessage(c, http.StatusForbidden, balance.ErrNoRealNameUsedAmountLimit.Error()) + var groupBalance float64 + var consumer balance.PostGroupConsumer + + if group.Status == model.GroupStatusInternal { + groupBalance, consumer, _ = balance.MockGetGroupRemainBalance(c.Request.Context(), *group) + } else { + log := GetLogger(c) + var err error + groupBalance, consumer, err = balance.Default.GetGroupRemainBalance(c.Request.Context(), *group) + if err != nil { + if errors.Is(err, balance.ErrNoRealNameUsedAmountLimit) { + abortLogWithMessage(c, http.StatusForbidden, balance.ErrNoRealNameUsedAmountLimit.Error()) + return false + } + log.Errorf("get group (%s) balance error: %v", group.ID, err) + abortWithMessage(c, http.StatusInternalServerError, fmt.Sprintf("get group (%s) balance error", group.ID)) return false } - log.Errorf("get group (%s) balance error: %v", group.ID, err) - abortWithMessage(c, http.StatusInternalServerError, fmt.Sprintf("get group (%s) balance error", group.ID)) - return false + log.Data["balance"] = strconv.FormatFloat(groupBalance, 'f', -1, 64) } - log.Data["balance"] = strconv.FormatFloat(groupBalance, 'f', -1, 64) if groupBalance <= 0 { abortLogWithMessage(c, http.StatusForbidden, fmt.Sprintf("group (%s) balance not enough", group.ID)) diff --git a/service/aiproxy/model/channel.go b/service/aiproxy/model/channel.go index 4ff72811c39..938f25a7f10 100644 --- a/service/aiproxy/model/channel.go +++ b/service/aiproxy/model/channel.go @@ -209,6 +209,10 @@ func SearchChannels(keyword string, startIdx int, num int, id int, name string, conditions = append(conditions, "id = ?") values = append(values, String2Int(keyword)) } + if channelType == 0 { + conditions = append(conditions, "type = ?") + values = append(values, String2Int(keyword)) + } if name == "" { if 
common.UsingPostgreSQL { conditions = append(conditions, "name ILIKE ?") @@ -225,10 +229,6 @@ func SearchChannels(keyword string, startIdx int, num int, id int, name string, } values = append(values, "%"+keyword+"%") } - if channelType == 0 { - conditions = append(conditions, "type = ?") - values = append(values, String2Int(keyword)) - } if baseURL == "" { if common.UsingPostgreSQL { conditions = append(conditions, "base_url ILIKE ?") @@ -238,7 +238,11 @@ func SearchChannels(keyword string, startIdx int, num int, id int, name string, values = append(values, "%"+keyword+"%") } - conditions = append(conditions, "models LIKE ?") + if common.UsingPostgreSQL { + conditions = append(conditions, "models ILIKE ?") + } else { + conditions = append(conditions, "models LIKE ?") + } values = append(values, "%"+keyword+"%") if len(conditions) > 0 { diff --git a/service/aiproxy/model/group.go b/service/aiproxy/model/group.go index 9f1e63ec0d6..63f93369cf2 100644 --- a/service/aiproxy/model/group.go +++ b/service/aiproxy/model/group.go @@ -2,7 +2,6 @@ package model import ( "errors" - "fmt" "strings" "time" @@ -20,6 +19,7 @@ const ( const ( GroupStatusEnabled = 1 // don't use 0, 0 is the default value! GroupStatusDisabled = 2 // also don't use 0 + GroupStatusInternal = 3 ) type Group struct { @@ -233,19 +233,6 @@ func SearchGroup(keyword string, startIdx int, num int, order string, status int } else { tx = tx.Where("id LIKE ?", "%"+keyword+"%") } - if keyword != "" { - var conditions []string - var values []interface{} - - if status == 0 { - conditions = append(conditions, "status = ?") - values = append(values, 1) - } - - if len(conditions) > 0 { - tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) 
- } - } err = tx.Count(&total).Error if err != nil { return nil, 0, err diff --git a/service/aiproxy/model/log.go b/service/aiproxy/model/log.go index 0e359972508..18cdcba0a60 100644 --- a/service/aiproxy/model/log.go +++ b/service/aiproxy/model/log.go @@ -540,12 +540,8 @@ func buildSearchLogsQuery( var values []interface{} if group == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "content ILIKE ?") - } else { - conditions = append(conditions, "content LIKE ?") - } - values = append(values, "%"+keyword+"%") + conditions = append(conditions, "group_id = ?") + values = append(values, keyword) } if num := String2Int(keyword); num != 0 { @@ -558,38 +554,33 @@ func buildSearchLogsQuery( values = append(values, num) } } - if endpoint == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "endpoint ILIKE ?") - } else { - conditions = append(conditions, "endpoint LIKE ?") - } - values = append(values, "%"+keyword+"%") - } if requestID == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "request_id ILIKE ?") - } else { - conditions = append(conditions, "request_id LIKE ?") - } - values = append(values, "%"+keyword+"%") + conditions = append(conditions, "request_id = ?") + values = append(values, keyword) } if tokenName == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "token_name ILIKE ?") - } else { - conditions = append(conditions, "token_name LIKE ?") - } - values = append(values, "%"+keyword+"%") + conditions = append(conditions, "token_name = ?") + values = append(values, keyword) } if modelName == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "model ILIKE ?") - } else { - conditions = append(conditions, "model LIKE ?") - } - values = append(values, "%"+keyword+"%") + conditions = append(conditions, "model = ?") + values = append(values, keyword) + } + + if ip != "" { + conditions = append(conditions, "ip = ?") + values = append(values, ip) } + + // if endpoint == 
"" { + // if common.UsingPostgreSQL { + // conditions = append(conditions, "endpoint ILIKE ?") + // } else { + // conditions = append(conditions, "endpoint LIKE ?") + // } + // values = append(values, "%"+keyword+"%") + // } + if common.UsingPostgreSQL { conditions = append(conditions, "content ILIKE ?") } else { @@ -597,11 +588,6 @@ func buildSearchLogsQuery( } values = append(values, "%"+keyword+"%") - if ip != "" { - conditions = append(conditions, "ip = ?") - values = append(values, ip) - } - if len(conditions) > 0 { tx = tx.Where(fmt.Sprintf("(%s)", strings.Join(conditions, " OR ")), values...) } diff --git a/service/aiproxy/model/token.go b/service/aiproxy/model/token.go index 31a6b950ff7..8ed234002e1 100644 --- a/service/aiproxy/model/token.go +++ b/service/aiproxy/model/token.go @@ -130,35 +130,27 @@ func SearchTokens(group string, keyword string, startIdx int, num int, order str var values []interface{} if group == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "group_id ILIKE ?") - } else { - conditions = append(conditions, "group_id LIKE ?") - } - values = append(values, "%"+keyword+"%") + conditions = append(conditions, "group_id = ?") + values = append(values, keyword) } if status == 0 { conditions = append(conditions, "status = ?") values = append(values, String2Int(keyword)) } if name == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "name ILIKE ?") - } else { - conditions = append(conditions, "name LIKE ?") - } - values = append(values, "%"+keyword+"%") + conditions = append(conditions, "name = ?") + values = append(values, keyword) } if key == "" { - if common.UsingPostgreSQL { - conditions = append(conditions, "key ILIKE ?") - } else { - conditions = append(conditions, "key LIKE ?") - } - values = append(values, keyword+"%") + conditions = append(conditions, "key = ?") + values = append(values, keyword) } - conditions = append(conditions, "models LIKE ?") + if common.UsingPostgreSQL { + conditions = 
append(conditions, "models ILIKE ?") + } else { + conditions = append(conditions, "models LIKE ?") + } values = append(values, "%"+keyword+"%") if len(conditions) > 0 { From cc89fa72db98b87fd0ebb7add05085ad8f252e49 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Wed, 19 Feb 2025 14:35:47 +0800 Subject: [PATCH 164/167] feat: conv gemini tool choice --- service/aiproxy/relay/adaptor/gemini/main.go | 104 ++++++++++-------- service/aiproxy/relay/adaptor/gemini/model.go | 19 +++- 2 files changed, 73 insertions(+), 50 deletions(-) diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index eaadafca63c..eb3da76b465 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -151,27 +151,72 @@ func buildContents(ctx context.Context, textRequest *model.GeneralOpenAIRequest) Parts: make([]Part, 0), } - // Process message content - openaiContent := message.ParseContent() - for _, part := range openaiContent { - if part.Type == model.ContentTypeImageURL { - imageNum++ - if imageNum > VisionMaxImageNum { - continue + if message.Role == "assistant" && len(message.ToolCalls) > 0 { + for _, toolCall := range message.ToolCalls { + var args map[string]any + if toolCall.Function.Arguments != "" { + if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &args); err != nil { + args = make(map[string]any) + } + } else { + args = make(map[string]any) } + content.Parts = append(content.Parts, Part{ + FunctionCall: &FunctionCall{ + Name: toolCall.Function.Name, + Args: args, + }, + }) + } + } else if message.Role == "tool" && message.ToolCallID != "" { + var contentMap map[string]any + if message.Content != nil { + switch content := message.Content.(type) { + case map[string]any: + contentMap = content + case string: + if err := json.Unmarshal([]byte(content), &contentMap); err != nil { + log.Error("unmarshal content failed: " + err.Error()) + } + } + } else { + contentMap = 
make(map[string]any) } + content.Parts = append(content.Parts, Part{ + FunctionResponse: &FunctionResponse{ + Name: *message.Name, + Response: struct { + Name string `json:"name"` + Content map[string]any `json:"content"` + }{ + Name: *message.Name, + Content: contentMap, + }, + }, + }) + } else { + openaiContent := message.ParseContent() + for _, part := range openaiContent { + if part.Type == model.ContentTypeImageURL { + imageNum++ + if imageNum > VisionMaxImageNum { + continue + } + } - parts, err := buildMessageParts(ctx, part) - if err != nil { - return nil, nil, err + parts, err := buildMessageParts(ctx, part) + if err != nil { + return nil, nil, err + } + content.Parts = append(content.Parts, parts...) } - content.Parts = append(content.Parts, parts...) } - // Convert role names switch content.Role { case "assistant": content.Role = "model" + case "tool": + content.Role = "user" case "system": systemContent = &content continue @@ -215,37 +260,6 @@ func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io return http.MethodPost, nil, bytes.NewReader(data), nil } -// func CountTokens(ctx context.Context, meta *meta.Meta, chat []*ChatContent) (int, error) { -// countReq := ChatRequest{ -// Contents: chat, -// } -// countData, err := json.Marshal(countReq) -// if err != nil { -// return 0, err -// } -// req, err := http.NewRequestWithContext(ctx, http.MethodPost, getRequestURL(meta, "countTokens"), bytes.NewReader(countData)) -// if err != nil { -// return 0, err -// } -// req.Header.Set("Content-Type", "application/json") -// req.Header.Set("X-Goog-Api-Key", meta.Channel.Key) - -// resp, err := http.DefaultClient.Do(req) -// if err != nil { -// return 0, err -// } -// defer resp.Body.Close() - -// var tokenCount CountTokensResponse -// if err := json.NewDecoder(resp.Body).Decode(&tokenCount); err != nil { -// return 0, err -// } -// if tokenCount.Error != nil { -// return 0, fmt.Errorf("count tokens error: %s, code: %d, status: %s", 
tokenCount.Error.Message, tokenCount.Error.Code, resp.Status) -// } -// return tokenCount.TotalTokens, nil -// } - type ChatResponse struct { Candidates []*ChatCandidate `json:"candidates"` PromptFeedback ChatPromptFeedback `json:"promptFeedback"` @@ -298,7 +312,7 @@ func getToolCalls(candidate *ChatCandidate) []*model.Tool { if item.FunctionCall == nil { return toolCalls } - argsBytes, err := json.Marshal(item.FunctionCall.Arguments) + argsBytes, err := json.Marshal(item.FunctionCall.Args) if err != nil { log.Error("getToolCalls failed: " + err.Error()) return toolCalls @@ -308,7 +322,7 @@ func getToolCalls(candidate *ChatCandidate) []*model.Tool { Type: "function", Function: model.Function{ Arguments: conv.BytesToString(argsBytes), - Name: item.FunctionCall.FunctionName, + Name: item.FunctionCall.Name, }, } toolCalls = append(toolCalls, &toolCall) diff --git a/service/aiproxy/relay/adaptor/gemini/model.go b/service/aiproxy/relay/adaptor/gemini/model.go index 9dec4502b75..8ed1a99734b 100644 --- a/service/aiproxy/relay/adaptor/gemini/model.go +++ b/service/aiproxy/relay/adaptor/gemini/model.go @@ -42,14 +42,23 @@ type InlineData struct { } type FunctionCall struct { - Arguments any `json:"args"` - FunctionName string `json:"name"` + Args map[string]any `json:"args"` + Name string `json:"name"` +} + +type FunctionResponse struct { + Name string `json:"name"` + Response struct { + Name string `json:"name"` + Content map[string]any `json:"content"` + } `json:"response"` } type Part struct { - InlineData *InlineData `json:"inlineData,omitempty"` - FunctionCall *FunctionCall `json:"functionCall,omitempty"` - Text string `json:"text,omitempty"` + InlineData *InlineData `json:"inlineData,omitempty"` + FunctionCall *FunctionCall `json:"functionCall,omitempty"` + FunctionResponse *FunctionResponse `json:"functionResponse,omitempty"` + Text string `json:"text,omitempty"` } type ChatContent struct { From 6e83f27cffd929bf5eb32e83cc79a6920cbace18 Mon Sep 17 00:00:00 2001 From: 
zijiren233 Date: Wed, 19 Feb 2025 15:29:41 +0800 Subject: [PATCH 165/167] fix: gemini empty tool parameters --- service/aiproxy/relay/adaptor/gemini/main.go | 54 +++++++++++++++++--- 1 file changed, 47 insertions(+), 7 deletions(-) diff --git a/service/aiproxy/relay/adaptor/gemini/main.go b/service/aiproxy/relay/adaptor/gemini/main.go index eb3da76b465..4a9707b7a99 100644 --- a/service/aiproxy/relay/adaptor/gemini/main.go +++ b/service/aiproxy/relay/adaptor/gemini/main.go @@ -83,6 +83,13 @@ func buildTools(textRequest *model.GeneralOpenAIRequest) []ChatTools { if textRequest.Tools != nil { functions := make([]model.Function, 0, len(textRequest.Tools)) for _, tool := range textRequest.Tools { + if parameters, ok := tool.Function.Parameters.(map[string]any); ok { + if properties, ok := parameters["properties"].(map[string]any); ok { + if len(properties) == 0 { + tool.Function.Parameters = nil + } + } + } functions = append(functions, tool.Function) } return []ChatTools{{FunctionDeclarations: functions}} @@ -305,10 +312,13 @@ type ChatPromptFeedback struct { SafetyRatings []ChatSafetyRating `json:"safetyRatings"` } -func getToolCalls(candidate *ChatCandidate) []*model.Tool { - var toolCalls []*model.Tool +func getToolCalls(candidate *ChatCandidate, toolCallIndex int) []*model.Tool { + if len(candidate.Content.Parts) <= toolCallIndex { + return nil + } - item := candidate.Content.Parts[0] + var toolCalls []*model.Tool + item := candidate.Content.Parts[toolCallIndex] if item.FunctionCall == nil { return toolCalls } @@ -346,8 +356,23 @@ func responseGeminiChat2OpenAI(meta *meta.Meta, response *ChatResponse) *openai. 
FinishReason: constant.StopFinishReason, } if len(candidate.Content.Parts) > 0 { - if candidate.Content.Parts[0].FunctionCall != nil { - choice.Message.ToolCalls = getToolCalls(candidate) + toolCallIndex := -1 + for i, part := range candidate.Content.Parts { + if part.FunctionCall != nil { + toolCallIndex = i + break + } + } + if toolCallIndex != -1 { + choice.Message.ToolCalls = getToolCalls(candidate, toolCallIndex) + content := strings.Builder{} + for i, part := range candidate.Content.Parts { + if i == toolCallIndex { + continue + } + content.WriteString(part.Text) + } + choice.Message.Content = content.String() } else { builder := strings.Builder{} for i, part := range candidate.Content.Parts { @@ -387,8 +412,23 @@ func streamResponseGeminiChat2OpenAI(meta *meta.Meta, geminiResponse *ChatRespon Index: i, } if len(candidate.Content.Parts) > 0 { - if candidate.Content.Parts[0].FunctionCall != nil { - choice.Delta.ToolCalls = getToolCalls(candidate) + toolCallIndex := -1 + for i, part := range candidate.Content.Parts { + if part.FunctionCall != nil { + toolCallIndex = i + break + } + } + if toolCallIndex != -1 { + choice.Delta.ToolCalls = getToolCalls(candidate, toolCallIndex) + content := strings.Builder{} + for i, part := range candidate.Content.Parts { + if i == toolCallIndex { + continue + } + content.WriteString(part.Text) + } + choice.Delta.Content = content.String() } else { builder := strings.Builder{} for i, part := range candidate.Content.Parts { From 4266480219a8cd0e6f804fed474eedf83d60a43e Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 20 Feb 2025 09:35:16 +0800 Subject: [PATCH 166/167] chore: env readme --- service/aiproxy/README.md | 10 ++++++++++ service/aiproxy/deploy/Kubefile | 3 +++ .../aiproxy/deploy/manifests/aiproxy-config.yaml.tmpl | 2 ++ 3 files changed, 15 insertions(+) diff --git a/service/aiproxy/README.md b/service/aiproxy/README.md index 442a287699c..50c0e872935 100644 --- a/service/aiproxy/README.md +++ 
b/service/aiproxy/README.md @@ -14,3 +14,13 @@ sealos run ghcr.io/labring/sealos-cloud-aiproxy-service:latest \ -e cloudDomain= \ -e LOG_SQL_DSN="" ``` + +# Envs + +- `ADMIN_KEY`: The admin key for the AI Proxy Service, admin key is used to admin api and relay api, default is empty +- `SEALOS_JWT_KEY`: Used to sealos balance service, default is empty +- `SQL_DSN`: The database connection string, default is empty +- `LOG_SQL_DSN`: The log database connection string, default is empty +- `REDIS_CONN_STRING`: The redis connection string, default is empty +- `BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE`: Whether to check real name, default is `false` +- `BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT`: The amount of used balance when the user has no real name, default is `1` diff --git a/service/aiproxy/deploy/Kubefile b/service/aiproxy/deploy/Kubefile index 9fc2c9a21f6..0ec02c20cb6 100644 --- a/service/aiproxy/deploy/Kubefile +++ b/service/aiproxy/deploy/Kubefile @@ -13,4 +13,7 @@ ENV SQL_DSN="" ENV LOG_SQL_DSN="" ENV REDIS_CONN_STRING="" +ENV BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE="false" +ENV BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT="1" + CMD ["bash scripts/init.sh"] diff --git a/service/aiproxy/deploy/manifests/aiproxy-config.yaml.tmpl b/service/aiproxy/deploy/manifests/aiproxy-config.yaml.tmpl index 94214c61537..d345fb62b82 100644 --- a/service/aiproxy/deploy/manifests/aiproxy-config.yaml.tmpl +++ b/service/aiproxy/deploy/manifests/aiproxy-config.yaml.tmpl @@ -10,3 +10,5 @@ data: SQL_DSN: "{{ .SQL_DSN }}" LOG_SQL_DSN: "{{ .LOG_SQL_DSN }}" REDIS_CONN_STRING: "{{ .REDIS_CONN_STRING }}" + BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE: "{{ .BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE }}" + BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT: "{{ .BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT }}" From 0b1544f0c5c665751ab49785c2602077cddc8b82 Mon Sep 17 00:00:00 2001 From: zijiren233 Date: Thu, 20 Feb 2025 10:49:22 +0800 Subject: [PATCH 167/167] fix: ci lint --- 
service/aiproxy/relay/controller/handle.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/service/aiproxy/relay/controller/handle.go b/service/aiproxy/relay/controller/handle.go index aae7f7ad17b..785b4085c9d 100644 --- a/service/aiproxy/relay/controller/handle.go +++ b/service/aiproxy/relay/controller/handle.go @@ -87,12 +87,14 @@ func Handle(meta *meta.Meta, c *gin.Context, preProcess func() (*PreCheckGroupBa // 5. Do request usage, detail, respErr := DoHelper(adaptor, c, meta) if respErr != nil { + var logDetail *model.RequestDetail if detail != nil && config.DebugEnabled { + logDetail = detail log.Errorf( "handle failed: %+v\nrequest detail:\n%s\nresponse detail:\n%s", respErr.Error, - detail.RequestBody, - detail.ResponseBody, + logDetail.RequestBody, + logDetail.ResponseBody, ) } else { log.Errorf("handle failed: %+v", respErr.Error)