package wire

import (
	"encoding/json"
	"testing"

	"git.flytoex.net/yuanwei/flyto-agent/pkg/flyto"
)

// openaiReqProbe parses the JSON emitted by OpenAICompatClient.buildRequest
// for inspection. Only the sampling fields under test are declared; missing
// omitempty fields unmarshal to nil.
type openaiReqProbe struct {
	Temperature *float64 `json:"temperature"`
	TopP        *float64 `json:"top_p"`
}

func parseOpenAIReq(t *testing.T, data []byte) openaiReqProbe {
	t.Helper()
	var p openaiReqProbe
	if err := json.Unmarshal(data, &p); err != nil {
		t.Fatalf("unmarshal openai request: %v", err)
	}
	return p
}

// TestOpenAIBuildRequest_TemperatureTopP_Forwarded locks the OpenAI-compat
// path (four providers: openai/ollama/lmstudio/openrouter) forwarding
// Request.Temperature/TopP to the top-level temperature / top_p fields of
// the wire JSON.
func TestOpenAIBuildRequest_TemperatureTopP_Forwarded(t *testing.T) {
	c := NewOpenAICompatClient("fake-key", "https://fake.api")
	req := &StreamRequest{
		Model:       "gpt-4o",
		MaxTokens:   100,
		Temperature: flyto.Float(0.7),
		TopP:        flyto.Float(0.9),
	}

	got, err := c.buildRequest(req)
	if err != nil {
		t.Fatalf("buildRequest: %v", err)
	}

	p := parseOpenAIReq(t, got)
	if p.Temperature == nil {
		t.Error("temperature missing on wire, want 0.7")
	} else if *p.Temperature != 0.7 {
		t.Errorf("temperature = %v, want 0.7", *p.Temperature)
	}
	if p.TopP == nil {
		t.Error("top_p missing on wire, want 0.9")
	} else if *p.TopP != 0.9 {
		t.Errorf("top_p = %v, want 0.9", *p.TopP)
	}
}

// TestOpenAIBuildRequest_NilSampling_OmittedOnWire locks the nil-omits-on-wire
// passthrough policy: when the caller doesn't set a sampling knob, the key is
// absent from the wire JSON and upstream falls back to its provider/model
// default.
func TestOpenAIBuildRequest_NilSampling_OmittedOnWire(t *testing.T) {
	c := NewOpenAICompatClient("fake-key", "https://fake.api")
	req := &StreamRequest{Model: "gpt-4o", MaxTokens: 100}

	got, err := c.buildRequest(req)
	if err != nil {
		t.Fatalf("buildRequest: %v", err)
	}

	// Parse into a raw map to assert key absence directly (omitempty check).
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(got, &raw); err != nil {
		t.Fatalf("unmarshal raw: %v", err)
	}
	if _, ok := raw["temperature"]; ok {
		t.Errorf("temperature key present on wire when Request.Temperature == nil")
	}
	if _, ok := raw["top_p"]; ok {
		t.Errorf("top_p key present on wire when Request.TopP == nil")
	}
}
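// TestPointerOmitemptyContract is a minimal standalone sketch (added for
// illustration, independent of any wire builder) pinning the encoding/json
// contract that the zero-value test below relies on: with *float64 +
// omitempty, only a nil pointer omits the key; a pointer to an explicit
// zero is still encoded. The payload type here is hypothetical and local
// to this test.
func TestPointerOmitemptyContract(t *testing.T) {
	type payload struct {
		Temperature *float64 `json:"temperature,omitempty"`
	}

	// nil pointer: omitempty drops the key entirely.
	b, err := json.Marshal(payload{})
	if err != nil {
		t.Fatalf("marshal nil: %v", err)
	}
	if string(b) != "{}" {
		t.Errorf("nil pointer encoded as %s, want {}", b)
	}

	// Pointer to an explicit zero: the key must survive on the wire.
	zero := 0.0
	b, err = json.Marshal(payload{Temperature: &zero})
	if err != nil {
		t.Fatalf("marshal zero: %v", err)
	}
	if string(b) != `{"temperature":0}` {
		t.Errorf("explicit zero encoded as %s, want {\"temperature\":0}", b)
	}
}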
// TestOpenAIBuildRequest_ZeroTemperature_TransmittedExplicitly locks the
// "0 is a valid deterministic value" semantic. When the caller explicitly
// sets *0, the wire must carry "temperature":0; with *float64 + omitempty,
// json encoding omits only nil, never an explicit zero. Guards against a
// future regression to float64 plus a sentinel value.
func TestOpenAIBuildRequest_ZeroTemperature_TransmittedExplicitly(t *testing.T) {
	c := NewOpenAICompatClient("fake-key", "https://fake.api")
	req := &StreamRequest{
		Model:       "gpt-4o",
		MaxTokens:   100,
		Temperature: flyto.Float(0),
	}

	got, err := c.buildRequest(req)
	if err != nil {
		t.Fatalf("buildRequest: %v", err)
	}

	p := parseOpenAIReq(t, got)
	if p.Temperature == nil {
		t.Fatal("temperature missing on wire when *Temperature = 0 (omitempty must not eat explicit zero)")
	}
	if *p.Temperature != 0 {
		t.Errorf("temperature = %v, want 0", *p.Temperature)
	}
}

// geminiSamplingProbe parses the generationConfig from Gemini's buildRequest
// output, restricted to the sampling fields under test.
type geminiSamplingProbe struct {
	GenerationConfig struct {
		Temperature *float64 `json:"temperature"`
		TopP        *float64 `json:"topP"`
	} `json:"generationConfig"`
}

// TestGeminiBuildRequest_TemperatureTopP_Forwarded locks the Gemini path
// nesting Request.Temperature/TopP under generationConfig.temperature/topP,
// matching Gemini's wire format (not top-level like OpenAI-compat).
func TestGeminiBuildRequest_TemperatureTopP_Forwarded(t *testing.T) {
	c := NewGeminiClient("fake-key", "https://fake.api")
	req := &StreamRequest{
		Model:       "gemini-2.0-flash",
		MaxTokens:   1024,
		Temperature: flyto.Float(0.5),
		TopP:        flyto.Float(0.85),
	}

	got, err := c.buildRequest(req)
	if err != nil {
		t.Fatalf("buildRequest: %v", err)
	}

	var p geminiSamplingProbe
	if err := json.Unmarshal(got, &p); err != nil {
		t.Fatalf("unmarshal gemini request: %v", err)
	}
	if p.GenerationConfig.Temperature == nil {
		t.Error("generationConfig.temperature missing, want 0.5")
	} else if *p.GenerationConfig.Temperature != 0.5 {
		t.Errorf("generationConfig.temperature = %v, want 0.5", *p.GenerationConfig.Temperature)
	}
	if p.GenerationConfig.TopP == nil {
		t.Error("generationConfig.topP missing, want 0.85")
	} else if *p.GenerationConfig.TopP != 0.85 {
		t.Errorf("generationConfig.topP = %v, want 0.85", *p.GenerationConfig.TopP)
	}
}

// TestGeminiBuildRequest_NilSampling_OmittedOnWire locks the Gemini path
// omitting the temperature/topP keys inside generationConfig when callers
// don't set them.
func TestGeminiBuildRequest_NilSampling_OmittedOnWire(t *testing.T) {
	c := NewGeminiClient("fake-key", "https://fake.api")
	req := &StreamRequest{Model: "gemini-2.0-flash", MaxTokens: 1024}

	got, err := c.buildRequest(req)
	if err != nil {
		t.Fatalf("buildRequest: %v", err)
	}

	var raw struct {
		GenerationConfig map[string]json.RawMessage `json:"generationConfig"`
	}
	if err := json.Unmarshal(got, &raw); err != nil {
		t.Fatalf("unmarshal raw: %v", err)
	}
	if _, ok := raw.GenerationConfig["temperature"]; ok {
		t.Errorf("temperature present in generationConfig when Request.Temperature == nil")
	}
	if _, ok := raw.GenerationConfig["topP"]; ok {
		t.Errorf("topP present in generationConfig when Request.TopP == nil")
	}
}
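// TestGeminiBuildRequest_ZeroTemperature_TransmittedExplicitly is a hedged
// sketch mirroring the OpenAI-compat zero-value guard above; it assumes the
// Gemini builder shares the nil-only-omission policy (*float64 + omitempty
// inside generationConfig). If the Gemini wire struct diverges from that
// assumption, adjust or drop this case.
func TestGeminiBuildRequest_ZeroTemperature_TransmittedExplicitly(t *testing.T) {
	c := NewGeminiClient("fake-key", "https://fake.api")
	req := &StreamRequest{
		Model:       "gemini-2.0-flash",
		MaxTokens:   1024,
		Temperature: flyto.Float(0),
	}

	got, err := c.buildRequest(req)
	if err != nil {
		t.Fatalf("buildRequest: %v", err)
	}

	var p geminiSamplingProbe
	if err := json.Unmarshal(got, &p); err != nil {
		t.Fatalf("unmarshal gemini request: %v", err)
	}
	if p.GenerationConfig.Temperature == nil {
		t.Fatal("generationConfig.temperature missing when *Temperature = 0 (omitempty must not eat explicit zero)")
	}
	if *p.GenerationConfig.Temperature != 0 {
		t.Errorf("generationConfig.temperature = %v, want 0", *p.GenerationConfig.Temperature)
	}
}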