Skip to content

Commit badce93

Browse files
refactor: extract duplicated Responses API adapters into shared helpers
The three Responses-to-Chat conversion helpers were byte-for-byte identical across Gemini, Groq, and Ollama. Extract them into providers.ResponsesViaChat/StreamResponsesViaChat so each provider's Responses methods become one-line delegations, and consolidate the duplicated unit tests into a single test file. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent b26abbe commit badce93

8 files changed

Lines changed: 432 additions & 1053 deletions

File tree

internal/providers/gemini/gemini.go

Lines changed: 2 additions & 126 deletions
Original file line number | Diff line number | Diff line change
@@ -8,8 +8,6 @@ import (
88
"strings"
99
"time"
1010

11-
"github.com/google/uuid"
12-
1311
"gomodel/internal/core"
1412
"gomodel/internal/llmclient"
1513
"gomodel/internal/providers"
@@ -198,134 +196,12 @@ func (p *Provider) ListModels(ctx context.Context) (*core.ModelsResponse, error)
198196
}, nil
199197
}
200198

201-
// convertResponsesRequestToChat converts a ResponsesRequest to ChatRequest for Gemini
202-
func convertResponsesRequestToChat(req *core.ResponsesRequest) *core.ChatRequest {
203-
chatReq := &core.ChatRequest{
204-
Model: req.Model,
205-
Messages: make([]core.Message, 0),
206-
Temperature: req.Temperature,
207-
Stream: req.Stream,
208-
}
209-
210-
if req.MaxOutputTokens != nil {
211-
chatReq.MaxTokens = req.MaxOutputTokens
212-
}
213-
214-
// Add system instruction if provided
215-
if req.Instructions != "" {
216-
chatReq.Messages = append(chatReq.Messages, core.Message{
217-
Role: "system",
218-
Content: req.Instructions,
219-
})
220-
}
221-
222-
// Convert input to messages
223-
switch input := req.Input.(type) {
224-
case string:
225-
chatReq.Messages = append(chatReq.Messages, core.Message{
226-
Role: "user",
227-
Content: input,
228-
})
229-
case []interface{}:
230-
for _, item := range input {
231-
if msgMap, ok := item.(map[string]interface{}); ok {
232-
role, _ := msgMap["role"].(string)
233-
content := extractContentFromInput(msgMap["content"])
234-
if role != "" && content != "" {
235-
chatReq.Messages = append(chatReq.Messages, core.Message{
236-
Role: role,
237-
Content: content,
238-
})
239-
}
240-
}
241-
}
242-
}
243-
244-
return chatReq
245-
}
246-
247-
// extractContentFromInput extracts text content from responses input
248-
func extractContentFromInput(content interface{}) string {
249-
switch c := content.(type) {
250-
case string:
251-
return c
252-
case []interface{}:
253-
// Array of content parts - extract text
254-
var texts []string
255-
for _, part := range c {
256-
if partMap, ok := part.(map[string]interface{}); ok {
257-
if text, ok := partMap["text"].(string); ok {
258-
texts = append(texts, text)
259-
}
260-
}
261-
}
262-
return strings.Join(texts, " ")
263-
}
264-
return ""
265-
}
266-
267-
// convertChatResponseToResponses converts a ChatResponse to ResponsesResponse
268-
func convertChatResponseToResponses(resp *core.ChatResponse) *core.ResponsesResponse {
269-
content := ""
270-
if len(resp.Choices) > 0 {
271-
content = resp.Choices[0].Message.Content
272-
}
273-
274-
return &core.ResponsesResponse{
275-
ID: resp.ID,
276-
Object: "response",
277-
CreatedAt: resp.Created,
278-
Model: resp.Model,
279-
Provider: resp.Provider,
280-
Status: "completed",
281-
Output: []core.ResponsesOutputItem{
282-
{
283-
ID: "msg_" + uuid.New().String(),
284-
Type: "message",
285-
Role: "assistant",
286-
Status: "completed",
287-
Content: []core.ResponsesContentItem{
288-
{
289-
Type: "output_text",
290-
Text: content,
291-
Annotations: []string{},
292-
},
293-
},
294-
},
295-
},
296-
Usage: &core.ResponsesUsage{
297-
InputTokens: resp.Usage.PromptTokens,
298-
OutputTokens: resp.Usage.CompletionTokens,
299-
TotalTokens: resp.Usage.TotalTokens,
300-
},
301-
}
302-
}
303-
304199
// Responses sends a Responses API request to Gemini (converted to chat format)
305200
func (p *Provider) Responses(ctx context.Context, req *core.ResponsesRequest) (*core.ResponsesResponse, error) {
306-
// Convert ResponsesRequest to ChatRequest
307-
chatReq := convertResponsesRequestToChat(req)
308-
309-
// Use the existing ChatCompletion method
310-
chatResp, err := p.ChatCompletion(ctx, chatReq)
311-
if err != nil {
312-
return nil, err
313-
}
314-
315-
return convertChatResponseToResponses(chatResp), nil
201+
return providers.ResponsesViaChat(ctx, p, req)
316202
}
317203

318204
// StreamResponses returns a raw response body for streaming Responses API (caller must close)
319205
func (p *Provider) StreamResponses(ctx context.Context, req *core.ResponsesRequest) (io.ReadCloser, error) {
320-
// Convert ResponsesRequest to ChatRequest
321-
chatReq := convertResponsesRequestToChat(req)
322-
323-
// Get the streaming response from chat completions
324-
stream, err := p.StreamChatCompletion(ctx, chatReq)
325-
if err != nil {
326-
return nil, err
327-
}
328-
329-
// Wrap the stream to convert chat completion format to Responses API format
330-
return providers.NewOpenAIResponsesStreamConverter(stream, req.Model, "gemini"), nil
206+
return providers.StreamResponsesViaChat(ctx, p, req, "gemini")
331207
}

internal/providers/gemini/gemini_test.go

Lines changed: 0 additions & 127 deletions
Original file line number | Diff line number | Diff line change
@@ -479,130 +479,3 @@ data: [DONE]
479479
t.Error("response should end with [DONE]")
480480
}
481481
}
482-
483-
func TestConvertResponsesRequestToChat(t *testing.T) {
484-
temp := 0.7
485-
maxTokens := 1024
486-
487-
tests := []struct {
488-
name string
489-
input *core.ResponsesRequest
490-
checkFn func(*testing.T, *core.ChatRequest)
491-
}{
492-
{
493-
name: "string input",
494-
input: &core.ResponsesRequest{
495-
Model: "gemini-2.0-flash",
496-
Input: "Hello",
497-
},
498-
checkFn: func(t *testing.T, req *core.ChatRequest) {
499-
if req.Model != "gemini-2.0-flash" {
500-
t.Errorf("Model = %q, want %q", req.Model, "gemini-2.0-flash")
501-
}
502-
if len(req.Messages) != 1 {
503-
t.Errorf("len(Messages) = %d, want 1", len(req.Messages))
504-
}
505-
if req.Messages[0].Role != "user" {
506-
t.Errorf("Messages[0].Role = %q, want %q", req.Messages[0].Role, "user")
507-
}
508-
},
509-
},
510-
{
511-
name: "with instructions",
512-
input: &core.ResponsesRequest{
513-
Model: "gemini-2.0-flash",
514-
Input: "Hello",
515-
Instructions: "Be helpful",
516-
},
517-
checkFn: func(t *testing.T, req *core.ChatRequest) {
518-
if len(req.Messages) < 2 {
519-
t.Fatalf("len(Messages) = %d, want at least 2", len(req.Messages))
520-
}
521-
if req.Messages[0].Role != "system" {
522-
t.Errorf("Messages[0].Role = %q, want %q", req.Messages[0].Role, "system")
523-
}
524-
if req.Messages[0].Content != "Be helpful" {
525-
t.Errorf("Messages[0].Content = %q, want %q", req.Messages[0].Content, "Be helpful")
526-
}
527-
},
528-
},
529-
{
530-
name: "with parameters",
531-
input: &core.ResponsesRequest{
532-
Model: "gemini-2.0-flash",
533-
Input: "Hello",
534-
Temperature: &temp,
535-
MaxOutputTokens: &maxTokens,
536-
},
537-
checkFn: func(t *testing.T, req *core.ChatRequest) {
538-
if req.Temperature == nil || *req.Temperature != 0.7 {
539-
t.Errorf("Temperature = %v, want 0.7", req.Temperature)
540-
}
541-
if req.MaxTokens == nil || *req.MaxTokens != 1024 {
542-
t.Errorf("MaxTokens = %v, want 1024", req.MaxTokens)
543-
}
544-
},
545-
},
546-
}
547-
548-
for _, tt := range tests {
549-
t.Run(tt.name, func(t *testing.T) {
550-
result := convertResponsesRequestToChat(tt.input)
551-
tt.checkFn(t, result)
552-
})
553-
}
554-
}
555-
556-
func TestConvertChatResponseToResponses(t *testing.T) {
557-
resp := &core.ChatResponse{
558-
ID: "gemini-123",
559-
Object: "chat.completion",
560-
Model: "gemini-2.0-flash",
561-
Created: 1677652288,
562-
Choices: []core.Choice{
563-
{
564-
Index: 0,
565-
Message: core.Message{
566-
Role: "assistant",
567-
Content: "Hello! How can I help you today?",
568-
},
569-
FinishReason: "stop",
570-
},
571-
},
572-
Usage: core.Usage{
573-
PromptTokens: 10,
574-
CompletionTokens: 20,
575-
TotalTokens: 30,
576-
},
577-
}
578-
579-
result := convertChatResponseToResponses(resp)
580-
581-
if result.ID != "gemini-123" {
582-
t.Errorf("ID = %q, want %q", result.ID, "gemini-123")
583-
}
584-
if result.Object != "response" {
585-
t.Errorf("Object = %q, want %q", result.Object, "response")
586-
}
587-
if result.Model != "gemini-2.0-flash" {
588-
t.Errorf("Model = %q, want %q", result.Model, "gemini-2.0-flash")
589-
}
590-
if result.Status != "completed" {
591-
t.Errorf("Status = %q, want %q", result.Status, "completed")
592-
}
593-
if len(result.Output) != 1 {
594-
t.Fatalf("len(Output) = %d, want 1", len(result.Output))
595-
}
596-
if result.Usage == nil {
597-
t.Fatal("Usage should not be nil")
598-
}
599-
if result.Usage.InputTokens != 10 {
600-
t.Errorf("InputTokens = %d, want 10", result.Usage.InputTokens)
601-
}
602-
if result.Usage.OutputTokens != 20 {
603-
t.Errorf("OutputTokens = %d, want 20", result.Usage.OutputTokens)
604-
}
605-
if result.Usage.TotalTokens != 30 {
606-
t.Errorf("TotalTokens = %d, want 30", result.Usage.TotalTokens)
607-
}
608-
}

0 commit comments

Comments (0)