137 lines
3.5 KiB
Go
137 lines
3.5 KiB
Go
package main
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"os"
|
|
"strconv"
|
|
"sync/atomic"
|
|
"time"
|
|
)
|
|
|
|
// FunctionCall.Arguments is a JSON-encoded string per the OpenAI spec, not a
|
|
// JSON object — keep it as a string and decode when dispatching.
|
|
|
|
// Message is one entry in a chat conversation, shaped for the
// OpenAI-style /chat/completions wire format (see Chat).
type Message struct {
	// Role identifies the speaker slot per the OpenAI chat API
	// (presumably "system"/"user"/"assistant"/"tool" — not enforced here).
	Role string `json:"role"`
	// Content is the plain-text body; omitted when empty (e.g. on
	// assistant messages that carry only tool calls).
	Content string `json:"content,omitempty"`
	// ToolCalls holds tool invocations requested by the model.
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`
	// ToolCallID links a tool-result message back to the ToolCall.ID
	// it answers.
	ToolCallID string `json:"tool_call_id,omitempty"`
	// Name optionally names the message author or tool.
	Name string `json:"name,omitempty"`
}
|
|
|
|
// ToolCall is a single tool invocation requested by the model inside an
// assistant Message.
type ToolCall struct {
	// ID is echoed back via Message.ToolCallID when returning the
	// tool's result.
	ID string `json:"id"`
	// Type is the tool-call kind (per the OpenAI spec this is
	// "function" — not validated here).
	Type string `json:"type"`
	// Function carries the function name and its JSON-encoded arguments.
	Function FunctionCall `json:"function"`
}
|
|
|
|
// FunctionCall names the function the model wants invoked.
//
// Arguments is a JSON-encoded string per the OpenAI spec, not a JSON
// object — it is kept as a string here and decoded by the dispatcher.
type FunctionCall struct {
	Name      string `json:"name"`
	Arguments string `json:"arguments"`
}
|
|
|
|
// chatRequest is the JSON body POSTed to /chat/completions by Chat.
type chatRequest struct {
	Model    string    `json:"model"`
	Messages []Message `json:"messages"`
	// Tools is omitted entirely when no tool definitions are supplied.
	Tools []Tool `json:"tools,omitempty"`
}
|
|
|
|
// chatResponse mirrors the subset of the /chat/completions response body
// that Chat consumes: the first choice's message, plus any in-body error
// object some providers return alongside (or instead of) an HTTP error.
type chatResponse struct {
	Choices []struct {
		Message      Message `json:"message"`
		FinishReason string  `json:"finish_reason"`
	} `json:"choices"`
	// Error is non-nil when the provider reported an API-level error in
	// the response body; Chat surfaces Error.Message to the caller.
	Error *struct {
		Message string `json:"message"`
		Type    string `json:"type"`
	} `json:"error,omitempty"`
}
|
|
|
|
// Client is a minimal OpenAI-compatible chat-completions API client.
// Construct it with NewClient; the zero value has no HTTP client and is
// not usable.
type Client struct {
	// BaseURL is the API root; Chat appends "/chat/completions" to it.
	BaseURL string
	// APIKey is sent as a Bearer token; when empty, no Authorization
	// header is set.
	APIKey string
	// Model is the model name included in every request.
	Model string
	// HTTP performs the requests; NewClient installs a timeout on it.
	HTTP *http.Client
}
|
|
|
|
func NewClient(baseURL, apiKey, model string) *Client {
|
|
timeout := 600 * time.Second
|
|
if v := os.Getenv("OPENAI_TIMEOUT_SECONDS"); v != "" {
|
|
if n, err := strconv.Atoi(v); err == nil && n > 0 {
|
|
timeout = time.Duration(n) * time.Second
|
|
}
|
|
}
|
|
return &Client{
|
|
BaseURL: baseURL,
|
|
APIKey: apiKey,
|
|
Model: model,
|
|
HTTP: &http.Client{Timeout: timeout},
|
|
}
|
|
}
|
|
|
|
var aiCallCounter uint64
|
|
|
|
func logAIIO() bool { return os.Getenv("LOG_AI_IO") == "1" }
|
|
|
|
func (c *Client) Chat(ctx context.Context, messages []Message, tools []Tool) (Message, error) {
|
|
body, err := json.Marshal(chatRequest{Model: c.Model, Messages: messages, Tools: tools})
|
|
if err != nil {
|
|
return Message{}, err
|
|
}
|
|
|
|
callID := atomic.AddUint64(&aiCallCounter, 1)
|
|
debug := logAIIO()
|
|
if debug {
|
|
fmt.Fprintf(os.Stderr, "\n===== AI REQUEST #%d model=%s msgs=%d tools=%d =====\n%s\n",
|
|
callID, c.Model, len(messages), len(tools), string(body))
|
|
}
|
|
|
|
start := time.Now()
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.BaseURL+"/chat/completions", bytes.NewReader(body))
|
|
if err != nil {
|
|
return Message{}, err
|
|
}
|
|
req.Header.Set("Content-Type", "application/json")
|
|
if c.APIKey != "" {
|
|
req.Header.Set("Authorization", "Bearer "+c.APIKey)
|
|
}
|
|
resp, err := c.HTTP.Do(req)
|
|
if err != nil {
|
|
if debug {
|
|
fmt.Fprintf(os.Stderr, "===== AI REQUEST #%d FAILED after %s: %v =====\n",
|
|
callID, time.Since(start).Round(time.Millisecond), err)
|
|
}
|
|
return Message{}, err
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
raw, err := io.ReadAll(resp.Body)
|
|
if err != nil {
|
|
return Message{}, err
|
|
}
|
|
if debug {
|
|
fmt.Fprintf(os.Stderr, "===== AI RESPONSE #%d status=%d elapsed=%s bytes=%d =====\n%s\n",
|
|
callID, resp.StatusCode, time.Since(start).Round(time.Millisecond), len(raw), string(raw))
|
|
}
|
|
if resp.StatusCode >= 400 {
|
|
return Message{}, fmt.Errorf("api error %d: %s", resp.StatusCode, string(raw))
|
|
}
|
|
|
|
var parsed chatResponse
|
|
if err := json.Unmarshal(raw, &parsed); err != nil {
|
|
return Message{}, fmt.Errorf("decode response: %w; body=%s", err, string(raw))
|
|
}
|
|
if parsed.Error != nil {
|
|
return Message{}, fmt.Errorf("api error: %s", parsed.Error.Message)
|
|
}
|
|
if len(parsed.Choices) == 0 {
|
|
return Message{}, fmt.Errorf("no choices in response: %s", string(raw))
|
|
}
|
|
return parsed.Choices[0].Message, nil
|
|
}
|