package providers

import (
	"context"
	"fmt"
	"io"

	"xagent/internal/streaming"

	"github.com/sashabaranov/go-openai"
)

// OpenAIProvider implements LLMProvider using OpenAI's API via the
// github.com/sashabaranov/go-openai client.
type OpenAIProvider struct {
	client *openai.Client // authenticated API client, built from config.APIKey
	config *ProviderConfig // provider configuration as passed to NewOpenAIProvider
}

// NewOpenAIProvider creates a new OpenAI provider backed by the official
// go-openai client.
//
// It returns an error rather than a half-initialized provider when the
// config is nil or carries no API key, since every subsequent API call
// would fail anyway.
func NewOpenAIProvider(config *ProviderConfig) (*OpenAIProvider, error) {
	// Guard against a nil config so we return a clear error instead of
	// panicking on the field access below.
	if config == nil {
		return nil, fmt.Errorf("OpenAI provider config is required")
	}
	if config.APIKey == "" {
		return nil, fmt.Errorf("OpenAI API key is required")
	}

	return &OpenAIProvider{
		client: openai.NewClient(config.APIKey),
		config: config,
	}, nil
}

// Complete implements LLMProvider by issuing a single (non-chat) text
// completion request and returning the text of the first choice.
func (p *OpenAIProvider) Complete(ctx context.Context, prompt string, options *CompletionOptions) (string, error) {
	request := openai.CompletionRequest{
		Model:       options.Model,
		Prompt:      prompt,
		MaxTokens:   options.MaxTokens,
		Temperature: options.Temperature,
		TopP:        options.TopP,
		Stop:        options.Stop,
	}

	response, err := p.client.CreateCompletion(ctx, request)
	switch {
	case err != nil:
		return "", fmt.Errorf("failed to create completion: %w", err)
	case len(response.Choices) == 0:
		return "", fmt.Errorf("no completion choices returned")
	}

	return response.Choices[0].Text, nil
}

// Chat implements LLMProvider. It delegates to the streaming path and
// accumulates every streamed token into the final reply string.
func (p *OpenAIProvider) Chat(ctx context.Context, messages []Message, options *ChatOptions) (string, error) {
	// Accumulate into a byte slice instead of repeated string concatenation:
	// `result += event.Content` in the per-token callback is quadratic in
	// the response length, since each += copies the whole string so far.
	var buf []byte
	handler := &streaming.DefaultStreamHandler{}
	handler.OnEventFunc = func(event *streaming.StreamEvent) error {
		buf = append(buf, event.Content...)
		return nil
	}

	if err := p.StreamChatWithHandler(ctx, messages, options, handler); err != nil {
		return "", err
	}

	return string(buf), nil
}

// StreamChat implements streaming chat.
//
// NOTE(review): despite the name, this implementation buffers the entire
// response via StreamChatWithHandler and delivers it as one terminal event
// on the returned channel — confirm whether per-token delivery was intended.
// The channel is closed after the final event is delivered or the context
// is canceled.
func (p *OpenAIProvider) StreamChat(ctx context.Context, messages []Message, options *ChatOptions) (<-chan streaming.StreamEvent, error) {
	// Buffer of 1 so the single terminal event can be delivered without
	// blocking on a slow consumer.
	ch := make(chan streaming.StreamEvent, 1)

	go func() {
		defer close(ch)

		handler := &streaming.DefaultStreamHandler{}
		var event streaming.StreamEvent
		if err := p.StreamChatWithHandler(ctx, messages, options, handler); err != nil {
			event = streaming.StreamEvent{Error: err, Done: true}
		} else {
			event = streaming.StreamEvent{Token: handler.GetText(), Done: true}
		}

		// Respect cancellation: the original unconditional send on an
		// unbuffered channel leaked this goroutine forever whenever the
		// caller abandoned the channel without reading.
		select {
		case ch <- event:
		case <-ctx.Done():
		}
	}()

	return ch, nil
}

// StreamChatWithHandler implements streaming chat with a custom handler.
// Each streamed token is forwarded to handler.OnEvent; OnComplete fires on
// clean end-of-stream and OnError on any transport failure. A non-nil error
// returned by the handler aborts the stream and is returned to the caller.
func (p *OpenAIProvider) StreamChatWithHandler(ctx context.Context, messages []Message, options *ChatOptions, handler streaming.StreamHandler) error {
	stream, err := p.client.CreateChatCompletionStream(ctx, openai.ChatCompletionRequest{
		Model:       options.Model,
		Messages:    convertMessages(messages),
		MaxTokens:   options.MaxTokens,
		Temperature: options.Temperature,
		TopP:        options.TopP,
		Stop:        options.Stop,
	})
	if err != nil {
		// Wrap with %w (not %v) so callers can errors.As into
		// openai.APIError etc.; this matches the wrapping style used by
		// Complete in this file.
		return fmt.Errorf("failed to create chat completion stream: %w", err)
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		if err != nil {
			// stream.Recv returns io.EOF on normal end-of-stream.
			if err == io.EOF {
				return handler.OnComplete(ctx)
			}
			return handler.OnError(ctx, err)
		}

		// Keep-alive / empty chunks carry no choices; skip them.
		if len(response.Choices) == 0 {
			continue
		}

		event := &streaming.StreamEvent{
			Type:    "token",
			Content: response.Choices[0].Delta.Content,
			Done:    false,
		}

		if err := handler.OnEvent(event); err != nil {
			return err
		}
	}
}

// Embed implements LLMProvider. It embeds text with OpenAI's
// text-embedding-ada-002 model and returns the raw embedding vector.
func (p *OpenAIProvider) Embed(ctx context.Context, text string) ([]float32, error) {
	// Call the dedicated embeddings endpoint directly. The previous
	// implementation routed this through makeRequest, which issues a *chat*
	// completion: the embeddings body carries no "temperature" key, so the
	// unchecked type assertion inside makeRequest panicked, and a chat
	// response never populates Data, so the resp.Data[0] access panicked
	// as well.
	resp, err := p.client.CreateEmbeddings(ctx, openai.EmbeddingRequest{
		Model: openai.AdaEmbeddingV2, // "text-embedding-ada-002"
		Input: []string{text},
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create embedding: %w", err)
	}
	if len(resp.Data) == 0 {
		return nil, fmt.Errorf("no embedding data returned")
	}

	return resp.Data[0].Embedding, nil
}

// Helper methods

// openAIResponse is a minimal internal view of an OpenAI API response,
// carrying only the fields this file reads: the first chat choice's message
// content and the embedding vectors. Both endpoint shapes share the one
// struct; unused fields are simply left empty.
type openAIResponse struct {
	Choices []struct {
		Message struct {
			Content string `json:"content"`
		} `json:"message"`
	} `json:"choices"`
	Data []struct {
		Embedding []float32 `json:"embedding"`
	} `json:"data"`
}

// makeRequest adapts a loosely-typed request body onto the go-openai chat
// completion API and reshapes the result into openAIResponse.
//
// NOTE(review): the endpoint argument is currently ignored — every call is
// routed to the chat-completions endpoint regardless; confirm whether other
// endpoints were ever intended to flow through here.
func (p *OpenAIProvider) makeRequest(ctx context.Context, endpoint string, body map[string]interface{}) (*openAIResponse, error) {
	// The original unchecked type assertions (body["model"].(string),
	// body["temperature"].(float32), msg["role"].(string), ...) panicked
	// whenever a key was missing or mistyped. Use the two-value form:
	// the model is mandatory, everything else falls back to zero values.
	model, ok := body["model"].(string)
	if !ok {
		return nil, fmt.Errorf("request body requires a string \"model\" field")
	}
	temperature, _ := body["temperature"].(float32)

	messages := make([]openai.ChatCompletionMessage, 0)
	if msgs, ok := body["messages"].([]map[string]interface{}); ok {
		for _, msg := range msgs {
			role, _ := msg["role"].(string)
			content, _ := msg["content"].(string)
			messages = append(messages, openai.ChatCompletionMessage{
				Role:    role,
				Content: content,
			})
		}
	}

	resp, err := p.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model:       model,
		Messages:    messages,
		Temperature: temperature,
	})
	if err != nil {
		// %w keeps the error chain unwrappable, matching Complete above.
		return nil, fmt.Errorf("failed to create chat completion: %w", err)
	}
	// Guard the Choices[0] access: an empty choice list previously panicked.
	if len(resp.Choices) == 0 {
		return nil, fmt.Errorf("no completion choices returned")
	}

	// Convert the response to our internal format.
	result := &openAIResponse{
		Choices: []struct {
			Message struct {
				Content string `json:"content"`
			} `json:"message"`
		}{
			{
				Message: struct {
					Content string `json:"content"`
				}{
					Content: resp.Choices[0].Message.Content,
				},
			},
		},
	}

	return result, nil
}

// streamResponse drains a raw streaming HTTP body through the shared
// stream reader, forwarding parsed events to handler. The body is always
// closed before returning.
func (p *OpenAIProvider) streamResponse(ctx context.Context, resp io.ReadCloser, handler streaming.StreamHandler) error {
	defer resp.Close()
	return streaming.NewStreamReader(ctx, resp, handler).Process()
}

// GetConfig returns the provider configuration this provider was created
// with. The returned pointer is the live config, not a copy.
func (p *OpenAIProvider) GetConfig() *ProviderConfig {
	return p.config
}

// GetName implements LLMProvider, returning the stable identifier used to
// select this provider.
func (p *OpenAIProvider) GetName() string {
	return "openai"
}

// convertMessages maps the internal Message slice onto the go-openai chat
// message type, preserving order. Only Role and Content are carried over.
func convertMessages(messages []Message) []openai.ChatCompletionMessage {
	converted := make([]openai.ChatCompletionMessage, 0, len(messages))
	for _, m := range messages {
		converted = append(converted, openai.ChatCompletionMessage{
			Role:    m.Role,
			Content: m.Content,
		})
	}
	return converted
}
