package streaming

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
)

// StreamReader reads and processes streaming responses
type StreamReader struct {
	reader  io.Reader
	handler StreamHandler
	decoder *json.Decoder
	ctx     context.Context
}

// NewStreamReader creates a new StreamReader
func NewStreamReader(ctx context.Context, reader io.Reader, handler StreamHandler) *StreamReader {
	return &StreamReader{
		reader:  reader,
		handler: handler,
		decoder: json.NewDecoder(reader),
		ctx:     ctx,
	}
}

// Process reads and processes the stream
func (s *StreamReader) Process() error {
	for {
		var event StreamEvent
		if err := s.decoder.Decode(&event); err != nil {
			if err == io.EOF {
				return s.handler.OnComplete(s.ctx)
			}
			return s.handler.OnError(s.ctx, err)
		}

		if event.Error != nil {
			return s.handler.OnError(s.ctx, event.Error)
		}

		if err := s.handler.OnEvent(&event); err != nil {
			return err
		}

		if event.Done {
			return s.handler.OnComplete(s.ctx)
		}
	}
}

// Stream represents a streaming session
type Stream struct {
	ID      string
	State   StreamState
	Config  *StreamConfig
	Metrics *StreamMetrics
	Events  chan *StreamEvent
	done    chan struct{}
	mutex   sync.RWMutex
	logger  *logrus.Entry
}

// NewStream creates a new stream
func NewStream(id string, config *StreamConfig) *Stream {
	if config == nil {
		config = NewDefaultStreamConfig()
	}

	return &Stream{
		ID:      id,
		State:   StreamStateInitialized,
		Config:  config,
		Metrics: &StreamMetrics{StartTime: time.Now()},
		Events:  make(chan *StreamEvent, 100),
		done:    make(chan struct{}),
		logger:  logrus.WithField("stream_id", id),
	}
}

// Start starts the stream processing
func (s *Stream) Start(ctx context.Context) error {
	s.mutex.Lock()
	if s.State != StreamStateInitialized {
		s.mutex.Unlock()
		return fmt.Errorf("stream already started")
	}
	s.State = StreamStateRunning
	s.mutex.Unlock()

	go s.processEvents(ctx)
	return nil
}

// Stop stops the stream
func (s *Stream) Stop() error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if s.State == StreamStateStopped {
		return nil
	}

	close(s.done)
	s.State = StreamStateStopped
	return nil
}

// Send sends an event to the stream
func (s *Stream) Send(event *StreamEvent) error {
	s.mutex.RLock()
	if s.State != StreamStateRunning {
		s.mutex.RUnlock()
		return fmt.Errorf("stream not running")
	}
	s.mutex.RUnlock()

	select {
	case s.Events <- event:
		s.updateMetrics(event, true)
		return nil
	case <-s.done:
		return fmt.Errorf("stream stopped")
	default:
		return fmt.Errorf("stream buffer full")
	}
}

// processEvents processes incoming events
func (s *Stream) processEvents(ctx context.Context) {
	for {
		select {
		case event := <-s.Events:
			s.handleEvent(ctx, event)
		case <-s.done:
			return
		case <-ctx.Done():
			s.Stop()
			return
		}
	}
}

// handleEvent handles a stream event
func (s *Stream) handleEvent(ctx context.Context, event *StreamEvent) {
	if event == nil {
		return
	}

	s.updateMetrics(event, false)

	if event.Error != nil {
		s.logger.WithError(event.Error).Error("Stream error")
		return
	}

	if event.Done {
		s.Stop()
		return
	}

	s.logger.WithFields(logrus.Fields{
		"token": event.Token,
		"done":  event.Done,
	}).Debug("Stream event")
}

// updateMetrics updates stream metrics
func (s *Stream) updateMetrics(event *StreamEvent, sent bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if sent {
		s.Metrics.EventsSent++
	} else {
		s.Metrics.EventsProcessed++
	}

	s.Metrics.LastEventTime = time.Now()
	s.Metrics.Duration = time.Since(s.Metrics.StartTime)
}
