// Open the side panel when the extension's action icon is clicked.
chrome.sidePanel.setPanelBehavior({ openPanelOnActionClick: true });

// Route messages from the popup to the matching summarizer.
chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
  switch (request.action) {
    case 'summarize': {
      console.log('Received summarize request with settings:', request.settings);

      summarizeText(request.subtitles, request.settings)
        .then((summary) => {
          console.log('Summary generated successfully');
          sendResponse({ success: true, summary });
        })
        .catch((error) => {
          console.error('Error generating summary:', error);
          sendResponse({ success: false, error: error.message });
        });

      // Keep the message channel open: sendResponse is called asynchronously.
      return true;
    }
    case 'summarizeStream': {
      console.log('Received streaming summarize request with settings:', request.settings);

      // Fire-and-forget: results are delivered via streamUpdate messages.
      summarizeTextWithStreaming(request.subtitles, request.settings, request.messageId);

      // No direct response is sent for streaming requests.
      return false;
    }
  }
});

/**
 * Summarize text using the specified AI provider.
 * @param {string} text - The text to summarize
 * @param {Object} settings - The AI provider settings (must include `provider`)
 * @returns {Promise<string>} - The summarized text
 * @throws {Error} When the text is empty or the provider is unknown
 */
async function summarizeText(text, settings) {
  // Reject empty or whitespace-only input up front.
  if (!text || text.trim() === '') {
    throw new Error('No text to summarize');
  }

  console.log(`Using AI provider: ${settings.provider}`);

  // Shrink overly long transcripts before handing them to the model.
  const processedText = prepareTextForSummarization(text);

  switch (settings.provider) {
    case 'ollama':
      return await summarizeWithOllama(processedText, settings);
    case 'openai':
      return await summarizeWithOpenAI(processedText, settings);
    default:
      throw new Error('Invalid AI provider');
  }
}

/**
 * Summarize text with streaming updates.
 * Errors are reported via sendStreamError rather than thrown.
 * @param {string} text - The text to summarize
 * @param {Object} settings - The AI provider settings (must include `provider`)
 * @param {string} messageId - A unique ID for this summarization request
 */
async function summarizeTextWithStreaming(text, settings, messageId) {
  // Reject empty or whitespace-only input up front.
  if (!text || text.trim() === '') {
    sendStreamError(messageId, 'No text to summarize');
    return;
  }

  console.log(`Using AI provider: ${settings.provider} with streaming`);

  try {
    // Shrink overly long transcripts before handing them to the model.
    const processedText = prepareTextForSummarization(text);

    switch (settings.provider) {
      case 'ollama':
        await summarizeWithOllamaStreaming(processedText, settings, messageId);
        break;
      case 'openai':
        await summarizeWithOpenAIStreaming(processedText, settings, messageId);
        break;
      default:
        sendStreamError(messageId, 'Invalid AI provider');
    }
  } catch (error) {
    console.error('Error in streaming summarization:', error);
    sendStreamError(messageId, error.message);
  }
}

/**
 * Report a failed streaming request to listeners as a terminal streamUpdate.
 * @param {string} messageId - The ID of the message
 * @param {string} errorMessage - The error message
 */
function sendStreamError(messageId, errorMessage) {
  const payload = {
    action: 'streamUpdate',
    messageId,
    error: errorMessage,
    done: true
  };
  chrome.runtime.sendMessage(payload);
}

/**
 * Broadcast the accumulated summary text for a streaming request.
 * @param {string} messageId - The ID of the message
 * @param {string} text - The full text so far (not a delta)
 * @param {boolean} [done=false] - Whether this is the final chunk
 */
function sendStreamChunk(messageId, text, done = false) {
  const payload = {
    action: 'streamUpdate',
    messageId,
    text,
    done
  };
  chrome.runtime.sendMessage(payload);
}

/**
 * Prepare text for summarization by handling long transcripts.
 *
 * Texts up to 8000 characters pass through unchanged. Longer texts are
 * split into ~1000-word chunks, each chunk is condensed to its first and
 * last sentences, and the parts are recombined with "[Part N]" markers.
 * If the result is still over 8000 characters it is truncated with a note.
 *
 * @param {string} text - The original text to prepare
 * @returns {string} - The processed text ready for summarization
 */
function prepareTextForSummarization(text) {
  const maxLength = 8000;

  // Short transcripts need no processing.
  if (text.length <= maxLength) {
    return text;
  }

  console.log(`Text is very long (${text.length} chars), processing for better summarization`);

  // Split into chunks of roughly 1000 words.
  const words = text.split(/\s+/);
  const chunkSize = 1000;
  const chunks = [];
  for (let i = 0; i < words.length; i += chunkSize) {
    chunks.push(words.slice(i, i + chunkSize).join(' '));
  }

  console.log(`Split text into ${chunks.length} chunks`);

  // Condense each chunk: keep it whole when it has 5 or fewer sentences,
  // otherwise keep only the first 2 and last 2 sentences.
  const processedChunks = chunks.map((chunk) => {
    // Naive sentence split; original terminal punctuation is not preserved.
    const sentences = chunk.split(/[.!?]+/).filter((s) => s.trim().length > 0);

    if (sentences.length <= 5) {
      return chunk;
    }
    return sentences.slice(0, 2).join('. ') + '. ... ' + sentences.slice(-2).join('. ') + '.';
  });

  // Combine processed chunks with part markers.
  let processedText = processedChunks
    .map((chunk, index) => `[Part ${index + 1}] ${chunk}\n\n`)
    .join('');

  console.log(`Processed text length: ${processedText.length} chars`);

  // If still too long, truncate with a note. The quoted percentage is an
  // approximation based on raw character counts of the original text.
  if (processedText.length > maxLength) {
    processedText = processedText.substring(0, maxLength) +
      `\n\n[Note: The transcript was too long and has been truncated. This summary covers approximately the first ${Math.round((maxLength / text.length) * 100)}% of the video.]`;
  }

  return processedText;
}

/**
 * Summarize text using Ollama (non-streaming).
 * @param {string} text - The text to summarize
 * @param {Object} settings - The Ollama settings ({ url, model, customPrompt? })
 * @returns {Promise<string>} - The summarized text
 * @throws {Error} When settings are incomplete, the request fails, or CORS blocks it
 */
async function summarizeWithOllama(text, settings) {
  try {
    // Validate settings
    if (!settings.url) {
      throw new Error('Ollama URL is required');
    }

    if (!settings.model) {
      throw new Error('Ollama model is required');
    }

    const url = `${settings.url}/api/generate`;
    console.log(`Making request to Ollama API at: ${url} with model: ${settings.model}`);

    // Prepare the prompt using custom prompt if available
    const prompt = settings.customPrompt
      ? settings.customPrompt.replace('{transcript}', text)
      : `Please provide a comprehensive summary of the following YouTube video transcript. Focus on the main topics, key points, and important takeaways:\n\n${text}`;

    // Prepare the request data
    const requestData = {
      model: settings.model,
      prompt: prompt,
      stream: false
    };

    // Try different approaches to handle CORS
    try {
      // Approach 1: Standard fetch with CORS mode. Note: 'Origin' is a
      // forbidden header name that the browser sets itself, so we do not
      // attempt to set it manually (it would be ignored anyway).
      console.log('Trying standard fetch with CORS mode');
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json'
        },
        mode: 'cors',
        body: JSON.stringify(requestData)
      });

      if (!response.ok) {
        throw new Error(`HTTP error ${response.status}: ${response.statusText}`);
      }

      const data = await response.json();

      if (!data.response) {
        throw new Error('Ollama returned an empty response');
      }

      return data.response;
    } catch (corsError) {
      console.error('CORS error with standard mode:', corsError);

      // Approach 2: XMLHttpRequest, which might handle CORS differently.
      // XHR does not exist in MV3 service workers, so guard its presence
      // to avoid a ReferenceError that would mask the helpful CORS hint below.
      if (typeof XMLHttpRequest !== 'undefined') {
        console.log('Trying XMLHttpRequest approach');
        try {
          const xhrResponse = await new Promise((resolve, reject) => {
            const xhr = new XMLHttpRequest();
            xhr.open('POST', url, true);
            xhr.setRequestHeader('Content-Type', 'application/json');

            xhr.onload = function() {
              if (xhr.status >= 200 && xhr.status < 300) {
                try {
                  const data = JSON.parse(xhr.responseText);
                  resolve(data);
                } catch (e) {
                  reject(new Error('Failed to parse Ollama response'));
                }
              } else {
                reject(new Error(`HTTP error ${xhr.status}: ${xhr.statusText}`));
              }
            };

            xhr.onerror = function() {
              reject(new Error('Network error occurred'));
            };

            xhr.send(JSON.stringify(requestData));
          });

          if (!xhrResponse.response) {
            throw new Error('Ollama returned an empty response');
          }

          return xhrResponse.response;
        } catch (xhrError) {
          console.error('XMLHttpRequest approach failed:', xhrError);
          // Fall through to the helpful CORS error below.
        }
      }

      // If all approaches fail, provide a helpful error message
      const extensionId = chrome.runtime.id;
      throw new Error(`CORS error when connecting to Ollama at ${settings.url}. Please run Ollama with CORS enabled using: 'OLLAMA_ORIGINS=chrome-extension://${extensionId} ollama serve'`);
    }
  } catch (error) {
    console.error('Error summarizing with Ollama:', error);
    throw new Error(`Failed to summarize with Ollama: ${error.message}`);
  }
}

/**
 * Summarize text using Ollama with streaming.
 *
 * Emits incremental results via sendStreamChunk (the accumulated full text,
 * not deltas) and reports failures via sendStreamError instead of throwing.
 *
 * @param {string} text - The text to summarize
 * @param {Object} settings - The Ollama settings
 * @param {string} messageId - The ID of the message
 */
async function summarizeWithOllamaStreaming(text, settings, messageId) {
  console.log('Summarizing with Ollama (streaming)');

  try {
    // Accept both settings shapes seen in this file (ollamaUrl/ollamaModel
    // here vs. url/model in summarizeWithOllama) — TODO confirm which shape
    // the popup actually sends for streaming requests.
    const baseUrl = settings.ollamaUrl ?? settings.url;
    const model = settings.ollamaModel ?? settings.model;

    // Fall back to the default prompt instead of crashing when no custom
    // prompt is configured (matches summarizeWithOllama).
    const prompt = settings.customPrompt
      ? settings.customPrompt.replace('{transcript}', text)
      : `Please provide a comprehensive summary of the following YouTube video transcript. Focus on the main topics, key points, and important takeaways:\n\n${text}`;

    // Prepare the request
    const requestBody = {
      model: model,
      prompt: prompt,
      stream: true
    };

    // Make the request
    const response = await fetch(`${baseUrl}/api/generate`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(requestBody)
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`Ollama API error: ${response.status} ${errorText}`);
    }

    // Ollama streams newline-delimited JSON. A JSON object can be split
    // across reads, so buffer the trailing partial line between reads
    // instead of parsing each raw chunk in isolation (which dropped tokens).
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    let fullText = '';

    // Parse one NDJSON line and forward any new response text.
    const consumeLine = (line) => {
      if (!line.trim()) {
        return;
      }
      try {
        const data = JSON.parse(line);
        if (data.response) {
          fullText += data.response;
          sendStreamChunk(messageId, fullText);
        }
      } catch (error) {
        console.error('Error parsing Ollama response chunk:', error);
        // Continue processing even if one chunk fails
      }
    };

    while (true) {
      const { value, done } = await reader.read();

      if (done) {
        // Flush any buffered partial line, then send the final chunk.
        consumeLine(buffer + decoder.decode());
        sendStreamChunk(messageId, fullText, true);
        break;
      }

      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop(); // keep the (possibly partial) last line
      lines.forEach(consumeLine);
    }
  } catch (error) {
    console.error('Error in Ollama streaming:', error);
    sendStreamError(messageId, error.message);
  }
}

/**
 * Summarize text using an OpenAI-compatible API (non-streaming).
 * @param {string} text - The text to summarize
 * @param {Object} settings - The OpenAI settings ({ url, apiKey, model, customPrompt? })
 * @returns {Promise<string>} - The summarized text
 * @throws {Error} When settings are incomplete or the API request fails
 */
async function summarizeWithOpenAI(text, settings) {
  try {
    // Fail fast on incomplete configuration.
    if (!settings.url) {
      throw new Error('OpenAI API URL is required');
    }
    if (!settings.apiKey) {
      throw new Error('OpenAI API key is required');
    }
    if (!settings.model) {
      throw new Error('OpenAI model is required');
    }

    const url = `${settings.url}/chat/completions`;
    console.log(`Making request to OpenAI API at: ${url} with model: ${settings.model}`);

    const systemMessage = 'You are a helpful assistant that summarizes YouTube video transcripts. Provide comprehensive summaries that capture the main topics, key points, and important takeaways.';

    // The custom prompt, when present, takes precedence over the default.
    const userMessage = settings.customPrompt
      ? settings.customPrompt.replace('{transcript}', text)
      : `Please summarize the following YouTube video transcript:\n\n${text}`;

    const payload = {
      model: settings.model,
      messages: [
        { role: 'system', content: systemMessage },
        { role: 'user', content: userMessage }
      ],
      temperature: 0.7,
      max_tokens: 800
    };

    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${settings.apiKey}`
      },
      body: JSON.stringify(payload)
    });

    if (!response.ok) {
      // Prefer the API's own error message when the body is JSON.
      let errorMessage = `HTTP error ${response.status}: ${response.statusText}`;
      try {
        const errorData = await response.json();
        errorMessage = `OpenAI API error: ${errorData.error?.message || response.statusText}`;
      } catch (e) {
        // Body was not JSON; keep the generic HTTP error.
      }
      throw new Error(errorMessage);
    }

    const data = await response.json();
    const message = data.choices?.[0]?.message;

    if (!message) {
      throw new Error('OpenAI returned an invalid response');
    }

    return message.content;
  } catch (error) {
    console.error('Error summarizing with OpenAI:', error);
    throw new Error(`Failed to summarize with OpenAI: ${error.message}`);
  }
}

/**
 * Summarize text using an OpenAI-compatible API with streaming.
 *
 * Emits incremental results via sendStreamChunk (the accumulated full text,
 * not deltas) and reports failures via sendStreamError instead of throwing.
 *
 * @param {string} text - The text to summarize
 * @param {Object} settings - The OpenAI settings
 * @param {string} messageId - The ID of the message
 */
async function summarizeWithOpenAIStreaming(text, settings, messageId) {
  console.log('Summarizing with OpenAI (streaming)');

  try {
    // Accept both settings shapes seen in this file (openaiUrl/openaiModel/
    // openaiKey here vs. url/model/apiKey in summarizeWithOpenAI) — TODO
    // confirm which shape the popup actually sends for streaming requests.
    const baseUrl = settings.openaiUrl ?? settings.url;
    const model = settings.openaiModel ?? settings.model;
    const apiKey = settings.openaiKey ?? settings.apiKey;

    // Fall back to the default prompt instead of crashing when no custom
    // prompt is configured (matches summarizeWithOpenAI).
    const prompt = settings.customPrompt
      ? settings.customPrompt.replace('{transcript}', text)
      : `Please summarize the following YouTube video transcript:\n\n${text}`;

    // Prepare the request
    const requestBody = {
      model: model,
      messages: [
        {
          role: 'user',
          content: prompt
        }
      ],
      stream: true
    };

    // Make the request
    const response = await fetch(`${baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`
      },
      body: JSON.stringify(requestBody)
    });

    if (!response.ok) {
      // The error body may not be JSON; read it as text first so a parse
      // failure cannot replace the real API error with a SyntaxError.
      const rawError = await response.text();
      let errorDetail = rawError;
      try {
        errorDetail = JSON.stringify(JSON.parse(rawError));
      } catch (e) {
        // Not JSON; report the raw body as-is.
      }
      throw new Error(`OpenAI API error: ${response.status} ${errorDetail}`);
    }

    // OpenAI streams Server-Sent Events. An event can be split across
    // reads, so buffer the trailing partial line between reads instead of
    // parsing each raw chunk in isolation (which dropped tokens).
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    let fullText = '';

    // Parse one SSE line and forward any new delta content.
    const consumeLine = (line) => {
      const trimmed = line.trim();
      if (!trimmed.startsWith('data:')) {
        return;
      }
      const data = trimmed.substring(5).trim(); // Remove 'data:' prefix
      if (data === '[DONE]') {
        return;
      }
      try {
        const json = JSON.parse(data);
        const delta = json.choices?.[0]?.delta?.content;
        if (delta) {
          fullText += delta;
          sendStreamChunk(messageId, fullText);
        }
      } catch (error) {
        console.error('Error parsing OpenAI response chunk:', error);
        // Continue processing even if one chunk fails
      }
    };

    while (true) {
      const { value, done } = await reader.read();

      if (done) {
        // Flush any buffered partial line, then send the final chunk.
        consumeLine(buffer + decoder.decode());
        sendStreamChunk(messageId, fullText, true);
        break;
      }

      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop(); // keep the (possibly partial) last line
      lines.forEach(consumeLine);
    }
  } catch (error) {
    console.error('Error in OpenAI streaming:', error);
    sendStreamError(messageId, error.message);
  }
}