import { OpenAI } from "openai";
import { getLogger, waitFor } from "../../utils";
import getConfig from "../../config";
import { insertRecords, query } from "../../db/clickhouse";
import { chunk } from "lodash";
import { Task } from "..";

// Hourly task ('0 * * * *') that asks an LLM to rate not-yet-analyzed
// issues and stores the verdicts in ClickHouse. singleInstance prevents
// overlapping runs.
const task: Task = {
  cron: '0 * * * *',
  singleInstance: true,
  callback: async () => {
    const logger = getLogger('IssueAnalysisTask');
    // NOTE(review): config is typed `any`; only `config.qwen.token` is
    // read below — confirm the expected shape against the config module.
    const config: any = await getConfig();

    // Number of issues analyzed in parallel per batch.
    const concurrentRequestNumber = 10;
    // Allowed values for "Information Quality" — must stay in sync with
    // the Enum labels of issue_info.information_quality.
    const qualityOptions = ['Very Low', 'Low', 'Medium', 'High', 'Very High'];

    // Row shape read from issues_with_label (see getIssues).
    interface InputIssue {
      id: number;
      platform: string;
      repoName: string;
      number: number;
      title: string;
      body: string;
    }

    // Verdict shape written to issue_info (see saveIssues).
    interface OutputIssue {
      id: number;
      platform: string;
      informationQuality: string;
      isAutomaticallyGenerated: string;
      hostileOrAbusive: string;
    }

    // OpenAI-compatible client pointed at the Alibaba Cloud DashScope
    // endpoint (serves the Qwen models used below).
    const openai = new OpenAI({
      apiKey: config.qwen.token,
      baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
    });

    /**
     * Create the issue_info results table in ClickHouse if it is missing.
     * ReplacingMergeTree ordered by (id, platform) lets repeated runs
     * overwrite earlier verdicts for the same issue.
     */
    const createIssueInfoTable = async (): Promise<void> => {
      await query(`
    CREATE TABLE IF NOT EXISTS issue_info
    (
      \`id\` UInt64,
      \`platform\` LowCardinality(String),
      \`information_quality\` Enum('Very Low' = 1, 'Low' = 2, 'Medium' = 3, 'High' = 4, 'Very High' = 5),
      \`is_automatically_generated\` Enum('Yes' = 1, 'Uncertain' = 2, 'No' = 3),
      \`hostile_or_abusive\` Enum('No' = 1, 'Yes' = 2)
    )
    ENGINE = ReplacingMergeTree
    ORDER BY (id, platform)
    SETTINGS index_granularity = 8192`);
    };

    /**
     * Rate one issue with the Qwen model and parse the three verdict
     * fields out of its plain-text reply.
     *
     * Returns null when the request fails or the reply is malformed;
     * content the provider rejects as inappropriate gets pessimistic
     * defaults instead, so it is recorded once and never retried.
     */
    const analyzeIssue = async (issue: InputIssue): Promise<OutputIssue | null> => {
      const prompt = `
  You are an intelligent assistant tasked with analyzing GitHub issues based on their title, body content, and the repository's description. Your goal is to evaluate the following aspects of the issue:

- Information Quality: Assess the amount of useful information provided in the issue. Is it detailed, relevant, and actionable?
  - Return values: Very Low, Low, Medium, High, Very High
- Is Automatically Generated: Determine whether the issue is automatically generated by a tool.
  - Return values: Yes, Uncertain, No
- Hostile Or Abusive: Identify if the issue is hostile or abusive, such as:
  - Spam or Advertising: Promotions, irrelevant links, or commercial content.
  - Malicious Content: Harmful, defamatory, or phishing attempts.
  - Sensitive Topics: Political, religious, or controversial discussions unrelated to the repository.
  - Explicit Content: Pornographic, gambling, or other NSFW material.
  - Return values: Yes, No

# Provide a concise evaluation for each of the three aspects above.
- Use a scale of Very Low, Low, Medium, High, or Very High to rate the Information Quality.
- Use Yes or No to indicate whether the issue is Automatically Generated or Hostile Or Abusive.
- Make sure to return the results in the format above, do not return any other text or format.

# Example Output:

Information Quality: Medium
Is Automatically Generated: No
Hostile Or Abusive: No

# Issue Data:

Title: ${issue.title}
Body: ${issue.body}
    `;

      try {
        const response = await openai.chat.completions.create({
          model: 'qwen3-32b',
          // enable_thinking is a DashScope-specific flag not present in
          // the OpenAI SDK types, hence the `as any` cast.
          enable_thinking: false,
          messages: [{ role: 'user', content: prompt }],
        } as any);

        const resultStr = response.choices[0]?.message?.content;
        if (!resultStr) {
          throw new Error('Empty completion content');
        }

        // Pull "<Key>: <value>" out of the reply. Throws when the key is
        // absent or the value is outside the allowed set, so a malformed
        // reply is logged and skipped (previously a missing key silently
        // produced undefined fields that the `as OutputIssue` cast hid,
        // and they were written to ClickHouse).
        const extractValue = (regex: RegExp, str: string, values: string[]): string => {
          const value = str.match(regex)?.[1]?.trim();
          if (value === undefined) {
            throw new Error(`Missing value for ${regex}`);
          }
          if (!values.includes(value)) {
            throw new Error(`Invalid value: ${value}`);
          }
          return value;
        };

        return {
          id: issue.id,
          platform: issue.platform,
          informationQuality: extractValue(/Information Quality:\s*([^\n]+)/i, resultStr, qualityOptions),
          isAutomaticallyGenerated: extractValue(/Is Automatically Generated:\s*([^\n]+)/i, resultStr, ['Yes', 'Uncertain', 'No']),
          hostileOrAbusive: extractValue(/Hostile Or Abusive:\s*([^\n]+)/i, resultStr, ['Yes', 'No']),
        };
      } catch (e) {
        if (e instanceof Error && e.message.includes('Input data may contain inappropriate content.')) {
          // The provider refused the input itself; store pessimistic
          // defaults so this issue is not re-fetched on every run.
          return {
            id: issue.id,
            platform: issue.platform,
            informationQuality: 'Very Low',
            isAutomaticallyGenerated: 'Uncertain',
            hostileOrAbusive: 'Yes',
          };
        }
        logger.error(`Error analyzing issue ${issue.id}: ${e}`);
        return null;
      }
    };

    /**
     * Fetch up to `num` labeled issues that do not yet have a row in
     * issue_info. Rows come back as positional arrays in SELECT-column
     * order: platform, id, repo_name, issue_number, issue_title, body.
     */
    const getIssues = async (num: number): Promise<InputIssue[]> => {
      const sql = `SELECT platform, id, repo_name, issue_number, issue_title, body
    FROM issues_with_label WHERE (platform, id) NOT IN (SELECT platform, id FROM issue_info)
    LIMIT ${num}`;
      const rows = await query(sql);
      return rows.map(([platform, id, repoName, number, title, body]) => ({
        id: +id, // force numeric: ClickHouse may return UInt64 as string
        platform,
        repoName,
        number,
        title,
        body,
      }));
    };

    /**
     * Persist analysis verdicts into issue_info, skipping failed (null)
     * entries. Insert errors are logged rather than thrown so one bad
     * batch does not abort the whole task run.
     */
    const saveIssues = async (issues: Array<OutputIssue | null>) => {
      const records = [];
      for (const issue of issues) {
        if (issue === null) {
          continue;
        }
        records.push({
          id: issue.id,
          platform: issue.platform,
          information_quality: issue.informationQuality,
          is_automatically_generated: issue.isAutomaticallyGenerated,
          hostile_or_abusive: issue.hostileOrAbusive,
        });
      }
      try {
        await insertRecords(records, 'issue_info');
      } catch (e) {
        logger.error(`Error saving issues: ${e}`);
      }
    };

    await createIssueInfoTable();

    let issues = await getIssues(concurrentRequestNumber * 60);

    while (issues.length > 0) {
      logger.info(`Found ${issues.length} issues to analyze.`);
      // Launch one batch every 2s without awaiting its completion (the
      // throttle between dispatches is intentional), but keep every
      // batch promise so we can wait for the saves below.
      const pending: Promise<void>[] = [];
      for (const batch of chunk(issues, concurrentRequestNumber)) {
        pending.push(
          Promise.all(batch.map(i => analyzeIssue(i))).then(outputIssues => saveIssues(outputIssues))
        );
        await waitFor(2000);
      }
      // Previously the next getIssues ran while saves were still in
      // flight, so its NOT IN filter could return issues already being
      // processed (duplicate LLM calls) and the final log could fire
      // before the last batch was persisted.
      await Promise.all(pending);
      issues = await getIssues(concurrentRequestNumber * 60);
    }

    logger.info('IssueAnalysisTask done.');
  }
};

// NOTE(review): the file uses ES imports above but a CommonJS export
// here — presumably the task loader require()s the compiled module;
// confirm before switching to `export default`.
module.exports = task;
