import { readFile, writeFile } from 'node:fs/promises'
import path from 'node:path'
import { fileURLToPath } from 'node:url'
import type { ChatMessageContentItem } from '@azure/openai'
import { AzureKeyCredential, OpenAIClient } from '@azure/openai'
import { fromMarkdown } from 'mdast-util-from-markdown'
import { visitParents } from 'unist-util-visit-parents'
import { remove } from 'unist-util-remove'
import { createLogger } from 'shared/logger'
import { env } from '../../env'
import type { ISearchInput } from '@/shared/types.js'

const logger = createLogger('llm-core')
// ESM has no __filename/__dirname globals; reconstruct them from
// import.meta.url so prompt/output paths resolve relative to this module.
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)
// Placeholder fallbacks keep module load from crashing when the env vars are
// unset; any real request against these dummy values will fail at call time.
// NOTE: `||` (not `??`) is used so empty-string env vars also fall back.
const endpoint = env.AZURE_OPENAI_API_ENDPOINT || '<endpoint>'
const azureApiKey = env.AZURE_OPENAI_API_KEY || '<api key>'

/**
 * Generate code for a plain-text question in a single (non-streaming) chat
 * completion and write the extracted code block to the website preview file.
 *
 * @param question user request, sent as a single text content item
 * @throws when the model reply does not contain exactly one fenced code
 *   block (see {@link extractCode})
 */
export async function genCode(question: string) {
  logger.info('== llm client start ==')

  const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey))
  const deploymentId = env.AZURE_OPENAI_API_DEPLOYMENT_NAME
  // Re-read on every call so prompt edits take effect without a restart.
  const systemPrompt = await readFile(path.join(__dirname, './system-prompt.md'), 'utf-8')

  const chatCompletion = await client.getChatCompletions(deploymentId, [
    { role: 'system', content: systemPrompt },
    {
      role: 'user',
      content: [
        {
          type: 'text',
          text: question,
        },
        // TODO: Image support
        // {
        //   type: 'image',
        //   url: 'https://example.com/image.png',
        // }
      ],
    },
  ], {
    // set temperature to 0 to get deterministic results
    temperature: 0,
  })
  // `choices` can be empty (e.g. a filtered completion), so guard the index
  // access too; a missing message then flows into extractCode as undefined
  // and surfaces as its "invalid code blocks" error instead of a TypeError.
  const mainContent = chatCompletion.choices[0]?.message?.content
  logger.info(`raw output> ${mainContent}`)
  // The reply is markdown; extractCode enforces exactly one code block.
  const codeBlocks: string[] = extractCode(mainContent)

  await writeFile(path.join(__dirname, '../website/src/Preview.jsx'), codeBlocks[0])
}

/**
 * Stream code generation for a search input (text or image URL).
 *
 * Mirrors {@link genCode}, but hands back the completion as a stream of
 * events instead of awaiting the full reply.
 *
 * @see https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/streamChatCompletions.ts#L25
 * @param question the user's search input (text, plus optional previous code, or an image URL)
 * @returns the Azure SDK's streaming chat-completions result
 */
export async function genStreamingCode(question: ISearchInput) {
  logger.info('== llm client start ==')

  const credential = new AzureKeyCredential(azureApiKey)
  const client = new OpenAIClient(endpoint, credential)
  const deploymentId = env.AZURE_OPENAI_API_DEPLOYMENT_NAME
  // Read on every call so prompt edits apply without restarting the server.
  const systemPrompt = await readFile(path.join(__dirname, './system-prompt.md'), 'utf-8')

  return client.streamChatCompletions(deploymentId, [
    { role: 'system', content: systemPrompt },
    {
      role: 'user',
      content: composeUserContent(question),
    },
  ], {
    // set temperature to 0 to get deterministic results
    temperature: 0,
    maxTokens: 4096,
  })
}

/**
 * Build the user-message content items for a chat request.
 *
 * An image_url input becomes a single image_url item. A text input becomes a
 * single text item; when previous code is supplied it is appended inside a
 * jsx fence so the model amends it rather than starting from scratch.
 */
function composeUserContent(question: ISearchInput): Array<ChatMessageContentItem> {
  if (question.type === 'image_url') {
    return [{
      type: 'image_url',
      imageUrl: {
        url: question.value,
      },
    }]
  }

  let text = question.value
  if (question.previousCode) {
    text += `
Based on the following code, change it to meet my new requirements:
\`\`\`jsx
${question.previousCode}
\`\`\`
`
  }

  return [{ type: 'text', text }]
}

/**
 * Extract fenced code blocks from a markdown string.
 *
 * @param mainContent markdown produced by the model; null/undefined is
 *   treated as an empty document
 * @returns the code-block bodies, trimmed
 * @throws Error when the markdown does not contain exactly one code block —
 *   callers write `codeBlocks[0]` out as a complete file, so any other count
 *   is a malformed model reply
 * @example
 * extractCode('# Title\n```jsx\nfunction App() {}\n```')
 * // => ['function App() {}']
 */
function extractCode(mainContent: string | null | undefined) {
  const codeBlocks: string[] = []
  const tree = fromMarkdown(mainContent || '')
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- mdast node typings are not installed
  visitParents(tree, 'code', (node: any) => {
    codeBlocks.push(node.value.trim())
  })

  if (codeBlocks.length !== 1)
    throw new Error(`invalid code blocks ${JSON.stringify(codeBlocks)}`)

  // NOTE: the original also called remove(tree, 'code') here, but `tree` is
  // local and discarded immediately afterwards, so the mutation had no
  // observable effect; the dead call has been dropped.
  return codeBlocks
}
