import {
  BaseEmbeddings,
  BaseLoader,
  BaseVectorDatabase,
  Chunk,
  InsertChunkData,
  LoaderChunk
} from '@cherrystudio/embedjs-interfaces'
import md5 from 'md5'

import { HybridSearch } from './rag-application.js'
import { RAGApplicationBuilder } from './rag-application-builder.js'

// --- Mocks for testing ---

// Mocking a vector database to run tests fully in-memory
// Mocking a vector database to run tests fully in-memory
class MockVectorDatabase implements BaseVectorDatabase {
  // Backing store holding every inserted chunk in insertion order.
  private store: InsertChunkData[] = []

  // eslint-disable-next-line @typescript-eslint/no-empty-function
  async init() {}

  async insertChunks(chunks: InsertChunkData[]): Promise<number> {
    for (const chunk of chunks) {
      this.store.push(chunk)
    }
    return chunks.length
  }

  // Cosine similarity of two vectors; returns 0 when either magnitude is zero.
  private cosineSimilarity(left: number[], right: number[]) {
    let dot = 0
    let normLeft = 0
    for (let i = 0; i < left.length; i++) {
      dot += left[i] * right[i]
      normLeft += left[i] * left[i]
    }
    let normRight = 0
    for (const component of right) {
      normRight += component * component
    }
    const magnitudeProduct = Math.sqrt(normLeft) * Math.sqrt(normRight)
    return magnitudeProduct === 0 ? 0 : dot / magnitudeProduct
  }

  // Scores every stored chunk against the query vector and returns the top-k,
  // best score first.
  async similaritySearch(queryVector: number[], k: number): Promise<(Chunk & { score: number })[]> {
    const scored = this.store.map((entry) => ({
      entry,
      score: this.cosineSimilarity(queryVector, entry.vector)
    }))
    scored.sort((a, b) => b.score - a.score)
    return scored.slice(0, k).map(({ entry, score }) => ({
      pageContent: entry.pageContent,
      metadata: entry.metadata,
      score
    }))
  }

  async getVectorCount(): Promise<number> {
    return this.store.length
  }

  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  async deleteKeys(_uniqueLoaderId: string): Promise<boolean> {
    // No-op for this mock
    return true
  }

  async reset(): Promise<void> {
    this.store.length = 0
  }
}

// Mocking an embedding model so we don't need real API keys for this test
// Mocking an embedding model so we don't need real API keys for this test.
// Always returns the canned vectors supplied at construction time.
class MockEmbeddings extends BaseEmbeddings {
  constructor(
    private docVectors: number[][],
    private qVector: number[]
  ) {
    super()
  }

  /** Dimensionality is derived from the canned query vector. */
  async getDimensions(): Promise<number> {
    return this.qVector.length
  }

  /** Ignores the input texts and returns the preset document vectors. */
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  async embedDocuments(_texts: string[]): Promise<number[][]> {
    return this.docVectors
  }

  /** Ignores the input text and returns the preset query vector. */
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  async embedQuery(_text: string): Promise<number[]> {
    return this.qVector
  }
}

// Mocking a loader to feed our custom documents
// Mocking a loader to feed our custom documents.
// Emits one chunk per document with a stable synthetic id and an md5 hash.
class MockLoader extends BaseLoader<any> {
  constructor(private docs: { pageContent: string }[]) {
    super('mock-loader', {}, 0, 0)
  }

  async *getUnfilteredChunks(): AsyncGenerator<LoaderChunk, void, void> {
    for (const [index, doc] of this.docs.entries()) {
      yield {
        pageContent: doc.pageContent,
        metadata: { id: `doc_${index}`, source: 'mock-source' },
        contentHash: md5(doc.pageContent)
      }
    }
  }
}

// A type for our search results to satisfy TypeScript
// A type for our search results to satisfy TypeScript.
// Extends the base Chunk with the score returned by the vector search.
interface SearchResult extends Chunk {
  score: number // raw similarity score from the vector database
  finalScore?: number // NOTE(review): presumably the fused score some hybrid algorithms attach — confirm against rag-application.js
}

// --- Test Cases ---

/**
 * Scenarios that probe hybrid-search ranking behavior.
 *
 * For each scenario: `docVectors[i]` is the mocked embedding of `documents[i]`,
 * `queryVector` is the mocked embedding of `query`, and `ideal_top_1_idx` is
 * the document index a good algorithm should rank first. The vectors are
 * hand-crafted so semantic (cosine) ranking and keyword matching pull in
 * different directions, as described in each scenario's `notes`.
 */
const testScenarios = [
  {
    name: 'Scenario 1: High semantic relevance, but poor keyword match',
    query: 'Discussing the evolution of future work models',
    documents: [
      {
        pageContent:
          'Artificial intelligence and automation will fundamentally reshape the labor market, requiring new skills.'
      },
      { pageContent: 'Our company is focused on improving customer service quality.' },
      { pageContent: 'A few thoughts on what is to come: we need a clear roadmap for our tasks.' }
    ],
    docVectors: [
      [0.9, 0.8, 0.1], // Best answer, highest semantic score
      [0.5, 0.4, 0.8], // Keyword trap, lower semantic score
      [0.6, 0.5, 0.7] // Another keyword trap
    ],
    queryVector: [1.0, 0.8, 0.2], // Query vector is closest to the best answer
    ideal_top_1_idx: 0,
    notes:
      'V3 should trust the high vector score, ignoring keyword noise from "work" and "future" in other docs. V1/V2 might fail.'
  },
  {
    name: 'Scenario 2: Precise keyword match, but weaker semantic score',
    query: 'How to fix a printer connection error',
    documents: [
      { pageContent: 'A guide to troubleshooting general network connectivity failures.' },
      { pageContent: 'To resolve a printer connection error, first check if the USB cable is properly plugged in.' },
      { pageContent: 'The ink cartridge for this machine is high quality and prints fast.' }
    ],
    docVectors: [
      [0.9, 0.8, 0.2], // Semantic trap, highest score (about "connection")
      [0.7, 0.6, 0.8], // Best answer, medium score
      [0.2, 0.1, 0.3] // Irrelevant doc
    ],
    queryVector: [1.0, 0.8, 0.1], // Query vector is VERY close to the semantic trap
    ideal_top_1_idx: 1,
    notes:
      'Native vector search will fail because of the potent semantic trap. V3.2 should overcome this by dynamically shifting weight to the high keyword score of index 1.'
  },
  {
    name: 'Scenario 3: Strong signals for both semantic and keyword matching',
    query: 'Which headphones have the best active noise cancellation?',
    documents: [
      {
        pageContent: 'The sound quality of these headphones is excellent, but the noise cancellation is just average.'
      },
      {
        pageContent:
          'I bought it for the active noise cancellation, and the effect is outstanding. The world goes quiet.'
      },
      { pageContent: 'The battery life is a highlight of these headphones; it can last a full day.' }
    ],
    docVectors: [
      [0.6, 0.5, 0.2], // Semantically average
      [0.9, 0.9, 0.9], // Best answer, highest semantic score
      [0.4, 0.3, 0.1] // Semantically irrelevant
    ],
    queryVector: [0.9, 0.8, 0.9], // Query vector is closest to the best answer
    ideal_top_1_idx: 1,
    notes: 'This is a straightforward case. All algorithms should correctly identify index 1 as the best answer.'
  }
]

// The ranking strategies under comparison. `type: null` means plain vector
// ordering (no hybrid re-ranking); the other values select a HybridSearch
// strategy by name. The `name` labels are runtime output strings (printed as
// section headers) and are intentionally left as-is.
const algorithms: { name: string; type: HybridSearch | null }[] = [
  { name: '1. 原生向量排序', type: null }, // native vector ordering (baseline)
  { name: '2. V1.0 - 简单加权求和', type: 'simple_weighted_sum' }, // simple weighted sum
  { name: '3. V2.0 - 动态缩放因子', type: 'dynamic_scaling' }, // dynamic scaling factor
  { name: '4. 备选 - 倒数排名融合 (RRF)', type: 'rrf' }, // reciprocal rank fusion
  { name: '5. V3.0 - 自适应混合模型 (默认)', type: 'adaptive_hybrid' } // adaptive hybrid (default)
]

async function runDemonstration() {
  console.log('############################################################')
  console.log('###         排序算法改进有效性验证与对比演示         ###')
  console.log('############################################################\n')

  for (const scenario of testScenarios) {
    console.log(`\n================== ${scenario.name} ==================`)
    console.log(`\n[Query]: ${scenario.query}\n`)
    console.log('[Candidate Documents]:')
    scenario.documents.forEach((doc, i) => {
      const isIdeal = i === scenario.ideal_top_1_idx ? '🏆 [Best Answer]' : ''
      console.log(`  ${i}. ${doc.pageContent} ${isIdeal}`)
    })
    console.log(`\n[Test Notes]: ${scenario.notes}\n`)

    for (const algorithm of algorithms) {
      console.log(`--- ${algorithm.name} ---`)

      const app = await new RAGApplicationBuilder()
        .setModel('NO_MODEL')
        .setEmbeddingModel(new MockEmbeddings(scenario.docVectors, scenario.queryVector))
        .setVectorDatabase(new MockVectorDatabase())
        .addLoader(new MockLoader(scenario.documents))
        .build(algorithm.type)

      const results = (await app.getEmbeddings(scenario.query)) as SearchResult[]

      results.slice(0, 3).forEach((result, i) => {
        const originalIndex = scenario.documents.findIndex((d) => d.pageContent === result.pageContent)
        let icon = ' '
        if (i === 0) {
          icon = originalIndex === scenario.ideal_top_1_idx ? '🏆' : '❌'
        }
        console.log(`  ${i + 1}. [${icon}] (Original Index: ${originalIndex}) ${result.pageContent}`)
      })
    }
    console.log(`\n================== SCENARIO END ==================`)
  }
}

// Entry point. Attach a rejection handler so a failure inside the async
// demonstration is reported with context instead of surfacing as an
// unhandled promise rejection.
runDemonstration().catch((error: unknown) => {
  console.error('Demonstration failed:', error)
})
