import re
import argparse
import requests
import json
from sentence_transformers import SentenceTransformer, util
from zhipuai import ZhipuAI
import os

# Configure ZhipuAI client.
MODEL = "glm-4-plus"
# SECURITY: the API key should come from the environment, not source control.
# The literal fallback preserves existing behavior but should be rotated and
# removed once ZHIPUAI_API_KEY is set in deployment.
client = ZhipuAI(api_key=os.getenv('ZHIPUAI_API_KEY', '652056ee12c005f36b61ba8df67274b2.US6dlqS8ckjtuQv3'))

# Paper query API base URL
BASE_URL = "http://180.184.65.98:38880/atomgit"

# Load Sentence-BERT model for embedding calculations
embedder = SentenceTransformer('all-MiniLM-L6-v2')


# Call ZhipuAI function
def call_zhipu_ai(prompt):
    """Send *prompt* to the ZhipuAI chat model and return its reply text.

    Best-effort: any failure is logged and an empty string is returned so
    the surrounding pipeline can continue.
    """
    messages = [
        {"role": "system",
         "content": "You are an expert academic writer. Your task is to analyze and synthesize research papers to generate comprehensive literature reviews. Focus on maintaining academic rigor while ensuring clarity and coherence."},
        {"role": "user", "content": prompt},
    ]
    try:
        response = client.chat.completions.create(
            model=MODEL,
            messages=messages,
            temperature=0.2,   # low temperature for factual, stable output
            max_tokens=4000,
        )
        content = response.choices[0].message.content
        print("API call successful, processing...")
    except Exception as e:
        print(f"ZhipuAI call failed: {str(e)}")
        return ""
    return content

# API call functions
def search_papers(query, top_k=100, timeout=30):
    """Search the paper API for *query* and return a list of hits.

    Args:
        query: Free-text search query.
        top_k: Maximum number of papers to request.
        timeout: Per-request timeout in seconds (new, defaulted — a request
            with no timeout can hang the whole pipeline indefinitely).

    Returns:
        The decoded JSON response (expected to be a list), or [] on any
        HTTP or network error (errors are printed, not raised).
    """
    url = f"{BASE_URL}/search_papers"
    params = {"query": query, "top_k": top_k}
    try:
        response = requests.get(url, params=params, timeout=timeout)
        if response.status_code == 200:
            return response.json()
        print(f"API Error: {response.text}")
        return []
    except Exception as e:
        print(f"Request Error: {str(e)}")
        return []


def query_by_paper_id(paper_id, top_k=10, timeout=30):
    """Fetch up to *top_k* text chunks for *paper_id* from the paper API.

    Now mirrors search_papers(): a timeout is set and network errors are
    caught and reported instead of crashing the extraction loop (the
    original let any requests exception propagate).

    Returns:
        The decoded JSON response (expected to be a list of chunk dicts),
        or [] on any HTTP or network error.
    """
    url = f"{BASE_URL}/query_by_paper_id"
    params = {"paper_id": paper_id, "top_k": top_k}
    try:
        response = requests.get(url, params=params, timeout=timeout)
        if response.status_code == 200:
            return response.json()
        print(f"API Error: {response.text}")
    except Exception as e:
        print(f"Request Error: {str(e)}")
    return []


def save_papers_to_file(papers, filename):
    """Serialize *papers* to *filename* as pretty-printed UTF-8 JSON."""
    serialized = json.dumps(papers, indent=4, ensure_ascii=False)
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(serialized)


# Multi-agent collaboration class
class ReviewGenerator:
    def __init__(self):
        # Dictionary to store references: {ref_num: {'paper_id': ..., 'title': ..., 'chunk_id': ..., 'year': ...}}
        self.references = {}
        self.ref_counter = 1

    def identify_query_type(self, query):
        """Agent 1: Identify query type"""
        prompt = f"Determine which category the following query belongs to: a. Technical concept survey, b. Research direction status, c. Method comparison analysis, d. Technical development roadmap. Query: {query}"
        return call_zhipu_ai(prompt).strip()

    def generate_outline(self, query_type, topic):
        """Agent 2: Generate outline"""
        prompt = f"For the type {query_type} on the topic '{topic}', generate a detailed literature review outline with at least 4 main sections."
        outline = call_zhipu_ai(prompt)
        return [line.strip() for line in outline.split('\n') if line.strip() and not line.startswith('#')]

    def retrieve_papers(self, query):
        """Agent 3: Retrieve papers"""
        papers = search_papers(query, top_k=100)
        if len(papers) < 50:
            print(f"Warning: Retrieved {len(papers)} papers, less than required 50.")
        return papers

    def extract_chunks(self, papers):
        """Agent 4: Extract relevant text chunks"""
        chunks = []
        for paper in papers[:50]:  # Process up to 50 papers
            entity = paper.get('entity', None)
            if entity:
                paper_id = entity.get('paper_id', '')
                if paper_id:
                    print(f"Fetching chunks for paper_id: {paper_id}")
                    paper_chunks = query_by_paper_id(paper_id, top_k=10)
                    print(f"Received {len(paper_chunks)} chunks for paper_id {paper_id}")
                    # Sort chunks by 'distance' ascending if available; otherwise, use a high default value
                    paper_chunks_sorted = sorted(paper_chunks, key=lambda x: x.get('distance', float('inf')))
                    # Limit each paper to at most 3 chunks
                    limited_chunks = paper_chunks_sorted[:3]
                    for chunk in limited_chunks:
                        chunk_data = {
                            'text': chunk.get('text', ''),
                            'paper_id': paper_id,
                            'chunk_id': chunk.get('chunk_id', ''),
                            'paper_title': entity.get('paper_title', ''),
                            'year': entity.get('year', ''),
                            'venue': re.sub(r'\d{4}$', '', entity.get('original_filename', '').split('_')[4]) if len(entity.get('original_filename', '').split('_')) > 4 else ''
                        }
                        chunks.append(chunk_data)
        return chunks

    def filter_chunks(self, query, chunks):
        """Agent 5: Filter relevant text chunks"""
        query_embedding = embedder.encode(query, convert_to_tensor=True)
        chunk_texts = [chunk['text'] for chunk in chunks]
        chunk_embedding = embedder.encode(chunk_texts, convert_to_tensor=True)
        cos_scores = util.pytorch_cos_sim(query_embedding, chunk_embedding)[0]
        top_k = min(50, len(chunks))  # Select top 50 relevant chunks
        top_indices = cos_scores.argsort(descending=True)[:top_k]
        return [chunks[i] for i in top_indices]

    def assign_chunks_to_sections(self, outline, chunks):
        """Agent 6: Assign chunks to sections"""
        section_chunks = {section: [] for section in outline}
        section_embedding = embedder.encode(outline, convert_to_tensor=True)
        chunk_texts = [chunk['text'] for chunk in chunks]
        chunk_embedding = embedder.encode(chunk_texts, convert_to_tensor=True)
        for i, chunk in enumerate(chunks):
            scores = util.pytorch_cos_sim(chunk_embedding[i], section_embedding)
            best_section_idx = scores.argmax()
            section_chunks[outline[best_section_idx]].append(chunk)
        return section_chunks

    def summarize_sections(self, section_chunks):
        """Agent 7: Summarize each section with reference numbers"""
        section_summaries = {}
        for section, chunks in section_chunks.items():
            if chunks:
                chunk_texts_with_refs = []
                for chunk in chunks:
                    ref_key = f"{chunk['paper_id']}-{chunk['chunk_id']}"
                    if ref_key not in [f"{r['paper_id']}-{r['chunk_id']}" for r in self.references.values()]:
                        self.references[self.ref_counter] = {
                            'paper_id': chunk['paper_id'],
                            'title': chunk['paper_title'],
                            'chunk_id': chunk['chunk_id'],
                            'year': chunk['year'],
                            'venue': chunk['venue']
                        }
                        ref_num = self.ref_counter
                        self.ref_counter += 1
                    else:
                        ref_num = [k for k, v in self.references.items() if
                                   v['paper_id'] == chunk['paper_id'] and v['chunk_id'] == chunk['chunk_id']][0]
                    chunk_texts_with_refs.append(f"{chunk['text']} [{ref_num}]")

                chunk_texts = '\n'.join(chunk_texts_with_refs)
                prompt = f"""Summarize the following text blocks for the section '{section}', ensuring detailed integration of information rather than simple listing.
                Include citations in the format <sup>X</sup>, where X is the reference number, placed at the end of sentences.
                Ensure each paragraph has at least one citation.
                Provided text blocks:
                {chunk_texts}
                """
                summary = call_zhipu_ai(prompt)
                section_summaries[section] = summary
        return section_summaries

    def generate_review(self, section_summaries):
        """Agent 8: Generate final review with references"""
        summaries_text = '\n\n'.join([f"{section}:\n{summary}" for section, summary in section_summaries.items()])
        prompt = (
            f"Integrate the following section summaries into a logically coherent literature review, "
            f"ensuring the final text is at least 2000 words and that each paragraph contains at least one citation "
            f"in the format <sup>X</sup> placed at the end of sentences:\n{summaries_text}"
        )
        review = call_zhipu_ai(prompt)

        # Append References section
        if self.references:
            references_section = "## References\n\n"
            for ref_num, ref in sorted(self.references.items()):
                references_section += f"[{ref_num}] {ref['title']}.  {ref['venue']}, {ref['year']}, chunk {ref['chunk_id']}\n\n"
            review += "\n\n" + references_section
        return review

    def generate_structured_review(self, query):
        """Main process: Generate structured review"""
        # 1. Identify query type
        query_type = self.identify_query_type(query)
        print(f"Query Type: {query_type}")

        # 2. Generate outline
        outline = self.generate_outline(query_type, query)
        print(f"Outline: {outline}")

        # 3. Retrieve papers
        papers = self.retrieve_papers(query)
        save_papers_to_file(papers, "retrieved_papers.json")
        print(f"Retrieved {len(papers)} papers")

        # 4. Extract text chunks
        chunks = self.extract_chunks(papers)
        print(f"Extracted {len(chunks)} chunks")

        # 5. Filter relevant text chunks
        filtered_chunks = self.filter_chunks(query, chunks)
        print(f"Filtered to {len(filtered_chunks)} chunks")

        # 6. Assign to sections
        section_chunks = self.assign_chunks_to_sections(outline, filtered_chunks)

        # 7. Summarize each section
        section_summaries = self.summarize_sections(section_chunks)
        print(f"Generated summaries for {len(section_summaries)} sections")

        # 8. Generate final review
        review = self.generate_review(section_summaries)
        return review

