import pickle
import streamlit as st
import numpy as np
import os
import Levenshtein
import pandas as pd
from scipy.sparse import coo_matrix
import tensorflow as tf
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
from keras.models import Sequential
from keras.layers import Dense
from sentence_transformers import SentenceTransformer, CrossEncoder
import faiss

# Map arXiv category codes -> human-readable subject names.
# Used to turn a predicted label (or closest fuzzy match, see
# find_best_match) into display text.
category_map = {
# Legacy (pre-2007) arXiv subject codes; several have no modern
# display name and are intentionally marked 'Not available'.
'acc-phys': 'Accelerator Physics',
'adap-org': 'Not available',
'q-bio': 'Not available',
'cond-mat': 'Not available',
'chao-dyn': 'Not available',
'patt-sol': 'Not available',
'dg-ga': 'Not available',
'solv-int': 'Not available',
'bayes-an': 'Not available',
'comp-gas': 'Not available',
'alg-geom': 'Not available',
'funct-an': 'Not available',
'q-alg': 'Not available',
'ao-sci': 'Not available',
'atom-ph': 'Atomic Physics',
'chem-ph': 'Chemical Physics',
'plasm-ph': 'Plasma Physics',
'mtrl-th': 'Not available',
'cmp-lg': 'Not available',
'supr-con': 'Not available',

'econ.GN': 'General Economics',
'econ.TH': 'Theoretical Economics',
'eess.SY': 'Systems and Control',

# Current arXiv taxonomy.
'astro-ph': 'Astrophysics',
'astro-ph.CO': 'Cosmology and Nongalactic Astrophysics',
'astro-ph.EP': 'Earth and Planetary Astrophysics',
'astro-ph.GA': 'Astrophysics of Galaxies',
'astro-ph.HE': 'High Energy Astrophysical Phenomena',
'astro-ph.IM': 'Instrumentation and Methods for Astrophysics',
'astro-ph.SR': 'Solar and Stellar Astrophysics',
'cond-mat.dis-nn': 'Disordered Systems and Neural Networks',
'cond-mat.mes-hall': 'Mesoscale and Nanoscale Physics',
'cond-mat.mtrl-sci': 'Materials Science',
'cond-mat.other': 'Other Condensed Matter',
'cond-mat.quant-gas': 'Quantum Gases',
'cond-mat.soft': 'Soft Condensed Matter',
'cond-mat.stat-mech': 'Statistical Mechanics',
'cond-mat.str-el': 'Strongly Correlated Electrons',
'cond-mat.supr-con': 'Superconductivity',
'cs.AI': 'Artificial Intelligence',
'cs.AR': 'Hardware Architecture',
'cs.CC': 'Computational Complexity',
'cs.CE': 'Computational Engineering, Finance, and Science',
'cs.CG': 'Computational Geometry',
'cs.CL': 'Computation and Language',
'cs.CR': 'Cryptography and Security',
'cs.CV': 'Computer Vision and Pattern Recognition',
'cs.CY': 'Computers and Society',
'cs.DB': 'Databases',
'cs.DC': 'Distributed, Parallel, and Cluster Computing',
'cs.DL': 'Digital Libraries',
'cs.DM': 'Discrete Mathematics',
'cs.DS': 'Data Structures and Algorithms',
'cs.ET': 'Emerging Technologies',
'cs.FL': 'Formal Languages and Automata Theory',
'cs.GL': 'General Literature',
'cs.GR': 'Graphics',
'cs.GT': 'Computer Science and Game Theory',
'cs.HC': 'Human-Computer Interaction',
'cs.IR': 'Information Retrieval',
'cs.IT': 'Information Theory',
'cs.LG': 'Machine Learning',
'cs.LO': 'Logic in Computer Science',
'cs.MA': 'Multiagent Systems',
'cs.MM': 'Multimedia',
'cs.MS': 'Mathematical Software',
'cs.NA': 'Numerical Analysis',
'cs.NE': 'Neural and Evolutionary Computing',
'cs.NI': 'Networking and Internet Architecture',
'cs.OH': 'Other Computer Science',
'cs.OS': 'Operating Systems',
'cs.PF': 'Performance',
'cs.PL': 'Programming Languages',
'cs.RO': 'Robotics',
'cs.SC': 'Symbolic Computation',
'cs.SD': 'Sound',
'cs.SE': 'Software Engineering',
'cs.SI': 'Social and Information Networks',
'cs.SY': 'Systems and Control',
'econ.EM': 'Econometrics',
'eess.AS': 'Audio and Speech Processing',
'eess.IV': 'Image and Video Processing',
'eess.SP': 'Signal Processing',
'gr-qc': 'General Relativity and Quantum Cosmology',
'hep-ex': 'High Energy Physics - Experiment',
'hep-lat': 'High Energy Physics - Lattice',
'hep-ph': 'High Energy Physics - Phenomenology',
'hep-th': 'High Energy Physics - Theory',
'math.AC': 'Commutative Algebra',
'math.AG': 'Algebraic Geometry',
'math.AP': 'Analysis of PDEs',
'math.AT': 'Algebraic Topology',
'math.CA': 'Classical Analysis and ODEs',
'math.CO': 'Combinatorics',
'math.CT': 'Category Theory',
'math.CV': 'Complex Variables',
'math.DG': 'Differential Geometry',
'math.DS': 'Dynamical Systems',
'math.FA': 'Functional Analysis',
'math.GM': 'General Mathematics',
'math.GN': 'General Topology',
'math.GR': 'Group Theory',
'math.GT': 'Geometric Topology',
'math.HO': 'History and Overview',
'math.IT': 'Information Theory',
'math.KT': 'K-Theory and Homology',
'math.LO': 'Logic',
'math.MG': 'Metric Geometry',
'math.MP': 'Mathematical Physics',
'math.NA': 'Numerical Analysis',
'math.NT': 'Number Theory',
'math.OA': 'Operator Algebras',
'math.OC': 'Optimization and Control',
'math.PR': 'Probability',
'math.QA': 'Quantum Algebra',
'math.RA': 'Rings and Algebras',
'math.RT': 'Representation Theory',
'math.SG': 'Symplectic Geometry',
'math.SP': 'Spectral Theory',
'math.ST': 'Statistics Theory',
'math-ph': 'Mathematical Physics',
'nlin.AO': 'Adaptation and Self-Organizing Systems',
'nlin.CD': 'Chaotic Dynamics',
'nlin.CG': 'Cellular Automata and Lattice Gases',
'nlin.PS': 'Pattern Formation and Solitons',
'nlin.SI': 'Exactly Solvable and Integrable Systems',
'nucl-ex': 'Nuclear Experiment',
'nucl-th': 'Nuclear Theory',
'physics.acc-ph': 'Accelerator Physics',
'physics.ao-ph': 'Atmospheric and Oceanic Physics',
'physics.app-ph': 'Applied Physics',
'physics.atm-clus': 'Atomic and Molecular Clusters',
'physics.atom-ph': 'Atomic Physics',
'physics.bio-ph': 'Biological Physics',
'physics.chem-ph': 'Chemical Physics',
'physics.class-ph': 'Classical Physics',
'physics.comp-ph': 'Computational Physics',
'physics.data-an': 'Data Analysis, Statistics and Probability',
'physics.ed-ph': 'Physics Education',
'physics.flu-dyn': 'Fluid Dynamics',
'physics.gen-ph': 'General Physics',
'physics.geo-ph': 'Geophysics',
'physics.hist-ph': 'History and Philosophy of Physics',
'physics.ins-det': 'Instrumentation and Detectors',
'physics.med-ph': 'Medical Physics',
'physics.optics': 'Optics',
'physics.plasm-ph': 'Plasma Physics',
'physics.pop-ph': 'Popular Physics',
'physics.soc-ph': 'Physics and Society',
'physics.space-ph': 'Space Physics',
'q-bio.BM': 'Biomolecules',
'q-bio.CB': 'Cell Behavior',
'q-bio.GN': 'Genomics',
'q-bio.MN': 'Molecular Networks',
'q-bio.NC': 'Neurons and Cognition',
'q-bio.OT': 'Other Quantitative Biology',
'q-bio.PE': 'Populations and Evolution',
'q-bio.QM': 'Quantitative Methods',
'q-bio.SC': 'Subcellular Processes',
'q-bio.TO': 'Tissues and Organs',
'q-fin.CP': 'Computational Finance',
'q-fin.EC': 'Economics',
'q-fin.GN': 'General Finance',
'q-fin.MF': 'Mathematical Finance',
'q-fin.PM': 'Portfolio Management',
'q-fin.PR': 'Pricing of Securities',
'q-fin.RM': 'Risk Management',
'q-fin.ST': 'Statistical Finance',
'q-fin.TR': 'Trading and Market Microstructure',
'quant-ph': 'Quantum Physics',
'stat.AP': 'Applications',
'stat.CO': 'Computation',
'stat.ME': 'Methodology',
'stat.ML': 'Machine Learning',
'stat.OT': 'Other Statistics',
'stat.TH': 'Statistics Theory'
}

# Load classification resources: TF-IDF vectorizer, multi-label binarizer
# and the classifier model (cached once per Streamlit session).
@st.cache_resource
def load_resources():
    """Load and return ``(vectorizer, mlb, mlp)`` for category prediction.

    Returns:
        vectorizer: fitted TF-IDF vectorizer unpickled from ``vectorizer.pkl``.
        mlb: fitted ``MultiLabelBinarizer`` unpickled from ``mlb.pkl``.
        mlp: Keras ``Sequential`` MLP with weights restored from ``baseline.h5``.
    """
    # NOTE(review): pickle.load is only safe on trusted, locally produced
    # files — never point these paths at untrusted input.
    with open("vectorizer.pkl", "rb") as vectorizer_file:
        vectorizer = pickle.load(vectorizer_file)

    with open("mlb.pkl", "rb") as mlb_file:
        mlb = pickle.load(mlb_file)

    def create_mlp():
        # Two-layer MLP: 256 ReLU units, then one sigmoid output per label
        # so several categories can be predicted independently.
        mlp = Sequential()
        mlp.add(Dense(256, activation='relu'))
        mlp.add(Dense(len(mlb.classes_), activation='sigmoid'))
        # Input width equals the TF-IDF vocabulary size.
        mlp.build(input_shape=(None, len(vectorizer.vocabulary_)))
        return mlp

    # Build the architecture first, then restore the trained weights.
    mlp = create_mlp()
    mlp.load_weights("baseline.h5")
    return vectorizer, mlb, mlp

# Load semantic-retrieval resources: sentence embedder, cross-encoder
# re-ranker, and a Faiss index built over precomputed paper embeddings.
@st.cache_resource
def load_faiss_resources():
    """Load retrieval assets and build the Faiss IVF index.

    Returns:
        model: ``SentenceTransformer`` used to embed queries.
        cross_encoder: ``CrossEncoder`` used to re-rank retrieved chunks.
        index: trained ``faiss.IndexIVFFlat`` over the stored embeddings.
        df_data: DataFrame of processed papers (``text``/``id``/``cat_text``).
        chunk_list: paper text chunks, row-aligned with the index.
        cat_list: category strings, row-aligned with the index.
    """
    # Sentence-embedding model for queries/documents.
    model = SentenceTransformer('./models/all-MiniLM-L6-v2')

    # Cross-encoder scoring query/document pairs for re-ranking.
    cross_encoder = CrossEncoder('./models/ms-marco-MiniLM-L-6-v2')

    # Precomputed embedding matrix, one row per paper chunk.
    loaded_embeddings = np.load('compressed_array.npz')['array_data']

    # Preprocessed paper metadata (one JSON record per line).
    df_data = pd.read_json("processed_data.json", orient="records", lines=True)

    # Columns used downstream; row order matches the embedding matrix.
    chunk_list = list(df_data['text'])
    cat_list = list(df_data['cat_text'])

    # Build the IVF index: L2 flat quantizer with a small number of coarse
    # centroids (the corpus is modest, so 5 clusters suffice).
    embed_length = loaded_embeddings.shape[1]  # embedding dimensionality
    num_centroids = 5
    quantizer = faiss.IndexFlatL2(embed_length)
    index = faiss.IndexIVFFlat(quantizer, embed_length, num_centroids)
    index.train(loaded_embeddings)
    index.add(loaded_embeddings)

    return model, cross_encoder, index, df_data, chunk_list, cat_list

# Predict arXiv category labels for a batch of raw texts.
def predict_categories(texts, vectorizer, mlb, model):
    """Vectorize *texts* with TF-IDF, score them with *model*, and return
    the label tuples whose sigmoid score exceeds 0.5, decoded via *mlb*."""
    # TF-IDF features in COO sparse form (gives direct row/col/data access).
    tfidf = coo_matrix(vectorizer.transform(texts))

    # Assemble a TensorFlow SparseTensor and reorder it into the canonical
    # row-major layout required by tf.sparse ops.
    coords = np.vstack((tfidf.row, tfidf.col)).T
    sparse_input = tf.sparse.reorder(
        tf.sparse.SparseTensor(
            indices=coords,
            values=tfidf.data,
            dense_shape=tfidf.shape,
        )
    )

    # Per-label sigmoid scores, thresholded at 0.5, mapped back to names.
    scores = model.predict(sparse_input)
    return mlb.inverse_transform((scores > 0.5).astype(int))

def find_best_match(chars, category_map):
    """Return the ``category_map`` key closest to ``''.join(chars)``.

    Distance is case-insensitive Levenshtein edit distance; ties resolve to
    the first key in the mapping's iteration order (same as the original
    dict-based implementation).

    Args:
        chars: iterable of strings — e.g. the label tuple produced by
            ``MultiLabelBinarizer.inverse_transform`` — concatenated into
            one query string.
        category_map: mapping of arXiv category codes to display names.

    Returns:
        The key of ``category_map`` with the smallest edit distance.
    """
    input_string = ''.join(chars).lower()
    # min() over the keys replaces the original throwaway score dict; the
    # mapped values were never consulted, only the keys matter here.
    return min(category_map,
               key=lambda key: Levenshtein.distance(input_string, key.lower()))

# Load the cached resources once at startup.
vectorizer, mlb, mlp = load_resources()
model, cross_encoder, index, df_data, chunk_list, cat_list = load_faiss_resources()

# Streamlit web app UI.
st.title("文本分类与论文查询")
st.markdown("基于分类模型对文本进行分类，并查询相关的研究论文。")

# User input.
user_input = st.text_area("输入文本或查询内容", "This is a new document about AI.")

# Button: run classification, then semantic retrieval.
if st.button("运行分类与查询"):
    if not user_input.strip():
        st.error("请输入文本内容！")
    else:
        # --- Classification task ---
        with st.spinner("正在分类..."):
            predicted_categories = predict_categories([user_input], vectorizer, mlb, mlp)
            # Snap the predicted label(s) onto the closest known arXiv code.
            formatted_category = find_best_match(predicted_categories[0], category_map)
            st.success("分类完成！")
            st.write(f"**预测类别**: {formatted_category}")

        # --- Retrieval task ---
        with st.spinner("正在查询相关研究论文..."):
            query = [user_input]
            query_embedding = model.encode(query)  # embed the query text
            top_k = 10  # number of nearest neighbours to request
            # Approximate nearest-neighbour search in the Faiss index.
            D, I = index.search(query_embedding, top_k)

            # Faiss pads with -1 when fewer than top_k hits exist; drop
            # those so we never silently index chunk_list[-1].
            pred_list = [idx for idx in I[0] if idx != -1]
            pred_strings_list = [chunk_list[idx] for idx in pred_list]
            # Re-rank the retrieved chunks with the cross-encoder.
            cross_input_list = [[query[0], chunk] for chunk in pred_strings_list]
            cross_scores = cross_encoder.predict(cross_input_list)

            # Organise results, best cross-encoder score first.
            df = pd.DataFrame(cross_input_list, columns=['query_text', 'pred_text'])
            df['original_index'] = pred_list
            df['cross_scores'] = cross_scores
            df_sorted = df.sort_values(by='cross_scores', ascending=False).reset_index(drop=True)

            # Display every retrieved result (bounded by the actual hit
            # count rather than a hard-coded 10).
            st.success("查询完成！")
            for i in range(len(df_sorted)):
                text = df_sorted.loc[i, 'pred_text']
                original_index = df_sorted.loc[i, 'original_index']
                arxiv_id = df_data.loc[original_index, 'id']
                cat_text = df_data.loc[original_index, 'cat_text']

                # Show the matched chunk, its category, and a PDF link.
                link_to_pdf = f'https://arxiv.org/pdf/{arxiv_id}'
                st.markdown(f"### 结果 {i+1}")
                st.write(f"**预测文本**: {text}")
                st.write(f"**分类**: {cat_text}")
                st.markdown(f"[PDF链接]({link_to_pdf})")
