import re
import user_auth
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import gradio as gr
import os
import jieba  

# Extract user demands from the log file and save them to an Excel workbook
def extract_demands_from_log(log_path):
    """Parse the log at *log_path* and split user demands into two lists:
    smart-writing demands and PPT-creation demands.

    Both lists are also exported to ``<cwd>/data/output.xlsx``
    (one sheet per category).

    Args:
        log_path: path to the UTF-8 log file to parse.

    Returns:
        ``(smart_writing_demands, ppt_creation_demands)`` on success,
        or ``None`` if the log file is missing or the Excel export fails.
    """
    try:
        with open(log_path, 'r', encoding='utf-8') as file:
            log_content = file.read()
    except FileNotFoundError:
        print(f"未找到文件: {log_path}")
        return None

    # One capture per INFO entry emitted by user_auth:verify_sign. The lazy
    # group with re.DOTALL lets a payload span multiple lines, stopping at
    # the next timestamped INFO line or at end of file.
    pattern = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \| INFO +\| api.internal.dependencies.user_auth:verify_sign:\d+ - (.*?)(?=\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \| INFO|$)"
    matches = re.findall(pattern, log_content, re.DOTALL)

    smart_writing_demands = []
    ppt_creation_demands = []

    # Patterns that pull the actual PPT requirement out of a decrypted
    # prompt; compiled once because they are applied to every match.
    ppt_pattern_1 = re.compile(r"我的要求是：(.*?)。\n根据主题和参考资料完成内容分析")
    ppt_pattern_2 = re.compile(r"我要制作一份(.*?)PPT的大纲，输出大纲格式严格按照以下")

    for demand in matches:
        if demand.startswith("OloeBh"):
            # "OloeBh"-prefixed payloads are encrypted PPT-creation prompts:
            # decrypt them, then extract the requirement from the content of
            # the last decrypted message.
            auth = user_auth.UserAuth()
            try:
                decrypted_result = auth.get_true_messages(demand)
                if decrypted_result:
                    last_content = decrypted_result[-1].get("content", "")
                    match_1 = ppt_pattern_1.search(last_content)
                    match_2 = ppt_pattern_2.search(last_content)
                    if match_1:
                        ppt_creation_demands.append(match_1.group(1))
                    elif match_2:
                        ppt_creation_demands.append(match_2.group(1))
            except Exception as e:
                print(f"解密PPT创作需求时出错: {e}")
        else:
            smart_writing_demands.append(demand)

    def clean_text(text):
        # Strip ASCII control characters before the Excel export.
        return re.sub(r'[\x00-\x1F\x7F]', '', text)

    # Drop NaN entries (defensive) and clean every remaining demand.
    smart_writing_demands = [clean_text(demand) for demand in smart_writing_demands if pd.notna(demand)]
    ppt_creation_demands = [clean_text(demand) for demand in ppt_creation_demands if pd.notna(demand)]

    smart_writing_df = pd.DataFrame({"智能写作需求": smart_writing_demands})
    ppt_creation_df = pd.DataFrame({"PPT创作需求": ppt_creation_demands})

    try:
        output_excel_dir = os.path.join(os.getcwd(), "data")
        os.makedirs(output_excel_dir, exist_ok=True)
        output_excel_path = os.path.join(output_excel_dir, "output.xlsx")
        with pd.ExcelWriter(output_excel_path, engine='openpyxl') as writer:
            smart_writing_df.to_excel(writer, sheet_name='智能写作需求', index=False)
            ppt_creation_df.to_excel(writer, sheet_name='PPT创作需求', index=False)
        print("Excel 文件已创建成功！")
        return smart_writing_demands, ppt_creation_demands
    except Exception as e:
        print(f"创建Excel文件时出错: {e}")
        return None


# Count word frequencies and save the top words to a txt file
def word_frequency_analysis(texts, output_txt_path):
    """Tokenize *texts* with jieba, count word frequencies, and write the
    100 most frequent words to ``data/<output_txt_path>``.

    Args:
        texts: iterable of strings to analyze.
        output_txt_path: output file NAME (joined under the ``data`` dir).

    Returns:
        pandas.DataFrame: the ten most frequent words, with columns
        ``词语`` (word) and ``词频`` (count) and a 1-based index.
    """
    # Keep only tokens made entirely of CJK characters, ASCII letters or
    # digits, at least two characters long. Compiled once, outside the loop.
    token_pattern = re.compile(r'^[\u4e00-\u9fa5a-zA-Z0-9]+$')

    all_words = []
    for text in texts:
        words = jieba.lcut(text)
        all_words.extend(
            word for word in words
            if token_pattern.match(word) and len(word) >= 2
        )

    # value_counts() sorts by frequency, most common first.
    word_freq = pd.Series(all_words).value_counts().reset_index()
    word_freq.columns = ['词语', '词频']
    # 1-based ranking index for readability.
    word_freq.index = range(1, len(word_freq) + 1)

    # Persist the top 100 words as "word: count" lines.
    top_100_freq = word_freq.head(100)
    output_dir = "data"
    os.makedirs(output_dir, exist_ok=True)
    full_output_path = os.path.join(output_dir, output_txt_path)
    with open(full_output_path, 'w', encoding='utf-8') as f:
        for _, row in top_100_freq.iterrows():
            f.write(f"{row['词语']}: {row['词频']}\n")

    # Return only the top 10 for display in the UI.
    return word_freq.head(10)


# Text search (TF-IDF cosine similarity)
def text_search(keyword, texts, top_n=10):
    """Rank *texts* by TF-IDF cosine similarity against *keyword*.

    Args:
        keyword: query string (NaN is treated as an empty query).
        texts: candidate strings; NaN entries are dropped.
        top_n: maximum number of results to return.

    Returns:
        str: newline-joined ``"rank. 相似度: ..., 文本: ..."`` lines,
        or an empty string when there is nothing to search.
    """
    texts = [text for text in texts if pd.notna(text)]  # drop NaN entries
    keyword = keyword if pd.notna(keyword) else ""  # NaN keyword -> empty query

    # Guard: with no candidate texts there is nothing to rank, and fitting
    # the vectorizer on an empty keyword alone would raise
    # "empty vocabulary" from TfidfVectorizer.
    if not texts:
        return ""

    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform(texts + [keyword])
    keyword_vector = tfidf_matrix[-1]  # last row is the query
    text_vectors = tfidf_matrix[:-1]
    similarities = cosine_similarity(keyword_vector, text_vectors).flatten()

    # Indices of the top_n most similar texts, best first.
    top_indices = similarities.argsort()[::-1][:top_n]
    results = [
        f"{rank}. 相似度: {similarities[i]:.4f}, 文本: {texts[i]}"
        for rank, i in enumerate(top_indices, start=1)
    ]
    return '\n'.join(results)


# Gradio UI section (log analysis)
with gr.Blocks() as demo:
    gr.Markdown("### 日志文件需求分析与搜索")
    log_file_input = gr.File(label="上传日志文件")
    analyze_button = gr.Button("分析文件")

    smart_writing_freq_output = gr.Textbox(label="智能写作需求前十词频", lines=10)
    ppt_creation_freq_output = gr.Textbox(label="PPT创作需求前十词频", lines=10)

    smart_writing_keyword_input = gr.Textbox(label="输入智能写作需求搜索关键词")
    smart_writing_search_button = gr.Button("搜索智能写作需求")
    smart_writing_search_output = gr.Textbox(label="智能写作需求搜索结果", lines=10)

    ppt_creation_keyword_input = gr.Textbox(label="输入PPT创作需求搜索关键词")
    ppt_creation_search_button = gr.Button("搜索PPT创作需求")
    ppt_creation_search_output = gr.Textbox(label="PPT创作需求搜索结果", lines=10)

    def analyze_log_file(file):
        """Run extraction + word-frequency analysis on an uploaded log file."""
        if file is None:  # no upload yet — avoid AttributeError on file.name
            return "", ""
        result = extract_demands_from_log(file.name)
        if result:
            smart_writing_demands, ppt_creation_demands = result
            smart_freq = word_frequency_analysis(smart_writing_demands, 'smart_writing_freq.txt')
            ppt_freq = word_frequency_analysis(ppt_creation_demands, 'ppt_creation_freq.txt')
            return str(smart_freq), str(ppt_freq)
        return "", ""

    analyze_button.click(analyze_log_file, inputs=log_file_input,
                         outputs=[smart_writing_freq_output, ppt_creation_freq_output])

    def _search_sheet(keyword, sheet_name):
        """Search one demand sheet of the generated workbook.

        Reads data/output.xlsx relative to the current working directory —
        the same location extract_demands_from_log writes to. (The previous
        hard-coded absolute Windows path only worked on one machine.)
        In both sheets the single column is named after the sheet itself.
        """
        try:
            output_excel_path = os.path.join(os.getcwd(), "data", "output.xlsx")
            excel_file = pd.ExcelFile(output_excel_path)
            texts = excel_file.parse(sheet_name)[sheet_name].tolist()
            return text_search(keyword, texts)
        except FileNotFoundError:
            return "请先上传并分析日志文件。"

    def perform_smart_writing_search(keyword):
        """Search smart-writing demands for *keyword*."""
        return _search_sheet(keyword, '智能写作需求')

    smart_writing_search_button.click(perform_smart_writing_search, inputs=smart_writing_keyword_input,
                                      outputs=smart_writing_search_output)

    def perform_ppt_creation_search(keyword):
        """Search PPT-creation demands for *keyword*."""
        return _search_sheet(keyword, 'PPT创作需求')

    ppt_creation_search_button.click(perform_ppt_creation_search, inputs=ppt_creation_keyword_input,
                                     outputs=ppt_creation_search_output)

if __name__ == "__main__":
    demo.launch()