import re
from stanfordcorenlp import StanfordCoreNLP
from excel_process import read_excel
import pandas as pd


class Analysis_tool:
    """Chinese report-quality analyzer built on Stanford CoreNLP.

    Scores text on three axes (percent scale):
      * quantification   — sentences containing a cardinal number (CD) plus a
        measure word (M), or a 《book title》;
      * subject/object clarity — the share of a sentence that is NOT its
        dependency backbone (root word + direct dependents);
      * sentence length  — share of sentences of at most 100 characters.
    The weighted sum of the three (weights x:y:z) is the composite score.
    """

    def __init__(self, model_path):
        """Connect to a Stanford CoreNLP installation for Chinese.

        :param model_path: path (or URL) handed to ``StanfordCoreNLP``.
        """
        print("初始化", flush=True)
        self.model_path = model_path
        self.nlp = StanfordCoreNLP(self.model_path, lang='zh')

    def read_file_lines(self, file_path):
        """Read a UTF-8 text file and return its lines, whitespace-stripped."""
        with open(file_path, 'r', encoding='utf-8') as file:
            # strip() removes the trailing newline and surrounding blanks
            return [line.strip() for line in file]

    def extract_main_clause(self, sentence, dependence, words):
        """Extract the sentence backbone: the root word plus its direct dependents.

        :param sentence: original sentence, returned unchanged when no ROOT exists.
        :param dependence: ``(relation, head_idx, dependent_idx)`` triples with
            1-based indices, as produced by ``dependency_parse``.
        :param words: token list aligned with the indices in *dependence*.
        :return: backbone words joined in sentence order, or *sentence* as-is.

        NOTE(review): if several ROOT entries occur, only the last one's
        dependents are collected (original behavior, kept) — assumed
        single-ROOT input; confirm against the parser's output.
        """
        root_idx = -1
        backbone_positions = []
        for relation, head, dependent in dependence:
            if relation == 'ROOT':
                root_idx = dependent
                backbone_positions.append(dependent - 1)
        if root_idx == -1:
            # No parse root found: fall back to the whole sentence.
            return sentence
        # Direct dependents of the root belong to the backbone as well.
        backbone_positions.extend(d[2] - 1 for d in dependence if d[1] == root_idx)
        return ''.join(words[i] for i in sorted(backbone_positions)).strip()

    def split_sentences(self, text):
        """Split *text* into sentences on 。 ； and newline, dropping blanks."""
        print("分句", flush=True)
        return [s.strip() for s in re.split(r'[。；\n]', text) if s.strip()]

    def process_string(self, text, x=60, y=30, z=10):
        """Score a single report string and return a human-readable summary.

        :param text: the report text.
        :param x: weight of the quantification score (default 60).
        :param y: weight of the clarity score (default 30).
        :param z: weight of the length score (default 10).
        :return: formatted result string with the three sub-scores and the
            weighted composite score.
        """
        print("开始处理:", text, flush=True)
        splits = self.split_sentences(text)

        main_sum = 0
        word_sum = 0
        CD_sentence_num = 0
        proper_length_num = 0
        for sentence in splits:
            # Backbone length vs. full sentence length.
            dependence = self.nlp.dependency_parse(sentence)
            words = self.nlp.word_tokenize(sentence)
            main_clause = self.extract_main_clause(sentence, dependence, words)
            main_sum += len(main_clause)
            word_sum += len(sentence)
            # PERF FIX: parse once per sentence — the original called
            # self.nlp.parse() up to four times for the same sentence.
            parse_tree = self.nlp.parse(sentence)
            # Quantified content: cardinal number (CD) together with a
            # measure word (M), or a 《book title》.
            if "CD" in parse_tree and "M" in parse_tree:
                CD_sentence_num += 1
            elif "《" in parse_tree and "》" in parse_tree:
                CD_sentence_num += 1
            # Sentences up to 100 characters count as appropriately sized.
            if len(sentence) <= 100:
                proper_length_num += 1

        # ROBUSTNESS FIX: empty / punctuation-only input used to raise
        # ZeroDivisionError here; score it as all zeros instead.
        if splits and word_sum:
            CD_percent = CD_sentence_num / len(splits)
            properlength_percent = proper_length_num / len(splits)
            content_percent = 1 - main_sum / word_sum
        else:
            CD_percent = properlength_percent = content_percent = 0
        result = "分析结果（百分制）： 内容量化得分 {:.2f}分，主宾明确度得分 {:.2f}分，语句长度分析得分 {:.2f} ，综合评分{:.2f}。".format(CD_percent * 100,
                                                                                   content_percent * 100,
                                                                                   properlength_percent * 100,
                                                                                   CD_percent * x + content_percent * y + properlength_percent * z)
        print("分析结束，", result, flush=True)
        return result

    def process_excel(self, file_path, x, y, z, sheet_name, columns=None, skip_rows=0):
        """Score every report cell of an Excel sheet and write results back.

        Reads the given *columns* of *sheet_name*, scores each cell, copies
        the sheet to a 'result' sheet and appends four score columns to it.
        Empty cells score 0 on every axis.

        :param file_path: path of the workbook (read and written in place).
        :param x: weight of the quantification score.
        :param y: weight of the clarity score.
        :param z: weight of the length score.
        :param columns: column indices to read; defaults to ``[0]``.
        :param skip_rows: header rows to skip when reading.
        :return: *file_path*.
        """
        # BUG FIX: the original used a mutable default argument (columns=[0]).
        if columns is None:
            columns = [0]
        # All reports, as a 2-D array of cells.
        report_array = read_excel(file_path, sheet_name, skip_rows=skip_rows, use_column=columns)
        print("开始处理：", report_array, flush=True)
        # Flatten to one list of cells.
        temp_report_list = [item for sublist in report_array for item in sublist]
        # Replace NaN (empty cells) with the '内容为空' marker.
        report_list = [str(value) if pd.notna(value) else '内容为空' for value in temp_report_list]
        # One sentence list per report.
        split_list = [self.split_sentences(report) for report in report_list]
        # Copy the original sheet's structure and content into a 'result' sheet.
        df_original = pd.read_excel(file_path, sheet_name=sheet_name)
        with pd.ExcelWriter(file_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
            df_original.to_excel(writer, sheet_name='result', index=False)

        CD_percent_list = []
        sum_score_list = []
        content_percent_list = []
        properlength_percent_list = []
        for index, report in enumerate(split_list):
            main_sum = 0
            word_sum = 0
            CD_sentence_num = 0
            proper_length_num = 0
            # Empty cells (and cells that split into nothing — a robustness
            # fix over the original, which divided by zero) score 0 overall.
            if not report or report == ['内容为空']:
                CD_percent = 0
                content_percent = 0
                properlength_percent = 0
                print("第 {} 条内容为空！".format(index + 1), flush=True)
            else:
                for long_sentence in report:
                    # PERF FIX: parse once — the original parsed the same
                    # sentence up to four times per iteration.
                    parse_tree = self.nlp.parse(long_sentence)
                    # Quantified content: CD + M tags, or a 《book title》.
                    if "CD" in parse_tree and "M" in parse_tree:
                        CD_sentence_num += 1
                    elif "《" in parse_tree and "》" in parse_tree:
                        CD_sentence_num += 1
                    if len(long_sentence) <= 100:
                        proper_length_num += 1
                    # Backbone ratio is computed per comma-separated clause.
                    for sentence in long_sentence.split("，"):
                        dependence = self.nlp.dependency_parse(sentence)
                        words = self.nlp.word_tokenize(sentence)
                        main_clause = self.extract_main_clause(sentence, dependence, words)
                        main_sum += len(main_clause)
                        word_sum += len(sentence)
                # Share of non-backbone content = clarity of detail.
                content_percent = 1 - main_sum / word_sum
                CD_percent = CD_sentence_num / len(report)
                properlength_percent = proper_length_num / len(report)

                # BUG FIX: flush=True was passed to str.format() (silently
                # ignored there) instead of print().
                print("第 {} 条分析结果（百分制）： 内容量化得分 {:.2f}分， 主宾明确度得分 {:.2f}分，语句长度分析得分 {:.2f} ， 综合评分{:.2f}".format(
                    index + 1, CD_percent * 100,
                    content_percent * 100,
                    properlength_percent * 100,
                    CD_percent * x + content_percent * y + properlength_percent * z),
                    flush=True)
            # Default weighting of the three factors is expected to be 6:3:1.
            CD_percent_list.append(round(CD_percent * 100, 2))
            content_percent_list.append(round(content_percent * 100, 2))
            properlength_percent_list.append(round(properlength_percent * 100, 2))
            sum_score_list.append(round(CD_percent * x + content_percent * y + properlength_percent * z))
        # Re-read the result sheet and append the score columns.
        df = pd.read_excel(file_path, sheet_name='result')
        df['量化程度得分'] = CD_percent_list
        df['主宾明确度得分'] = content_percent_list
        df['语句长度分析得分'] = properlength_percent_list
        df['综合得分'] = sum_score_list
        # Write the enriched sheet back, replacing the previous 'result'.
        with pd.ExcelWriter(file_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
            df.to_excel(writer, sheet_name='result', index=False)
        return file_path
