import re
from stanfordcorenlp import StanfordCoreNLP
from excel_process import read_excel
import pandas as pd
import numpy as np
import openpyxl
from openpyxl import load_workbook


def read_file_lines(file_path):
    """Read a UTF-8 text file and return its lines as a list.

    Each line is whitespace-stripped (trailing newline removed); blank
    lines become empty strings and are kept.
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        return [raw_line.strip() for raw_line in handle]

def split_sentences(text):
    """Split *text* into sentences.

    Sentence boundaries are the Chinese full stop 。, the Chinese
    semicolon ；, and the newline character. Fragments are
    whitespace-stripped and empty fragments are dropped.
    """
    fragments = re.split(r'[。；\n]', text)
    stripped = (fragment.strip() for fragment in fragments)
    return [sentence for sentence in stripped if sentence]

def extract_main_clause(sentence, dependence, words):
    """Extract the main clause: the ROOT word plus its direct dependents.

    Parameters
    ----------
    sentence : str
        The original sentence; returned unchanged when the parse has
        no ROOT relation.
    dependence : list[tuple]
        Dependency triples ``(relation, governor, dependent)`` with
        1-based token indices, as produced by
        ``StanfordCoreNLP.dependency_parse``.
    words : list[str]
        The tokenized sentence the indices refer to.

    Returns
    -------
    str
        The root token and its direct dependents, joined in sentence
        order and stripped, or *sentence* itself when no ROOT exists.
    """
    # Find the first ROOT; the parser normally emits exactly one, but the
    # original code kept scanning and could mix indices from several ROOTs.
    root_idx = -1
    for relation, _governor, dependent in dependence:
        if relation == 'ROOT':
            root_idx = dependent
            break
    if root_idx == -1:
        # No parse root available — fall back to the whole sentence.
        return sentence
    # Keep the root and every token directly governed by it.
    # A set prevents any index from contributing a word twice.
    keep = {root_idx - 1}
    for _relation, governor, dependent in dependence:
        if governor == root_idx:
            keep.add(dependent - 1)
    # Bounds guard: tokenization and dependency parse can disagree on
    # token count, which would otherwise raise IndexError.
    main_clause = ''.join(words[i] for i in sorted(keep) if 0 <= i < len(words))
    return main_clause.strip()

# Path of the input workbook (a plain-text variant is kept for reference).
#file_path = './sample.txt'
file_path = './sample.xlsx'
# Interactive configuration: worksheet name, first data row, report column
# (both numbers are 1-based as the user sees them in Excel).
sheet = input("输入工作表名：")
row = input("输入从第几行开始（数字）：")
columns = input("输入报告所在列(数字)：")
# Read every report cell into a 2-D array.
# NOTE(review): skip_rows=int(row)-2 presumably compensates for a header
# row plus the 1-based row input — confirm against read_excel's semantics.
report_array = read_excel(file_path, sheet, skip_rows=int(row)-2, use_column=[int(columns)-1])
# Flatten the 2-D result into a plain list of cell values.
temp_report_list = [item for sublist in report_array for item in sublist]
# Replace empty cells (NaN) with the placeholder string "内容为空".
report_list = [str(value) if pd.notna(value) else '内容为空' for value in temp_report_list]
print(report_list)
split_list = []
for report in report_list:
    splits = []
    # Split each report on 。/；/newline; split_list gets one inner
    # list of sentences per report.
    for split in split_sentences(report):
        splits.append(split)
    split_list.append(splits)
# print(split_list)
# Start the Stanford CoreNLP wrapper with the Chinese models.
nlp = StanfordCoreNLP(r'D:\stanford-corenlp-full-2018-01-31', lang='zh')
score_list = []            # total score per report
labels_list = []           # list of textual verdicts per report
content_percent_list = []  # "content richness" percentage per report
for index in range(len(split_list)):
    report = split_list[index]
    CD_sentence_num = 0      # sentences containing quantified content
    proper_length_num = 0    # sentences of acceptable length (<= 100 chars)
    score = 0
    main_sum = 0             # total characters in extracted main clauses
    word_sum = 0             # total characters in all sub-sentences
    labels = []
    if split_list[index] == ['内容为空']:
        # Empty report: zero score, zero richness, labelled as empty.
        score = 0
        content_percent = 0
        labels.append("内容为空")
        score_list.append(score)
        labels_list.append(labels)
        content_percent_list.append(content_percent)
    else:
        for long_sentence in report:
            # Quantified content requires both a cardinal number (CD) and
            # a measure word (M) in the constituency parse.
            # NOTE(review): nlp.parse is called up to four times per
            # sentence here; caching one parse per sentence would be much
            # faster without changing results.
            if "CD" in nlp.parse(long_sentence) and "M" in nlp.parse(long_sentence):
                # Count sentences that contain quantified content.
                CD_sentence_num += 1
            # Book-title brackets 《》 also count as quantified content.
            elif "《" in nlp.parse(long_sentence) and "》" in nlp.parse(long_sentence):
                CD_sentence_num += 1
            if 100 >= len(long_sentence):
                proper_length_num += 1
            # Split further on the Chinese comma for clause-level parsing.
            sentences = long_sentence.split("，")
            print(sentences)
            for sentence in sentences:
                dependence = nlp.dependency_parse(sentence)
                words = nlp.word_tokenize(sentence)
                # Main clause (ROOT word and its direct dependents).
                main_clause = extract_main_clause(sentence,dependence, words)
                print(main_clause)
                #print(words)
                print(sentence,dependence)
                main_sum += len(main_clause)
                word_sum += len(sentence)
        # Share of the report taken up by main clauses; the remainder is
        # treated as "rich" supporting content.
        main_percent = main_sum/word_sum
        content_percent = 100*(1-main_percent)
        print("丰富程度：",content_percent)
        # Share of sentences containing quantified content.
        CD_percent = CD_sentence_num / len(report)
        # Share of sentences with acceptable length.
        properlength_percent = proper_length_num / len(report)

        print("第 {} 条分析结果： 文本内容量化程度 {:.2f}  文本结构规范程度 {:.2f} 综合评分（百分制）{:.2f}" .format(index+1,CD_percent,properlength_percent,
                                                                               (CD_percent+properlength_percent)*50))

        # Scoring rule for quantified content.
        if CD_percent >= 0.2:
            score += 5
            labels.append("量化标准提供情况较好")
        elif 0 < CD_percent < 0.2:
            score += 3
            labels.append("有量化标准但占比较低")
        else:
            labels.append("未提供量化标准")
        # Scoring rule for document structure (sentence length).
        if properlength_percent >= 0.8:
            score += 5
            labels.append("行文结构较好")
        elif 0.3 < properlength_percent < 0.8:
            score += 4
            labels.append("行文结构一般")
        else:
            score += 2
            labels.append("行文结构较差")
        score_list.append(score)
        labels_list.append(labels)
        content_percent_list.append(content_percent)
# Shut down the CoreNLP wrapper once all reports are scored.
nlp.close()
# Re-read the original worksheet so the results can be appended as columns.
df = pd.read_excel(file_path, sheet_name=sheet)
# Append three result columns.
# NOTE(review): this assumes len(score_list) equals the sheet's row count
# as read here (no rows skipped) — confirm when the start row input is > 2.
df['得分'] = score_list
df['丰富程度'] = content_percent_list
df['评价'] = labels_list
# Write into a sheet named 'result', replacing it if it already exists.
with pd.ExcelWriter(file_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
    df.to_excel(writer, sheet_name='result', index=False)
