import re
import json
import jieba
import pandas as pd
from pycorrector import Corrector
# Word-frequency analysis
def word_analysis(data_path):
    """Print the 50 most frequent multi-character words in a JSONL corpus.

    Each line of *data_path* must be a JSON object with a ``'text'`` field.
    The text is tokenized with jieba; single-character tokens are skipped
    to filter out particles and punctuation noise.

    :param data_path: path to a UTF-8 JSONL file.
    """
    count = {}
    with open(data_path, 'r', encoding='utf-8') as file:
        # Read line by line: each line is one JSON record
        for line in file:
            obj = json.loads(line)
            words = jieba.lcut(obj['text'])  # segment the text into words
            for word in words:
                # Skip single characters so the output contains real words
                if len(word) < 2:
                    continue
                # Increment the counter, starting from 0 for unseen words
                count[word] = count.get(word, 0) + 1
    # Sort (word, frequency) pairs by frequency, highest first
    wlist = sorted(count.items(), key=lambda x: x[1], reverse=True)
    # BUG FIX: the original indexed wlist[i] for i in range(50), which raises
    # IndexError when the corpus yields fewer than 50 distinct words.
    # A slice safely yields at most 50 entries.
    for word, number in wlist[:50]:
        print("关键字：{:-<10}频次：{:+>8}".format(word, number))
# Records lacking a problem description / category section
def lack_describe_OR_label(data_path):
    """Find records whose answer section contains no real content.

    For every JSONL record, extract the text between ``答案：`` and ``评论：``.
    If the section exists but contains no CJK character, letter or digit,
    the record is collected and the offending rows are written to an Excel
    file named ``<input stem>_answer.xlsx`` in the current directory.

    :param data_path: path to a UTF-8 JSONL file with 'text' and 'meta' fields.
    """
    err_text = []
    err_meta = []
    err = []
    count = 0   # records whose matched section is effectively empty
    count2 = 0  # records where the section was found at all
    # Text between "答案：" and "评论："; compiled once, hoisted out of the loop
    section_re = re.compile(r"答案：(.*?)(?=评论：)", re.DOTALL)
    # Alternative: everything after the category marker
    # section_re = re.compile(r"问题分类：(.*)", re.DOTALL)
    # At least one CJK character, letter or digit => real content
    content_re = re.compile("[\u4e00-\u9fffA-Za-z0-9]+")
    with open(data_path, 'r', encoding='utf-8') as file:
        # Read line by line: each line is one JSON record
        for line in file:
            obj = json.loads(line)
            text = obj['text']
            meta = obj['meta']
            match = section_re.search(text)
            if not match:
                continue
            count2 += 1
            question_description = match.group(1).strip()
            if content_re.search(question_description):
                # Section has real content — nothing to report
                continue
            count += 1
            err_text.append(text)
            err_meta.append(meta)
            err.append(question_description)
    print('存在问题描述的数量：', count2)
    print(count)
    # Dump the offending records to Excel for manual review
    df = pd.DataFrame({
        'text': err_text,
        'error': err,
        'meta': err_meta
    })
    # BUG FIX: use the LAST path component (the file name); the original
    # split('/')[1] raises IndexError for paths without '/' and picks the
    # wrong component for nested paths like 'a/b/c.jsonl'.
    save_path = data_path.split('/')[-1].split('.')[0] + '_answer.xlsx'
    df.to_excel(save_path, index=False)

# Records lacking a question section
def lack_question(datapath):
    """Find records whose question section contains no real content.

    Extracts the text between ``问题：`` and either ``问题描述：`` (when that
    marker is present) or ``答案：``. Records whose section contains no CJK
    character, letter or digit are printed and written to an Excel file named
    ``<input stem>_question.xlsx``.

    :param datapath: path to a UTF-8 JSONL file with 'text' and 'meta' fields.
    """
    err_text = []
    err_meta = []
    err = []
    count = 0   # records whose matched section is effectively empty
    count2 = 0  # records where the section was found at all
    # Compiled once, hoisted out of the loop
    with_desc_re = re.compile(r"问题：(.*?)(?=问题描述：)", re.DOTALL)
    no_desc_re = re.compile(r"问题：(.*?)(?=答案：)", re.DOTALL)
    # At least one CJK character, letter or digit => real content
    content_re = re.compile("[\u4e00-\u9fffA-Za-z0-9]+")
    # BUG FIX: the original opened the global ``data_path`` instead of the
    # ``datapath`` parameter (a NameError unless a同名 global happened to
    # exist). Use the parameter throughout.
    with open(datapath, 'r', encoding='utf-8') as file:
        # Read line by line: each line is one JSON record
        for line in file:
            obj = json.loads(line)
            text = obj['text']
            meta = obj['meta']
            # Pick the pattern based on whether a description marker follows
            pattern = with_desc_re if "问题描述" in text else no_desc_re
            match = pattern.search(text)
            if not match:
                continue
            count2 += 1
            question_description = match.group(1).strip()
            if content_re.search(question_description):
                # Section has real content — nothing to report
                continue
            count += 1
            err_text.append(text)
            err_meta.append(meta)
            err.append(question_description)
            print("describe:", question_description)
            print("text:", text)
    print('存在缺少问题的数量：', count2)
    print(count)
    df = pd.DataFrame({
        'text': err_text,
        'error': err,
        'meta': err_meta
    })
    # BUG FIX: last path component instead of split('/')[1] (see above);
    # also derived from the parameter, not the global.
    save_path = datapath.split('/')[-1].split('.')[0] + '_question.xlsx'
    df.to_excel(save_path, index=False)
# Find records containing censored (masked) text
def mask_sentence(data_path):
    """Find records whose text contains masked content like ``词**词``.

    A masked span is one or more ``*`` characters surrounded by CJK
    characters. Matching records are printed and written to an Excel file
    named ``<input stem>_mask.xlsx``.

    :param data_path: path to a UTF-8 JSONL file with 'text' and 'meta' fields.
    """
    count = 0
    err_text = []
    err_meta = []
    err = []
    # CJK chars around one or more '*' => a masked word; compiled once,
    # hoisted out of the per-line loop
    mask_re = re.compile(r"[\u4e00-\u9fa5]+[\*]+[\u4e00-\u9fa5]+")
    with open(data_path, 'r', encoding='utf-8') as file:
        # Read line by line: each line is one JSON record
        for line in file:
            obj = json.loads(line)
            text = obj['text']
            meta = obj['meta']
            match = mask_re.search(text)
            if match:
                count += 1
                err_text.append(text)
                err_meta.append(meta)
                err.append(match.group(0))
                print('屏蔽内容：', match.group(0))
                print('text', text)
    print('被屏蔽的文本数量：', count)
    df = pd.DataFrame({
        'text': err_text,
        'error': err,
        'meta': err_meta
    })
    # BUG FIX: take the last path component (the file name); split('/')[1]
    # raises IndexError for paths without '/' and is wrong for nested paths.
    save_path = data_path.split('/')[-1].split('.')[0] + '_mask.xlsx'
    df.to_excel(save_path, index=False)
def find_specific_word(data_path):
    """Count and print how many records contain the phrase '图转侵删'.

    The original collected ``err_text``/``err_meta``/``err`` lists and read
    ``obj['meta']`` without ever using them (the unused lookup could even
    raise KeyError on records lacking 'meta'); the unused ``pattern``
    variable is gone too. Behavior for well-formed input is unchanged:
    the match count is printed.

    :param data_path: path to a UTF-8 JSONL file with a 'text' field per line.
    """
    count = 0
    with open(data_path, 'r', encoding='utf-8') as file:
        # Read line by line: each line is one JSON record
        for line in file:
            obj = json.loads(line)
            # Plain substring test — no regex needed for a literal phrase
            if '图转侵删' in obj['text']:
                count += 1
    print(count)
# Split a paragraph into sentences
def split_text_into_sentences(text):
    """Split *text* into sentences at 。？！ or newline characters.

    The delimiters themselves are discarded (no capture group is used in
    the split pattern), and empty fragments — e.g. from consecutive
    delimiters or a trailing 。 — are dropped.

    :param text: the paragraph to split.
    :return: list of non-empty sentence strings, in original order.
    """
    fragments = re.split(r'[。？！\n]', text)
    return [fragment for fragment in fragments if fragment]
# Locate spelling errors with pycorrector
def find_word_erro(data_path):
    """Flag records containing spelling errors detected by pycorrector.

    Each record's text is split into sentences and corrected in a batch;
    any record with at least one sentence-level error is collected and the
    results are written to ``<split('/')[1] stem>_pycorrector.xlsx``.

    :param data_path: path to a UTF-8 JSONL file with 'text' and 'meta' fields.
    """
    corrector = Corrector()
    read = 0
    count = 0
    err_text = []
    err_meta = []
    err = []
    with open(data_path, 'r', encoding='utf-8') as file:
        # One JSON record per line; progress is reported as we go
        for line in file:
            read += 1
            print('已读取{}条'.format(read))
            obj = json.loads(line)
            text = obj['text']
            meta = obj['meta']
            sentences = split_text_into_sentences(text)
            results = corrector.correct_batch(sentences)
            # Keep only sentence results that report at least one error
            errors = [result for result in results if len(result['errors']) > 0]
            # Any erroneous sentence marks the whole record
            if len(errors) > 0:
                count += 1
                err_text.append(text)
                err.append(errors)
                err_meta.append(meta)
    print(count)
    df = pd.DataFrame({
        'text': err_text,
        'error': err,
        'meta': err_meta
    })
    save_path = data_path.split('/')[1].split('.')[0] + '_pycorrector.xlsx'
    df.to_excel(save_path, index=False)





if __name__ == "__main__":
    # Raw string for the Windows path: the original non-raw literal happened
    # to contain no recognized escapes (\d is not one), but any future edit
    # introducing e.g. '\t' or '\n' would silently corrupt the path.
    data_path = r'D:\date-clean\data/zhihu_questions_part.merged-part1.sampled.jsonl'
    lack_describe_OR_label(data_path)
    # lack_question(data_path)
    # mask_sentence(data_path)
    # word_analysis(data_path)
    # find_word_erro(data_path)