from pandas import DataFrame,read_excel
from re import split,finditer
from xlsxwriter.workbook import Workbook
from xlwt import Workbook as Wb
from os.path import exists,splitext,basename
from os import makedirs

def mkdir():
    """Create the output directories rawData/ and markData/ (idempotent).

    Uses exist_ok=True instead of an exists() pre-check, which avoids the
    check-then-create race and is a no-op when the directory already exists.
    """
    makedirs('rawData', exist_ok=True)
    makedirs('markData', exist_ok=True)

def model_set_colors(data, data_result, cate_data):
    """Write each raw-data row into *data_result*, colouring every keyword
    occurrence red via xlsxwriter rich strings.

    data        -- DataFrame with columns 序号 / 文本 / 文献名称
    data_result -- an xlsxwriter Workbook; it is closed (saved) on return
    cate_data   -- DataFrame whose 关键词 column lists the keywords
    """
    workbook = data_result
    # ASCII keywords may mix upper/lower case, so normalise to lower case
    # (the row text is lower-cased the same way below before matching).
    keywords = cate_data['关键词'].str.lower()
    keyword_num = keywords.shape[0]
    # Prepare the sheet and the red format used when binding colour to
    # keyword fragments below.
    worksheet = workbook.add_worksheet()
    red = workbook.add_format({'color':'red'})
    # Number of data rows; each one becomes a rich string with the matched
    # keywords highlighted.
    nums_data = data.shape[0]
    # Initial write position on the worksheet.
    work_row = 1
    work_col = 0
    work_col2 = 1
    # Header row
    bold = workbook.add_format({'bold': True, 'align': 'center', })
    worksheet.write("A1", "序号", bold)
    worksheet.write("B1", "文本", bold)
    worksheet.write("C1", "文献名称", bold)


    for num in range(nums_data):
        txt = data.loc[num, '文本']
        id_data = data.loc[num, '序号']
        file_source = data.loc[num, '文献名称']
        txt = str(txt).lower()
        match_str = ''  # '|'-joined keywords found anywhere in this row's text
        for i in range(keyword_num):
            keyword = keywords[i]
            keyword = str(keyword)

            if txt.find(keyword) >= 0:
                match_str = match_str+keyword+'|'
        match_str = match_str.strip('|')    # keywords collected, but not yet in the order they appear in the text
        # re.finditer yields matches in textual order, so run a second
        # match to list the keywords by their position in the text.
        # NOTE(review): the keywords are used as a regex pattern without
        # re.escape, so a keyword containing regex metacharacters would
        # misbehave — confirm keywords are plain text.
        re_match = finditer(match_str,txt)
        re_str = ''
        for m in re_match:
            re_str = re_str + str(m.group()) + '|'
        re_str = re_str.strip('|')

        keyword_split = re_str.split('|')   # keywords now in order of appearance (duplicates kept)

        keyword_split_num = len(keyword_split)
        keyword_match = keyword_split
        match_words = []
        #keyword_match_num = len(keyword_match)
        # Collect shorter keywords that are substrings of a longer matched
        # keyword, so only the longest match keeps the highlight.
        for j in range(keyword_split_num):
            for k in range(len(keyword_match)):
                if (keyword_split[j].find(keyword_match[k])>= 0) and (len(keyword_split[j])>len(keyword_match[k])):
                    match_words.append(keyword_match[k])
        # Remove those shorter, shadowed keywords from the ordered list.
        num_match_words = len(match_words)
        for i in range(num_match_words):
            if match_words[i] in keyword_split:
                keyword_split.remove(match_words[i])
        keyword_set= keyword_split
        keyword_sep = ''
        for each in keyword_set:
            keyword_sep = keyword_sep + each + '|'
        keyword_sep = keyword_sep.strip('|')
        if(len(keyword_sep)) == 0:   # skip this row if the match pattern is empty
            continue
        # Split the whole text on the keywords, then interleave the red
        # format with keyword/following-text pairs; keyword_set and
        # temp_list must stay aligned one-to-one.
        temp_list = split(keyword_sep, txt)
        # temp_list = [x.strip() for x in temp_list if x.strip()!='']
        for i,x in enumerate(temp_list):
            if x.strip()=='':
                # FIXME: empty fragments are replaced with a zero-width
                # space, which may be fragile; a plain space would also
                # work — write_rich_string rejects empty string segments.
                temp_list[i] = '\u200b'

        params = []
        temp_list_num = len(temp_list)
        for i in range(temp_list_num):
            if i != 0:
                params.extend((red,keyword_set[i-1],temp_list[i]))
            else:
                params.append(temp_list[i])
        
        worksheet.write(work_row, work_col, id_data)
        worksheet.write_rich_string(work_row, work_col2, *params)
        worksheet.write(work_row, work_col + 2, "《"+file_source+"》")
        work_row = work_row+1
    workbook.close()

def getTextKeywords(keyword: str, full_file_name: str, file_name: str):
    '''Scan a text file for sentences containing *keyword* and save them
    to rawData/<stem>-<keyword>.xls (columns: 序号 / 文本 / 文献名称).

    A sentence that contains the keyword k times is written k times, so
    the raw sheet holds one row per keyword occurrence.

    @param keyword: keyword to search for
    @param full_file_name: absolute path of the source text file
    @param file_name: bare file name (used for the 文献名称 column and
                      the output file name)
    '''
    # Probe the file as gb18030 first; fall back to utf8 if that fails.
    # NOTE(review): errors='replace' masks decoding errors, so the utf8
    # fallback may never actually trigger — confirm this is intended.
    f = None
    try:
        f = open(full_file_name, encoding='gb18030', errors='replace')
        f.readline()  # probe whether the encoding can read the file
    except Exception:
        if f is not None:
            f.close()  # fix: do not leak the first handle on fallback
        f = open(full_file_name, encoding='utf8')

    book = Wb()  # xlwt workbook (.xls output)
    sheet = book.add_sheet('sheet1', cell_overwrite_ok=True)
    # Header row
    sheet.write(0, 0, "序号")
    sheet.write(0, 1, "文本")
    sheet.write(0, 2, "文献名称")

    row = 1  # first data row (row 0 is the header)
    source = splitext(file_name)[0].strip()  # 文献名称 value, same for all rows

    with f:  # guarantees the handle is closed even if writing fails
        f.seek(0, 0)  # rewind after the encoding probe
        for line in f:
            # Strip the trailing newline first; otherwise the trailing
            # full stop is never at the end and split() leaves a bogus
            # newline fragment behind.
            line = line.rstrip('\n')
            # Drop a trailing 。 so split('。') yields no empty tail.
            if line.endswith('。'):
                line = line[:-1]
            if not line:
                continue
            # Split the line into sentences on the full stop.
            for sentence in line.split('。'):
                keyword_count = sentence.count(keyword)
                # One output row per occurrence of the keyword.
                for _ in range(keyword_count):
                    sheet.write(row, 0, row)            # sequence number
                    sheet.write(row, 1, sentence + "。")  # sentence text
                    sheet.write(row, 2, source)          # document source
                    row = row + 1

    book.save('rawData/' + splitext(file_name)[0] + '-' + keyword + '.xls')


def start(fullpath, keyword):
    '''Run the search-and-mark pipeline for one document.

    @param fullpath: absolute path of the source text file, including name
    @param keyword: the keyword to search for and highlight in red
    '''

    mkdir()
    file_name = basename(fullpath)  # bare file name
    stem = splitext(file_name)[0]

    getTextKeywords(keyword, fullpath, file_name)
    data = read_excel(r'rawData/{0}-{1}.xls'.format(stem, keyword))  # raw extracted sentences
    data_result = Workbook(r'markData/{0}-{1}.xlsx'.format(stem, keyword))  # marked result file
    # BUG FIX: DataFrame(list(keyword), ...) split the keyword string into
    # single characters, so each character was treated as its own keyword
    # and highlighted separately; the whole keyword must be the single
    # entry of the keyword table.
    cate_data = DataFrame([keyword], columns=['关键词'])
    # Mark the keyword occurrences in red.
    model_set_colors(data, data_result, cate_data)
