import jieba
import pdfplumber
import jiagu
import os
import re
from docx import Document
from paddleocr import PaddleOCR

def extract_text_from_file(path):
    """
    Extract nouns, education level, and IT keywords from a resume file.

    :param path: path to an image, PDF, or Word document
    :return: tuple (nouns_str, education, it_keywords_str)
    :raises ValueError: if the file extension is not supported
    """
    file_extension = os.path.splitext(path)[1].lower()

    if file_extension in [".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".gif"]:
        result_n, xueli = extract_n_from_image(path)
        result_it = extract_it_from_image(path)
    elif file_extension == '.pdf':
        result_n, xueli = extract_n_from_pdf(path)
        result_it = extract_it_from_pdf(path)
    elif file_extension in ('.doc', '.docx'):
        result_n, xueli = extract_n_from_word(path)
        result_it = extract_it_from_word(path)
    else:
        # BUG FIX: previously fell through and returned None implicitly,
        # which breaks the 3-tuple unpacking at the call site. Fail fast.
        raise ValueError(f"Unsupported file type: {file_extension}")

    return result_n, xueli, result_it


def extract_text_from_text(scanf):
    """Extract nouns, education level, and IT keywords from raw resume text."""
    nouns, education = extract_n_from_text(scanf)
    it_terms = extract_it_from_text(scanf)
    return nouns, education, it_terms

# Text pre-processing
def preprocess_text(text):
    """Lower-case *text* and blank out list markers, punctuation, and newlines."""
    # Remove enumeration markers such as "1.", "1、", "一、" and "一."
    cleaned = re.sub(r'([一二三四五六七八九十]+[、.])|\d+\.|\d+、', ' ', text.lower())
    # Replace punctuation with spaces
    cleaned = re.sub(r'[^\w\s]', ' ', cleaned)
    # Replace newline characters with spaces
    return re.sub(r'\n', ' ', cleaned)


def tichu(start_values, tichu_values):
    """
    Filter out unwanted resume entries.

    :param start_values: iterable of extracted text items
    :param tichu_values: blacklist; an item containing any of these substrings is dropped
    :return: list of items containing none of the blacklist substrings
    """
    kept = []
    for entry in start_values:
        if all(marker not in entry for marker in tichu_values):
            kept.append(entry)
    return kept


def get_xueli(original_list, substrings_to_check):
    """
    Determine the highest education level mentioned in the text.

    :param original_list: list of extracted text items
    :param substrings_to_check: education keywords to look for
    :return: highest matching degree name, or "无学历" if none matched
    """
    matched_substrings = set()
    for item in original_list:
        for substring in substrings_to_check:
            if substring in item:
                matched_substrings.add(substring)
    # Check from highest degree to lowest.
    if "博士" in matched_substrings:
        return "博士"
    # BUG FIX: the original used issubset(), requiring BOTH "硕士" and
    # "研究生" to appear; either term alone already indicates a master's.
    elif matched_substrings & {"硕士", "研究生"}:
        return "硕士"
    elif "本科" in matched_substrings:
        return "本科"
    elif "大专" in matched_substrings:
        return "大专"
    else:
        return "无学历"


# Extract IT-domain vocabulary
def extract_it(text):
    """
    Tokenize *text* with jieba and keep only words found in the IT vocabulary.

    The custom dictionary is loaded once and cached on the function object —
    the original reloaded the file and re-registered the userdict on every
    call, repeating the same file I/O needlessly.

    :param text: pre-processed resume text
    :return: list of matched IT terms (original casing preserved)
    """
    dict_path = "../Files/Prepare/it专业词汇.txt"
    if not hasattr(extract_it, "_tech_dict"):
        # Teach jieba the custom terms so they survive segmentation.
        jieba.load_userdict(dict_path)
        with open(dict_path, "r", encoding="utf-8") as f:
            # Lower-cased for case-insensitive matching.
            extract_it._tech_dict = {line.strip().lower() for line in f}

    tech_dict = extract_it._tech_dict
    return [word for word in jieba.cut(text) if word.lower() in tech_dict]


# Extract IT vocabulary from a PDF resume
def extract_it_from_pdf(pdf_path):
    """
    Read all text from a PDF and return matched IT terms as a comma-separated string.

    :param pdf_path: path to the PDF file
    :return: comma-separated IT keywords found in the document
    """
    with pdfplumber.open(pdf_path) as pdf:
        text = ""
        for page in pdf.pages:
            # BUG FIX: extract_text() returns None for pages without
            # extractable text; guard so `+=` cannot raise TypeError.
            text += page.extract_text() or ""
    # NOTE(review): `text` is a str, so tichu() iterates characters and the
    # two-character marker "简历" can never match — kept for compatibility.
    cleaned_list = tichu(text, ["简历"])
    tt = "".join(cleaned_list)
    result = preprocess_text(tt)
    tech_stack = extract_it(result)

    # Join the matches into a comma-separated string.
    return ", ".join(tech_stack)


# Extract the IT tech stack from an image
def extract_it_from_image(image_path):
    """OCR an image resume and return matched IT terms, comma-separated."""
    engine = PaddleOCR(use_angle_cls=True, use_gpu=False)  # CPU preload, no GPU
    ocr_result = engine.ocr(image_path, cls=True)  # run OCR on the image file
    # Collect every recognized text fragment.
    fragments = [line[1][0] for block in ocr_result for line in block]
    filtered = tichu(fragments, ["简历"])
    merged = "".join(filtered)
    normalized = preprocess_text(merged)
    matches = extract_it(normalized)
    # Join the matches into a comma-separated string.
    return ", ".join(matches)


# Extract the IT tech stack from a Word document
def extract_it_from_word(word_path):
    """Read paragraphs and tables of a .docx resume; return IT terms, comma-separated."""
    doc = Document(word_path)
    pieces = []
    for para in doc.paragraphs:
        pieces.append(para.text + "\n")

    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                pieces.append(cell.text + "\t")  # tab-separated cell text
            pieces.append("\n")  # newline at the end of each row

    text = "".join(pieces)
    filtered = tichu(text, ["简历"])
    normalized = preprocess_text("".join(filtered))
    matches = extract_it(normalized)

    # Join the matches into a comma-separated string.
    return ", ".join(matches)


# Extract the IT tech stack from plain text
def extract_it_from_text(text):
    """Return IT terms found in *text* as a comma-separated string."""
    filtered = tichu(text, ["简历"])
    normalized = preprocess_text("".join(filtered))
    matches = extract_it(normalized)
    # Join the matches into a comma-separated string.
    return ", ".join(matches)


# Noun extraction
def extract_n(text):
    """
    Segment the text and keep only the nouns.

    :param text: one span of pre-processed text
    :return: list of words tagged as nouns
    """
    tokens = jiagu.seg(text)
    # Drop bare-space tokens left over from preprocessing.
    tokens = [t for t in tokens if t != ' ']
    tags = jiagu.pos(tokens)  # part-of-speech tagging
    return [token for token, tag in zip(tokens, tags) if tag == 'n']


# Extract nouns from an image
def extract_n_from_image(image_path):
    """OCR an image resume; return (comma-separated nouns, education level)."""
    engine = PaddleOCR(use_angle_cls=True, use_gpu=False)  # CPU preload, no GPU
    ocr_result = engine.ocr(image_path, cls=True)  # run OCR on the image file
    # Collect every recognized text fragment.
    fragments = []
    for block in ocr_result:
        for line in block:
            fragments.append(line[1][0])
    filtered = tichu(fragments, ["简历"])
    normalized = preprocess_text("".join(filtered))
    nouns = extract_n(normalized)
    education = get_xueli(fragments, ['本科', '大专', '研究生', '硕士', '博士'])

    # Join the nouns into a comma-separated string.
    return ", ".join(nouns), education


# Extract nouns from a PDF resume
def extract_n_from_pdf(pdf_path):
    """
    Read a PDF resume; return (comma-separated nouns, education level).

    :param pdf_path: path to the PDF file
    :return: tuple (nouns_str, education)
    """
    with pdfplumber.open(pdf_path) as pdf:
        text = ""
        for page in pdf.pages:
            # BUG FIX: extract_text() returns None for pages without
            # extractable text; guard so `+=` cannot raise TypeError.
            text += page.extract_text() or ""
    cleaned_list = tichu(text, ["简历"])
    tt = "".join(cleaned_list)
    result = preprocess_text(tt)
    tech_stack = extract_n(result)
    # BUG FIX: get_xueli expects a list of strings. Passing the raw str made
    # it iterate single characters, so two-character degree names like "本科"
    # could never match and the result was always "无学历".
    xueli = get_xueli([text], ['本科', '大专', '研究生', '硕士', '博士'])

    # Join the nouns into a comma-separated string.
    return ", ".join(tech_stack), xueli


# Extract nouns from a Word resume
def extract_n_from_word(word_path):
    """
    Read paragraphs and tables of a .docx resume; return (comma-separated
    nouns, education level).

    :param word_path: path to the Word document
    :return: tuple (nouns_str, education)
    """
    doc = Document(word_path)
    text = ""
    for para in doc.paragraphs:
        text += para.text + "\n"

    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                text += cell.text + "\t"  # tab-separated cell text
            text += "\n"  # newline at the end of each row
    cleaned_list = tichu(text, ["简历"])
    tt = "".join(cleaned_list)
    result = preprocess_text(tt)
    tech_stack = extract_n(result)
    # BUG FIX: wrap the str in a list — get_xueli iterates items, and a raw
    # str yields single characters in which multi-character degree names can
    # never be found (so it always returned "无学历").
    xueli = get_xueli([text], ['本科', '大专', '研究生', '硕士', '博士'])

    # Join the nouns into a comma-separated string.
    return ", ".join(tech_stack), xueli

def extract_n_from_text(text):
    """
    Extract nouns and education level from raw resume text.

    :param text: resume text as one string
    :return: tuple (comma-separated nouns, education)
    """
    cleaned_list = tichu(text, ["简历"])
    tt = "".join(cleaned_list)
    result = preprocess_text(tt)
    tech_stack = extract_n(result)
    # BUG FIX: pass [text] so get_xueli scans the whole string instead of
    # individual characters (degree names are multi-character, so the str
    # form always yielded "无学历").
    xueli = get_xueli([text], ['本科', '大专', '研究生', '硕士', '博士'])

    # Join the nouns into a comma-separated string.
    return ", ".join(tech_stack), xueli

def extract_resume_text_from_file(path):
    """
    Extract the raw text of a resume file and return it segmented by jieba,
    joined with commas.

    :param path: path to an image, PDF, or Word document
    :return: comma-separated segmented words
    :raises ValueError: if the file extension is not supported
    """
    file_extension = os.path.splitext(path)[1].lower()

    if file_extension in [".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".gif"]:
        ocr = PaddleOCR(use_angle_cls=True, use_gpu=False)  # CPU preload, no GPU
        result = ocr.ocr(path, cls=True)  # run OCR on the image file
        # Join the OCR fragments into one space-separated string.
        fragments = [line[1][0] for block in result for line in block]
        text = ' '.join(fragments)

    elif file_extension == '.pdf':
        with pdfplumber.open(path) as pdf:
            text = ""
            for page in pdf.pages:
                # BUG FIX: extract_text() returns None for image-only pages.
                text += page.extract_text() or ""

    elif file_extension in ('.doc', '.docx'):
        doc = Document(path)
        text = ""
        for para in doc.paragraphs:
            text += para.text

        for table in doc.tables:
            for row in table.rows:
                for cell in row.cells:
                    text += cell.text + "\t"  # tab-separated cell text
                text += "\n"  # newline at the end of each row
    else:
        # BUG FIX: previously fell through with `text` undefined, raising a
        # confusing NameError at the regex below. Fail fast instead.
        raise ValueError(f"Unsupported file type: {file_extension}")

    # Keep only word characters and CJK ideographs...
    text1 = re.sub(r'[^\w\u4e00-\u9fa5]', ' ', text)
    # ...then strip all remaining whitespace.
    text2 = re.sub(r'\s+', '', text1)
    # Segment with jieba and join the tokens with commas.
    return ','.join(jieba.cut(text2))

# a= extract_resume_text_from_file("C:/Users/WUXINTAO/Desktop/resume.pdf")
# print(a)
#
# b,c,d=extract_text_from_file("C:/Users/WUXINTAO/Desktop/resume.pdf")
# print(d)