# -*- encoding: utf-8 -*-
# @Time       :  18:05
# @Author     : yuxian
# @Email      : 1503889663@qq.com
# @File       : 智能简历挑选器.py
# @SoftWare   : PyCharm
from paddlenlp import Taskflow
# from pdfminer.converter import TextConverter
# from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
# from pdfminer.pdfpage import PDFPage
# import io
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import fitz
import tkinter as tk
from tkinter import ttk, filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

from matplotlib import rcParams

# Use the SimHei (黑体) font so the Chinese category labels on the radar
# chart render correctly instead of as empty boxes.
rcParams['font.family'] = 'SimHei'


class RadarChartApp:
    """Tkinter GUI that scores a PDF resume and displays the five category
    scores as a weighted radar chart.

    Left pane: a matplotlib polar plot embedded via FigureCanvasTkAgg.
    Right pane: a PDF file picker plus one weight slider per category.
    """

    def __init__(self, root):
        """Set up window, scoring state, and widgets.

        :param root: the tk.Tk root window to attach the UI to
        """
        self.root = root
        self.root.title("智能简历挑选器")
        self.root.geometry("800x600")
        self.categories = ["项目经历", "教育背景与经历", "专业技能与能力", "个人特质", "奖项与荣誉"]
        self.weights = [1.0] * len(self.categories)
        # BUGFIX: scores must exist before any slider callback runs.
        # ttk.Scale invokes its command as soon as scale.set(1.0) is called
        # in create_widgets, which previously hit plot_radar_chart with
        # self.values undefined and raised AttributeError at startup (and
        # whenever a slider was moved before a PDF was loaded).
        self.values = [0.0] * len(self.categories)
        self.main_frame = ttk.Frame(self.root)
        self.main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)
        self.create_widgets()

    def create_widgets(self):
        """Build the radar-chart pane (left) and the control pane (right)."""
        # Left: radar chart area. Not packed here — plot_radar_chart packs it
        # the first time something is drawn.
        self.radar_frame = ttk.Frame(self.main_frame)

        self.fig, self.ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.radar_frame)
        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)

        # Right: upload button, current-file label, and weight sliders.
        self.controls_frame = ttk.Frame(self.main_frame)
        self.controls_frame.pack(side=tk.RIGHT, padx=20)

        self.file_button = ttk.Button(self.controls_frame, text="上传PDF简历", command=self.open_pdf_file)
        self.file_button.pack()

        self.file_label = ttk.Label(self.controls_frame, text="")
        self.file_label.pack(pady=5)

        # One 0..1 weight slider per category. `idx=i` binds the loop
        # variable at definition time (late-binding-closure pitfall).
        self.weight_scales = []
        for i, category in enumerate(self.categories):
            scale_label = ttk.Label(self.controls_frame, text=category)
            scale_label.pack()
            scale = ttk.Scale(self.controls_frame, from_=0, to=1, orient=tk.HORIZONTAL,
                              length=100,
                              command=lambda value, idx=i: self.update_weight(value, idx))
            scale.set(1.0)
            scale.pack()
            self.weight_scales.append(scale)

    def open_pdf_file(self):
        """Ask for a PDF path; on selection, show its name and score it."""
        file_path = filedialog.askopenfilename(filetypes=[("PDF Files", "*.pdf")])
        if file_path:
            self.file_label.config(text=os.path.basename(file_path))
            self.process_pdf(file_path)

    def process_pdf(self, file_path):
        """Run the resume scorer on *file_path* and redraw the chart."""
        self.values = IntelligentResumePicker(file_path).run()
        self.plot_radar_chart()

    def plot_radar_chart(self, *_):
        """Redraw the radar chart from self.values scaled by self.weights."""
        self.ax.clear()
        N = len(self.categories)
        angles = np.linspace(0, 2 * np.pi, N, endpoint=False).tolist()
        angles += angles[:1]  # repeat the first angle to close the polygon

        weighted_values = [value * weight for value, weight in zip(self.values, self.weights)]
        weighted_values += weighted_values[:1]  # close the polygon

        self.ax.plot(angles, weighted_values, linewidth=1, linestyle='solid', label="实际值")
        self.ax.fill(angles, weighted_values, 'b', alpha=0.1)

        self.ax.set_theta_offset(np.pi / 2)   # first axis points up
        self.ax.set_theta_direction(-1)       # clockwise
        self.ax.set_xticks(angles[:-1])
        self.ax.set_xticklabels(self.categories, color='black', size=10)
        self.canvas.draw()
        # Pack (idempotent) so the chart becomes visible on first draw.
        self.radar_frame.pack(side=tk.LEFT)

    def update_weight(self, value, idx):
        """Slider callback: store the new weight for category *idx* and redraw.

        :param value: slider value as a string (tk passes strings)
        :param idx: index into self.weights / self.categories
        """
        self.weights[idx] = float(value)
        self.plot_radar_chart()


class IntelligentResumePicker:
    """Scores a PDF resume along five dimensions.

    Pipeline: extract the PDF text with PyMuPDF, normalise whitespace, bucket
    PaddleNLP NER entities into the five dimensions, then score each bucket
    by averaging Word2Vec top-1 similarities (with a size-based fallback).

    Usage: ``IntelligentResumePicker(path).run()`` -> list of five floats in
    the order [项目经历, 教育背景与经历, 专业技能与能力, 个人特质, 奖项与荣誉].
    """

    def __init__(self, file_path):
        """Load the NLP pipelines, the Word2Vec model, and the stop words.

        :param file_path: path to the PDF resume to score
        """
        self.text = None
        # Two Taskflow pipelines: schema-driven extraction (UIE) and plain NER.
        self.ner_nlp = Taskflow('information_extraction')
        self.ner_nlp1 = Taskflow('ner')
        self.file_path = file_path
        self.model = Word2Vec.load("../Data/word2vec model/zh_to_vec.model")
        # BUGFIX: close the stop-word file instead of leaking the handle.
        with open("../Data/stop_words.txt", encoding='utf-8') as f:
            self.stop_word = f.read().split("\n")

    @staticmethod
    def zh_replace_special_char(cleaned_text):
        """Collapse every run of whitespace (spaces, tabs, newlines) into a
        single space.

        :param cleaned_text: raw text extracted from the PDF
        :return: whitespace-normalised text
        """
        # Raw string: "\s" is an invalid escape in a plain string literal.
        return re.sub(r"\s+", " ", cleaned_text)

    def extract_text_from_pdf(self):
        """Concatenate the plain text of every page of the PDF.

        Stores the result on ``self.text`` and returns it; returns None when
        the document yielded no text (e.g. a scanned image-only PDF).
        """
        doc = fitz.open(self.file_path)
        try:
            texts = "".join(doc.load_page(i).get_text("text")
                            for i in range(doc.page_count))
        finally:
            # BUGFIX: release the PyMuPDF document handle.
            doc.close()
        if texts:
            self.text = texts
            return texts

    def get_key_word(self):
        """Bucket NER entities into the five resume dimensions.

        :return: [project_experience, education_background,
                  professional_skills, personal_traits, awards_honors]
        """
        # The UIE schema depends on whether the resume lists internships.
        if '实习经历' in self.text:
            resume_criteria = ["实习经历", "教育背景", "个人能力", "个人特质", "奖项与荣誉"]
        else:
            resume_criteria = ["项目经历", "教育背景", "专业技能", "个人特质", "奖项与荣誉"]
        self.ner_nlp.set_schema(resume_criteria)
        doc = self.ner_nlp(self.text)
        # Keep only the highest-probability span per schema key
        # (max is enough — no need to sort the whole list).
        sorted_data = {}
        for key, values in doc[0].items():
            best = max(values, key=lambda x: x['probability'])
            sorted_data[key] = best['text']

        project_experience = []
        education_background = []
        professional_skills = []
        personal_traits = []
        awards_honors = []

        # Route each NER (entity, label) pair into one bucket; the elif
        # ordering matters because the label tests overlap (e.g. 作品类).
        for entity, label in self.ner_nlp1(self.text):
            if entity in self.stop_word or len(entity.strip()) < 2:
                continue
            entity = entity.strip()
            if label == "作品类_实体":
                # Project experience ('作品类' in label is implied by the ==)
                project_experience.append(entity)
            elif '组织机构类' in label or ('术语类' in label and '课程' in entity):
                # Education background
                education_background.append(entity)
            elif ('术语类' in label and "术语类_符号指标类" != label) or '作品类' in label or '场景事件' in label:
                # Professional skills
                professional_skills.append(entity)
            elif '个性特征' in label:
                # Personal traits
                personal_traits.append(entity)
            elif '奖项' in label or '荣誉' in label:
                # Awards and honors
                awards_honors.append(entity)

        # The UIE span for 教育背景 complements the NER-derived entries.
        if sorted_data.get("教育背景"):
            education_background.append(sorted_data['教育背景'])

        return [project_experience, education_background, professional_skills, personal_traits, awards_honors]

    def calculate_similarity_score(self, entity_list, label):
        """Average the top-1 Word2Vec similarity over all in-vocabulary
        entities; fall back to a count-based heuristic when none scored.

        :param entity_list: list of entity lists (one inner list per dimension)
        :param label: dimension name; "教育背景与经历" uses a higher fallback
        :return: float score
        """
        def fallback():
            # Heuristic when no entity is in the Word2Vec vocabulary:
            # scale with the number of entities, capped for large buckets.
            count = len(entity_list[0])
            if label == "教育背景与经历":
                return count * 0.1 if count < 10 else 0.85
            return count * 0.07 if count < 10 else 0.75

        scores = []
        for entities in entity_list:
            entity_scores = []
            for entity in entities:
                if entity in self.model.wv:
                    # Similarity of the closest neighbour in the vector space.
                    most_similar = self.model.wv.most_similar(entity, topn=1)
                    similarity = most_similar[0][1] if most_similar else 0
                    if similarity > 0:
                        entity_scores.append(similarity)
            if entity_scores:
                scores.append(np.mean(entity_scores))
        return np.mean(scores) if scores else fallback()

    def run(self):
        """Full pipeline: extract text, clean it, bucket entities, score.

        :return: list of five floats ordered as ``name_title`` below
        :raises ValueError: when no text could be extracted from the PDF
            (previously this surfaced as an opaque TypeError on None)
        """
        self.extract_text_from_pdf()
        if not self.text:
            raise ValueError(f"No text could be extracted from {self.file_path}")
        self.text = self.zh_replace_special_char(self.text)
        data = self.get_key_word()
        name_title = ["项目经历", "教育背景与经历", "专业技能与能力", "个人特质", "奖项与荣誉"]
        return [self.calculate_similarity_score([entities], label)
                for label, entities in zip(name_title, data)]


if __name__ == '__main__':
    # Script entry point: create the root window, attach the resume-picker
    # GUI, and hand control to the Tk event loop.
    main_window = tk.Tk()
    app = RadarChartApp(main_window)
    main_window.mainloop()
