import base64
import heapq
import os

import fasttext
import pandas as pd
import numpy as np
import torch
from Crypto.Util.Padding import unpad
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv
import pymysql

from Crypto.Cipher import AES


# Build a graph from each job-requirement / resume-skill word list, then use a
# graph convolutional network to combine and transform features across nodes.
class GCN(torch.nn.Module):
    """Three stacked graph convolutions followed by a linear classifier.

    Node features are reduced 100 -> 70 -> 10 -> 5 dimensions through the
    convolution layers (tanh between each), then projected to a 30-d output.
    """

    def __init__(self):
        super().__init__()
        torch.manual_seed(1234)  # deterministic weight initialisation
        self.conv1 = GCNConv(100, 70)  # 100-d input features -> 70-d
        self.conv2 = GCNConv(70, 10)   # 70-d -> 10-d
        self.conv3 = GCNConv(10, 5)    # 10-d -> 5-d
        self.classifier = torch.nn.Linear(5, 30)  # 5-d -> 30-d output

    def forward(self, x, edge_index):
        """Propagate node features through the convolution stack."""
        hidden = self.conv1(x, edge_index).tanh()
        hidden = self.conv2(hidden, edge_index).tanh()
        hidden = self.conv3(hidden, edge_index).tanh()
        return self.classifier(hidden)


class Recommendation:
    """Skill-matching recommender.

    Converts each resume-skill / job-requirement word list into a star-shaped
    graph, feeds it through a GCN to obtain one global feature vector per
    record, and ranks dataset entries against a query vector by cosine
    similarity.
    """

    def transform_features(self, data, model):
        """Run graph *data* through *model* and return the node features.

        :param data: torch_geometric ``Data`` instance (``x``, ``edge_index``)
        :param model: callable taking ``(x, edge_index)``, e.g. the GCN above
        :return: tensor of transformed node features
        """
        return model(data.x, data.edge_index)

    def getData(self, mydata, model):
        """Build a torch_geometric ``Data`` graph from a list of skill words.

        Each word becomes one node carrying its 100-d fastText vector; an
        extra all-zero "hub" node is appended with an edge to every word node,
        so convolution can mix all word features together.

        :param mydata: list of skill strings (resume skills or job requirements)
        :param model: fastText model providing ``get_word_vector``
        :return: ``Data`` object with node features ``x`` and ``edge_index``
        """
        # Commas are stripped because the raw skill text may contain them.
        vectors = [model.get_word_vector(word.replace(",", "")) for word in mydata]
        vectors.append(np.zeros(100, dtype=np.float32))  # the hub node
        x = torch.tensor(np.array(vectors))
        length = len(mydata)
        # Star topology: every edge points hub (index == length) -> word node.
        source = [length] * length
        target = list(range(length))
        edge_index = torch.tensor([source, target], dtype=torch.long)
        return Data(x=x, edge_index=edge_index)

    def getData1(self, mydata, model):
        """Backward-compatible alias of :meth:`getData`.

        The original implementation was a byte-for-byte duplicate; delegating
        keeps the graph-building logic in one place.
        """
        return self.getData(mydata, model)

    def cosine_similarity(self, vec1, vec2):
        """Cosine similarity of two vectors.

        :param vec1: first vector
        :param vec2: second vector
        :return: similarity in [-1, 1]; 0.0 if either vector has zero norm
            (the original divided by zero and returned NaN)
        """
        norm_a = np.linalg.norm(vec1)
        norm_b = np.linalg.norm(vec2)
        if norm_a == 0 or norm_b == 0:
            return 0.0
        return np.dot(vec1, vec2) / (norm_a * norm_b)

    def get_finalData(self, input_csv, output_column, word_model_file):
        """Load the CSV dataset and convert every row into a graph.

        :param input_csv: path of the CSV file to read
        :param output_column: two column names: [skill-text column, id column]
        :param word_model_file: path of the fastText model file
        :return: tuple (list of ``Data`` graphs, list of ids)
        """
        model = fasttext.load_model(word_model_file)
        df = pd.read_csv(input_csv)
        rows = [text.split() for text in df[output_column[0]]]
        ids = [value for value in df[output_column[1]]]
        data_all = [self.getData(row, model) for row in rows]
        return data_all, ids

    def model_use(self, data_all):
        """Pass every graph through a fresh GCN and collect global features.

        Side effect: the seeded, randomly initialised model weights are saved
        to ``gcn_model.pth`` so later queries can load the same model.

        :param data_all: list of ``Data`` graphs
        :return: list of per-graph global feature vectors (numpy arrays)
        """
        model = GCN()
        torch.save(model.state_dict(), 'gcn_model.pth')
        data_column_means = []
        for data in data_all:
            transformed = self.transform_features(data, model)
            # Mean over all nodes yields one global vector per graph.
            data_column_means.append(transformed.mean(dim=0).detach().numpy())
        return data_column_means

    def save_features(self, save_file, data_column_means):
        """Save the feature arrays to a ``.npz`` file.

        :param save_file: target file path (``.npz``)
        :param data_column_means: list of numpy arrays to save
        """
        np.savez(save_file, *data_column_means)

    def load_features(self, load_file):
        """Load feature arrays previously written by :meth:`save_features`.

        :param load_file: path of the ``.npz`` file
        :return: list of dataset global-feature arrays
        """
        loaded_arrays = np.load(load_file)
        return [arr for arr in loaded_arrays.values()]

    def get_compare_values(self, mydata, Job, model_file, word_model_file):
        """Compute the global feature vector for one query record.

        :param mydata: one resume-skill / job-requirement word list
        :param Job: unused; kept only for backward compatibility (the original
            forwarded it to ``getData``, which never accepted it)
        :param model_file: GCN state-dict file to load
        :param word_model_file: fastText model file
        :return: numpy global feature vector
        """
        word_model = fasttext.load_model(word_model_file)
        # BUG FIX: the original called self.getData(mydata, Job, word_model),
        # which raised TypeError -- getData takes only (mydata, model).
        userdata = self.getData(mydata, word_model)
        model = GCN()
        model.load_state_dict(torch.load(model_file))
        transformed = self.transform_features(userdata, model)
        return transformed.mean(dim=0).detach().numpy()

    def get_compare_values_user(self, mydata, model_file, word_model_file):
        """Same as :meth:`get_compare_values` but without the Job argument.

        :param mydata: one resume-skill / job-requirement word list
        :param model_file: GCN state-dict file to load
        :param word_model_file: fastText model file
        :return: numpy global feature vector
        """
        word_model = fasttext.load_model(word_model_file)
        userdata = self.getData1(mydata, word_model)
        model = GCN()
        model.load_state_dict(torch.load(model_file))
        transformed = self.transform_features(userdata, model)
        return transformed.mean(dim=0).detach().numpy()

    def getmax_similarity(self, data_column_means, compare_values):
        """Similarity of *compare_values* against every dataset feature vector.

        :param data_column_means: list of dataset feature vectors
        :param compare_values: query feature vector
        :return: list of cosine similarities, aligned with the dataset order
        """
        return [self.cosine_similarity(features, compare_values)
                for features in data_column_means]

    def compare_degrees(self, degree1: str, degree2: str) -> bool:
        """Return True when *degree1* is no higher than *degree2*.

        Unknown degree strings are treated as "requirement not met" and return
        False -- the original printed an error and then crashed with KeyError.
        """
        degree_order = {"博士": 5, "硕士": 4, "本科": 3, "大专": 2, "无学历": 1}
        if degree1 not in degree_order or degree2 not in degree_order:
            return False
        return degree_order[degree1] <= degree_order[degree2]

    def get_recommendation(self, data_column_means, compare_values, result_csv, result_columns, n, operation,
                           use_xueli):
        """Rank the dataset by similarity and return the top filtered rows.

        :param data_column_means: dataset global feature vectors
        :param compare_values: query global feature vector
        :param result_csv: CSV file holding the displayable result rows
        :param result_columns: columns to read; index 1 is the ID column and
            index 2 the education column used by *operation*
        :param n: maximum number of recommendations
        :param operation: predicate(use_xueli, row_education) -> bool
        :param use_xueli: required education level
        :return: list of result-row dicts
        """
        similarity_result = self.getmax_similarity(data_column_means, compare_values)
        # nlargest over (similarity, index) pairs yields indices sorted by
        # descending similarity.
        ranked = heapq.nlargest(len(similarity_result),
                                ((v, i) for i, v in enumerate(similarity_result)))
        indices = [i for _, i in ranked]
        return self.myresult(indices, result_csv, result_columns, n, operation, use_xueli)

    def myresult(self, in_id, result_csv, result_columns, n, operation, use_xueli):
        """Select up to *n* rows (in the given order) that pass the filter.

        :param in_id: row indices, most-similar first
        :param result_csv: CSV file to read the rows from
        :param result_columns: columns to read (index 2 = education column)
        :param n: maximum number of rows to return
        :param operation: predicate(use_xueli, row_education) -> bool
        :param use_xueli: required education level
        :return: list of {column: value} dicts for the selected rows
        """
        df = pd.read_csv(result_csv, usecols=result_columns)

        picked_rows = []
        for row_number in in_id:
            if operation(use_xueli, df.loc[row_number, result_columns[2]]):
                picked_rows.append(row_number)
                if len(picked_rows) == n:
                    break

        return [{column: df.loc[row_number, column] for column in result_columns}
                for row_number in picked_rows]


def mysql_update():
    """Export the ``users`` table of the local MySQL database to output4.csv.

    SECURITY NOTE(review): the connection credentials are hard-coded below;
    they should be moved to environment variables or a config file.
    """
    # MySQL connection parameters.
    db_config = {
        'host': 'localhost',
        'port': 3306,
        'user': 'root',
        'password': 'wxt200413',
        'database': 'fuchuangweb',
    }
    # Query to run (replace with your own as needed).
    sql_query = "SELECT * FROM users"

    connection = pymysql.connect(**db_config)
    try:
        # Pull the whole result set into a DataFrame in one round trip.
        df = pd.read_sql_query(sql_query, connection)
    finally:
        # Close even when the query raises -- the original leaked the
        # connection on error.
        connection.close()

    # Persist the snapshot for the recommendation pipeline to read.
    csv_file_path = 'output4.csv'
    df.to_csv(csv_file_path, index=False)


# AES解密函数
def decrypt_aes(iv, ciphertext):
    """Decrypt a base64-encoded AES-CBC ciphertext with the module key.

    :param iv: base64-encoded initialisation vector
    :param ciphertext: base64-encoded ciphertext
    :return: decrypted plaintext as a UTF-8 string
    """
    raw_iv = base64.b64decode(iv)
    raw_ct = base64.b64decode(ciphertext)
    cipher = AES.new(AES_KEY, AES.MODE_CBC, raw_iv)
    plaintext = unpad(cipher.decrypt(raw_ct), AES.block_size)
    return plaintext.decode('utf-8')


AES_KEY = b'fcxdcwjwcxwxtfjy'  # AES-128 key; replace with your own secret and keep it exactly 16 bytes


# mysql_update()
def recommend_talents(mydata, use_xueli):
    """Recommend up to 10 candidates whose resume skills best match *mydata*.

    :param mydata: list of required skill keywords
    :param use_xueli: minimum education level required (e.g. '大专')
    :return: list of candidate dicts with Email/Telephone decrypted and the
        IV columns removed
    """
    mysql_update()  # refresh output4.csv from the MySQL `users` table
    test = Recommendation()
    current_dir = os.getcwd()
    print(current_dir)
    # Model/data files live in the sibling GCN directory of the parent dir.
    parent_dir = os.path.dirname(current_dir)
    gcn_dir = os.path.join(parent_dir, 'GCN')  # portable: was '\\GCN\\...' (Windows-only)
    csv_file = os.path.join(gcn_dir, 'output4.csv')
    word_model_file = os.path.join(gcn_dir, 'ail9_1.bin')
    data_all, _ = test.get_finalData(csv_file, ['Resume_it', 'Email', ], word_model_file)
    data_column_means = test.model_use(data_all)
    print('4')
    model_file = os.path.join(gcn_dir, 'gcn_model.pth')
    user_values = test.get_compare_values_user(mydata, model_file, word_model_file)
    print('5')
    search_Result = test.get_recommendation(data_column_means, user_values, csv_file,
                                            ['Name', 'Email', 'Education', 'Telephone', 'Resume_it', 'Email_iv',
                                             'Telephone_iv'],
                                            10, test.compare_degrees, use_xueli)
    print('ok')
    # Decrypt email/phone and strip the IV columns for EVERY result row.
    # BUG FIX: the original `del result[...]` statements sat outside the loop,
    # so IVs were removed only from the last row (and a NameError was raised
    # when the result list was empty).
    for result in search_Result:
        result['Email'] = decrypt_aes(result['Email_iv'], result['Email'])
        result['Telephone'] = decrypt_aes(result['Telephone_iv'], result['Telephone'])
        del result['Email_iv']
        del result['Telephone_iv']

    return search_Result


if __name__ == '__main__':
    # Example invocation; the database connection details live in mysql_update().
    # Sample query: a list of required skill keywords.
    mydata = ['pa', '搭建服务器', 'mariadb', '网络架构', '数据库', '运维', '负载均衡', 'android', '高可用', 'ansible',
              '应用服务', 'centos', '环境搭建', 'app', 'nginx', '企业应用', '反向代理', '数据管理', '服务器',
              '项目经验', 'dhcp', '开发', 'shell脚本', '运营', '程序安装']
    # Minimum education level required of candidates.
    xueli = '大专'

    result = recommend_talents(mydata, xueli)
    print(result)
