# 构建用户画像
import csv
import json

from flask import jsonify
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from py2neo import Graph

from bulid_distance import city_similarity, read_cities_data, get_ex_cities


class UserProfile:
    """A user's job-search profile used for position matching.

    Attributes:
        skills: list of skill-name strings the user possesses.
        demand: job type sought (e.g. "实习" / internship).
        city:   desired work city.
        salary: expected salary — assumed to be in the same unit as the
                job data's salary field (TODO confirm against data source).
    """

    def __init__(self, skills, city, demand, salary):
        self.skills = skills
        self.city = city
        self.demand = demand
        self.salary = salary

    def __repr__(self):
        # Debug-friendly representation; original class had none, which made
        # instances print as an opaque <UserProfile object at 0x...>.
        return (f"{type(self).__name__}(skills={self.skills!r}, "
                f"city={self.city!r}, demand={self.demand!r}, "
                f"salary={self.salary!r})")


# Example/demo profile. NOTE(review): the __main__ flow below rebuilds the
# user's skills/city/salary from input() and never reads this object —
# presumably sample data left over from development; verify before relying on it.
user_profile = UserProfile(
    skills=["Python", "Data Analysis"],
    city="New York",
    demand="实习",
    salary=10000)


# def recommend_positions(user_profile, knowledge_graph):


if __name__ == '__main__':
    # Load the job table once. Iteration below starts at index 1, so row 0
    # is assumed to be a header — TODO confirm against data/job.csv.
    # ('with' fixes the original unclosed file handle.)
    with open('data/job.csv', 'r', encoding="utf-8") as city_csv:
        jobData = list(csv.reader(city_csv))

    # NOTE: original URL was "http://47.109.43.77/:7474" — the stray "/"
    # before the port made the server address invalid.
    graph = Graph("http://47.109.43.77:7474", auth=("neo4j", "Xiyou666"), name="neo4j")

    expected_city = input("请输入期望工作城市：")
    expected_salary1 = input("请输入期望工资：")
    expected_skills = input("请输入技能，多个技能以逗号隔开：")

    # Normalize the comma-separated skill list into one canonical string
    # so CountVectorizer can tokenize it the same way as the job skills.
    skills = [s.strip() for s in expected_skills.split(',')]
    user_skills = {"skills": ','.join(skills)}

    # Expand the requested city into the set of similar/nearby cities that
    # the Cypher query will accept for a job's location.
    jobCity = get_ex_cities(expected_city)
    exCity = {city_name for city_name, _ in city_similarity(expected_city, jobCity)}
    print(list(exCity))

    expected_salary = float(expected_salary1)  # parse once, not once per job row
    candidate_cities = list(exCity)

    job_result = []
    query = '''
        MATCH (j:job {id: $id})-[:HAS_SALARY]->(s:Salary),
        (j)-[:LOCATED_IN]->(c:City),
        (j)-[:REQUIRES]->(sk:skills)
        WHERE s.salary >= $expected_salary AND c.name IN $city
        RETURN s, c, COLLECT(sk) AS skills, j.id
    '''
    query2 = '''
        MATCH (j:job {id: $jobId})
        RETURN j
    '''

    for i in range(1, len(jobData)):
        results = []
        # Fetch this job's salary/city/skills if it meets salary + city filters.
        result_category = graph.run(query, id=jobData[i][0],
                                    expected_salary=expected_salary,
                                    city=candidate_cities).data()
        for result in result_category or []:
            salary = result['s']['salary']
            city = result['c']['name']
            skill_names = [skill['name'] for skill in result['skills']]
            jobId = result['j.id']
            job_skills = {"skills": ','.join(skill_names)}

            # Vectorize on the job's skill vocabulary; user skills outside that
            # vocabulary simply contribute nothing to the overlap.
            vectorizer = CountVectorizer(tokenizer=lambda x: x.split(','))
            job_vector = vectorizer.fit_transform([job_skills["skills"]])
            user_vector = vectorizer.transform([user_skills["skills"]])

            # cosine_similarity returns a 1x1 ndarray — extract the scalar so
            # the threshold test, sort key, and printout work on a plain float
            # (original compared/printed the raw array).
            similarity = cosine_similarity(user_vector, job_vector)[0][0]
            if similarity > 0.4:
                results.append(
                    {"similarity": similarity, "jobId": jobId, "salary": salary,
                     "city": city, "skills": skill_names})

        # BUGFIX: sorting/printing/fetching used to run inside the match loop,
        # re-printing every earlier match and re-appending duplicate rows to
        # job_result each time a new match arrived. Do it once per job row.
        results.sort(key=lambda r: r["similarity"], reverse=True)
        for result1 in results:
            print(
                "____________________________________________________________________________________________________________________________________")
            print(f"JobId: {result1['jobId']}")
            print(f"Salary: {result1['salary']}")
            print(f"City: {result1['city']}")
            print(f"Skills: {result1['skills']}")
            print(f"相似度: {result1['similarity']}")
            # Pull the full job node for each matched job id.
            job_result.extend(graph.run(query2, jobId=result1['jobId']).data())

    print(job_result)
    # TODO(review): commented-out export path kept for reference — dump the
    # matched jobs to a JSON file:
    # job_result_list = [dict(result) for result in job_result]
    # with open('job_result.json', 'w', encoding='utf-8') as f:
    #     json.dump(job_result_list, f, ensure_ascii=False)
