from decimal import Decimal

import numpy as np
import pandas as pd
import pymysql as psq
from sqlalchemy import create_engine

import roll_处理清洗 as roll
import 数据清洗 as DataCleaning

# SQLAlchemy engine for the local MySQL "hotkeydb" database (utf8 charset).
# NOTE(review): `engine` is never referenced in this file — confirm it is used
# by an importing module, otherwise it (and the pymysql import) can be removed.
engine = create_engine("mysql+pymysql://root:root@localhost:3306/hotkeydb?charset=utf8")


# 薪资区间分布--条形图
# Salary-range distribution -- bar chart
def DataPreprocessing(all_data, index):
    """Bucket the "薪资" column into fixed salary ranges and persist counts.

    Mutates ``all_data["薪资"]`` in place (each "low-high" string is replaced by
    its canonical bucket label, e.g. "1.0-5.0"), writes the per-bucket counts to
    ``./data/salary_range_<index>.csv`` and returns them as a DataFrame with
    columns ['薪资', '数量'], sorted by descending numeric lower bound.

    Salaries that straddle a bucket boundary (e.g. "8-12") match no bucket and
    are dropped by the label filter, as in the original implementation.
    """
    # (lower, upper) bucket edges; first bucket is inclusive at its lower edge,
    # the rest use a strict lower-bound test (preserved from the original code).
    _BINS = [(1, 5), (5, 10), (10, 15), (15, 20), (20, 30), (30, 40)]
    _LABELS = ["1.0-5.0", "5.0-10.0", "10.0-15.0", "15.0-20.0", "20.0-30.0", "30.0-40.0"]

    def map_salary(raw):
        # "low-high" string -> canonical bucket label "L.0-H.0".
        low_s, high_s = raw.split("-")
        low, high = float(low_s), float(high_s)
        for i, (lo, hi) in enumerate(_BINS):
            lower_ok = low >= lo if i == 0 else low > lo
            if lower_ok and high <= hi:
                low, high = lo, hi
                break
        low = Decimal(low).quantize(Decimal("0.0"))
        high = Decimal(high).quantize(Decimal("0.0"))
        return str(low) + "-" + str(high)

    all_data["薪资"] = all_data["薪资"].apply(map_salary)
    # Count rows per bucket directly in memory (the original wrote the counts
    # to CSV and immediately read them back just to get a two-column frame).
    salary_range = all_data.groupby("薪资").size().reset_index(name="数量")
    salary_range = salary_range[salary_range["薪资"].isin(_LABELS)]
    # Fix: the original sorted the label strings lexicographically, which put
    # "5.0-10.0" above "30.0-40.0"; order by the numeric lower bound instead.
    lower_bound = salary_range["薪资"].str.split("-").str[0].astype(float)
    salary_range = salary_range.loc[lower_bound.sort_values(ascending=False).index]
    salary_range.to_csv("./data/salary_range_" + index + ".csv", header=True, index=False)
    return salary_range


# 学位需求
# Degree requirements
def degreeRequirements(computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1):
    """Aggregate posting counts by required degree across the five category
    DataFrames, keep only 本科/大专/博士/硕士, sum duplicates, and write the
    result to ./data/degree_requirements.csv. Returns the aggregated frame.
    """
    kept_degrees = ["本科", "大专", "博士", "硕士"]
    csv_path = "./data/degree_requirements.csv"
    frames = (computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1)
    # One count-Series per category, concatenated end to end.
    combined = pd.concat([frame.groupby("学位需求")["学位需求"].count() for frame in frames])
    combined.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    degree_need = pd.read_csv(csv_path, header=None, names=["学位需求", "数量"])
    degree_need = degree_need[degree_need["学位需求"].isin(kept_degrees)]
    degree_need = degree_need.groupby("学位需求").agg({"数量": "sum"}).reset_index()
    degree_need = degree_need.sort_values(by="学位需求", ascending=False)
    degree_need.to_csv(csv_path, header=True, index=False)
    return degree_need


# 公司规模
# Company size
def companySize(computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1):
    """Aggregate posting counts by company headcount ("公司人数") across the
    five category DataFrames, sum duplicates, and write the result to
    ./data/company_size.csv. Returns the aggregated frame.
    """
    csv_path = "./data/company_size.csv"
    frames = (computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1)
    combined = pd.concat([frame.groupby("公司人数")["公司人数"].count() for frame in frames])
    combined.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    company_size = pd.read_csv(csv_path, header=None, names=["公司人数", "数量"])
    company_size = company_size.groupby("公司人数").agg({"数量": "sum"}).reset_index()
    company_size = company_size.sort_values(by="公司人数", ascending=False)
    company_size.to_csv(csv_path, header=True, index=False)
    return company_size


# 工作经验
# Work experience
def workExperience(computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1):
    """Aggregate posting counts by required experience ("经验需求") across the
    five category DataFrames, keep only the seven recognised labels, sum
    duplicates, and write ./data/experience_requirements.csv. Returns the
    aggregated frame.
    """
    kept_labels = ["无需经验", "1年经验", "2年经验", "5-7年经验", "3-4年经验", "8-9年经验", "10年以上经验"]
    csv_path = "./data/experience_requirements.csv"
    frames = (computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1)
    # One count-Series per category, concatenated end to end.
    combined = pd.concat([frame.groupby("经验需求")["经验需求"].count() for frame in frames])
    combined.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    experience_requirements = pd.read_csv(csv_path, header=None, names=["经验需求", "数量"])
    experience_requirements = experience_requirements[experience_requirements["经验需求"].isin(kept_labels)]
    experience_requirements = experience_requirements.groupby("经验需求").agg({"数量": "sum"}).reset_index()
    experience_requirements = experience_requirements.sort_values(by="经验需求", ascending=False)
    experience_requirements.to_csv(csv_path, header=True, index=False)
    return experience_requirements


# 公司类型
# Company type
def companyType(all_data, index):
    """Count postings per company type ("公司类型"), keep only the recognised
    types, and write the result to ./data/company_type_num_<index>.csv.
    Returns the filtered DataFrame with columns ['公司类型', '数量'].
    """
    # Fix: the original OR-chain listed "民营公司" twice; the de-duplicated
    # membership list below yields the identical filter.
    kept_types = ["民营公司", "上市公司", "国企", "创业公司", "合资", "外资（欧美）", "外资（非欧美）"]
    csv_path = "./data/company_type_num_" + index + ".csv"
    company_type_count = all_data.groupby("公司类型")["公司类型"].count()
    company_type_count = company_type_count.sort_values(ascending=False)
    company_type_count.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    company_type_count = pd.read_csv(csv_path, header=None, names=["公司类型", "数量"])
    company_type_count = company_type_count[company_type_count["公司类型"].isin(kept_types)]
    company_type_count = company_type_count.sort_values(ascending=False, by="公司类型")
    company_type_count.to_csv(csv_path, header=True, index=False)
    return company_type_count


# 公司福利
# Company benefits
def companyBenefits(computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1):
    """Aggregate benefit-tag counts over columns 福利0..福利6 of the five
    category DataFrames and write them to ./data/welfare_num.csv.

    Fix: the original hand-unrolled code counted ``image_algorithms1`` twice
    (welfare_num21-27 duplicated welfare_num7-13) and ``data_mining1``'s block
    was mislabelled, double-counting one category. Each frame is now counted
    exactly once. Also returns the result like the sibling helpers (the
    original returned None).
    """
    csv_path = "./data/welfare_num.csv"
    frames = (computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1)
    counts = []
    for frame in frames:
        for i in range(7):
            col = "福利" + str(i)
            counts.append(frame.groupby(col)[col].count())
    welfare_num = pd.concat(counts)
    welfare_num.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    welfare_num = pd.read_csv(csv_path, header=None, names=["福利", "数量"])
    welfare_num = welfare_num.groupby("福利").agg({"数量": "sum"}).reset_index()
    # Drop malformed scrape artifacts.
    welfare_num = welfare_num[~welfare_num["福利"].isin([",带薪年假,", "..."])]
    welfare_num = welfare_num.sort_values(ascending=False, by="数量")
    welfare_num.to_csv(csv_path, header=True, index=False)
    return welfare_num

# 就职岗位
def inauguralPosition(all_data, index):
    """Aggregate counts over the three 就业岗位0..就业岗位2 columns of one
    category DataFrame, sum duplicate position names, and write the result to
    ./data/inaugural_position_<index>.csv. Returns the aggregated frame.
    """
    csv_path = "./data/inaugural_position_" + index + ".csv"
    per_column = [all_data.groupby(col)[col].count() for col in ("就业岗位0", "就业岗位1", "就业岗位2")]
    post = pd.concat(per_column).sort_values(ascending=False)
    post.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    post = pd.read_csv(csv_path, header=None, names=["就业岗位", "数量"])
    post = post.groupby("就业岗位").agg({"数量": "sum"}).reset_index()
    post.to_csv(csv_path, header=True, index=False)
    return post


# 剔除地名中的“省”“市”字样
def eliminate(data):
    """Strip every '省' and '市' character from *data* (e.g. '广东省' -> '广东')."""
    return data.translate(str.maketrans("", "", "省市"))


# 地区工作数
# Job counts per district
def districtJobs(computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1):
    """Aggregate posting counts by region ("地区") across the five category
    DataFrames, map each city up to its province via the mapping spreadsheet,
    and write ./data/district_jobs.csv.

    Requires ./original_data/省级市级映射表.xlsx with columns '地级' and '省'.
    Returns the aggregated frame (fix: the original returned None, unlike the
    sibling helpers).
    """
    csv_path = "./data/district_jobs.csv"
    frames = (computer_vision1, image_algorithms1, database_management1, data_mining1, big_data1)
    jobs_number = pd.concat([frame.groupby("地区")["地区"].count() for frame in frames])
    jobs_number.to_csv(csv_path, header=False, index=True)
    # Round-trip through CSV to obtain a flat two-column DataFrame.
    jobs_number = pd.read_csv(csv_path, header=None, names=["地区", "数量"])
    # City -> province mapping table.
    map_mapping = pd.read_excel('./original_data/省级市级映射表.xlsx')
    # Fix: build the lookup dict directly from the two columns instead of
    # copying each column into a list with a manual loop first.
    map_dic = dict(zip(map_mapping['地级'], map_mapping["省"]))
    jobs_number["地区"] = jobs_number["地区"].map(map_dic)
    jobs_number = jobs_number.groupby('地区').agg({"数量": "sum"}).reset_index()
    # Normalise province names by dropping the 省/市 suffix characters.
    jobs_number["地区"] = jobs_number["地区"].apply(eliminate)
    jobs_number = jobs_number.sort_values(ascending=False, by="地区")
    jobs_number.to_csv(csv_path, header=True, index=False)
    return jobs_number


if __name__ == '__main__':
    # Clean the raw scrape for each of the five job categories.
    computer_vision = DataCleaning.clearData(r"./original_data/计算机视觉.csv")
    image_algorithms = DataCleaning.clearData(r"./original_data/图像算法.csv")
    database_management = DataCleaning.clearData(r"./original_data/数据库管理.csv")
    data_mining = DataCleaning.clearData(r"./original_data/数据挖掘.csv")
    big_data = DataCleaning.clearData(r"./original_data/大数据分析.csv")

    # Inaugural positions (per category).
    inauguralPosition(computer_vision, "计算机视觉")
    inauguralPosition(image_algorithms, "图像算法")
    inauguralPosition(database_management, "数据库管理")
    inauguralPosition(data_mining, "数据挖掘")
    inauguralPosition(big_data, "大数据分析")

    # Company size.
    # Fix: this used to be called a second time at the end of main with the
    # same arguments, redundantly recomputing and rewriting the same CSV.
    companySize(computer_vision1=computer_vision, image_algorithms1=image_algorithms,
                database_management1=database_management, data_mining1=data_mining,
                big_data1=big_data)

    # Degree requirements.
    degreeRequirements(computer_vision1=computer_vision, image_algorithms1=image_algorithms,
                       database_management1=database_management, data_mining1=data_mining,
                       big_data1=big_data)

    # Company benefits.
    companyBenefits(computer_vision1=computer_vision, image_algorithms1=image_algorithms,
                    database_management1=database_management, data_mining1=data_mining,
                    big_data1=big_data)

    # Job counts per district.
    districtJobs(computer_vision1=computer_vision, image_algorithms1=image_algorithms,
                 database_management1=database_management, data_mining1=data_mining,
                 big_data1=big_data)

    # Salary-range distribution (per category). Note: mutates each frame's
    # 薪资 column in place, so run after anything that needs raw salaries.
    DataPreprocessing(computer_vision, "计算机视觉")
    DataPreprocessing(image_algorithms, "图像算法")
    DataPreprocessing(database_management, "数据库管理")
    DataPreprocessing(data_mining, "数据挖掘")
    DataPreprocessing(big_data, "大数据分析")

    # Work experience.
    workExperience(computer_vision1=computer_vision, image_algorithms1=image_algorithms,
                   database_management1=database_management, data_mining1=data_mining,
                   big_data1=big_data)

    # Company type (per category).
    companyType(computer_vision, "计算机视觉")
    companyType(image_algorithms, "图像算法")
    companyType(database_management, "数据库管理")
    companyType(data_mining, "数据挖掘")
    companyType(big_data, "大数据分析")

    # Roll-data cleaning and processing.
    roll.clearData()