#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Txc001'

from datetime import date
from openpyxl import load_workbook, workbook, Workbook
from openpyxl.styles import Alignment, Font
from common.util import get_day_of_day, get_month_and_day
from db.mysql_handle import MysqlHandler, get_all
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import Series
import copy
import pylab as pl

from pylab import mpl
# Use the SimHei font so Chinese characters render in matplotlib titles/labels,
# and keep the minus sign displayable when a non-ASCII font is active.
mpl.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Connection settings for the local MySQL database holding the scraped postings.
MYSQL_CONFIG = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'root',
    'password': '',
    'db': 'lagou',
    'charset': 'utf8'
}

# Maps a from_site code to the human-readable site name used in chart titles.
site_name = {
    'zhaopin': '智联招聘网',
    '51job': '前程无忧网',
    'lagou': '拉勾网',
    'zhipin': 'BOSS直聘网',
    'all': '所有网站'
}
# Site codes iterated by the main script; 'all' means no per-site filter.
from_sites = ['all', '51job', 'zhaopin', 'lagou', 'zhipin']
# Table that stores the scraped job postings.
table_name = 'job_position'

def get_results_from_mysql(from_site, keyword, days=-56):
    """Fetch per-day counts of postings mentioning `keyword` on `from_site`.

    Groups postings whose description contains `keyword` by create_time over
    the window [today + days, today] (days is negative, i.e. "N days ago").

    :param from_site: site code ('51job', 'zhaopin', ...); 51job stores
        create_time as 'MM-DD', so the year is prepended for that site.
    :param keyword: substring searched in `description`; also used as the
        count column's alias in the result set.
    :param days: negative offset in days defining the window start.
    :return: rows from MysqlHandler.getAll — one per create_time with a
        count column named after `keyword`.
    """
    mysql_handler = MysqlHandler(MYSQL_CONFIG)
    try:
        if from_site == '51job':
            # 51job dates lack a year ('MM-DD'); prepend one so rows
            # group and sort chronologically.
            # NOTE(review): '2018-' is hard-coded — presumably the scrape year;
            # confirm before reusing on newer data.
            start_time = get_month_and_day(get_day_of_day(days))
            end_time = get_month_and_day(get_day_of_day())
            sql = (
                f"select concat('2018-', create_time) as create_time, count(*) as {keyword} "
                f"from {table_name} where from_site=%s and locate(%s, description) > 0 "
                "and create_time between %s and %s group by create_time order by create_time;"
            )
        else:
            start_time = str(get_day_of_day(days))
            end_time = str(date.today())
            sql = (
                f"select create_time, count(*) as {keyword} "
                f"from {table_name} where from_site=%s and locate(%s, description) > 0 "
                "and create_time between %s and %s group by create_time order by create_time;"
            )

        # NOTE(review): `keyword` and `table_name` are interpolated into the SQL
        # text (a column alias cannot be bound as a parameter) — safe only while
        # both come from trusted, in-file constants.
        results = mysql_handler.getAll(sql, (from_site, keyword, start_time, end_time))
    finally:
        # Fix: release the connection even when the query raises
        # (previously dispose() was skipped on any exception).
        mysql_handler.dispose()

    return results

def handle_data_by_pandas(results):
    """Aggregate raw per-day count rows into weekly totals.

    :param results: iterable of row mappings, each with a 'create_time' value
        plus one count column.
    :return: DataFrame indexed by week-end date with summed counts.
    """
    frame = pd.DataFrame(results)
    # Parse the textual dates into real timestamps, index on them,
    # then roll the daily counts up into weekly sums.
    frame['create_time'] = pd.to_datetime(frame['create_time'])
    return frame.set_index('create_time').resample('w').sum()

def create_thread_img(df, title):
    """Plot `df` as a red line chart with markers, save it under ./imgs, and show it.

    :param df: weekly-aggregated DataFrame (see handle_data_by_pandas).
    :param title: chart title; also used as the image file name.
    """
    figure = plt.figure()
    axes = figure.add_subplot(111)
    df.plot(ax=axes, title=title, color='red', marker='o')
    axes.set_xlabel('周')
    axes.set_ylabel('岗位数')
    plt.savefig(f'./imgs/{title}.jpg')
    plt.show()

def analysis_thread(from_site, keyword, days=56):
    """Query, aggregate and chart the recent posting trend for one keyword/site.

    :param from_site: site code (key of site_name).
    :param keyword: description substring to count.
    :param days: positive look-back window in days (negated for the query).
    """
    rows = get_results_from_mysql(from_site, keyword, -days)
    weekly = handle_data_by_pandas(rows)
    chart_title = f'{str(date.today())} {site_name[from_site]} 最近{days}天 {keyword} 岗位的趋势图'
    create_thread_img(weekly, chart_title)

# Python sub-field keywords matched against job descriptions (counter template).
py_keyword_li = [
    {'kw': 'django', 'cou': 0},
    {'kw': '爬虫', 'cou': 0},
    {'kw': 'web', 'cou': 0},
    {'kw': '运维', 'cou': 0},
    {'kw': '测试', 'cou': 0},
    {'kw': '数据分析', 'cou': 0},
    {'kw': '机器学习', 'cou': 0},
    {'kw': '算法', 'cou': 0},
]
# Data-analysis tool keywords; each bucket also collects matching rows. Last entry is the catch-all.
analysis_keyword_li = [
    {'kw': 'bi', 'cou': 0, 'position': []},
    {'kw': 'vba', 'cou': 0, 'position': []},
    {'kw': 'spss', 'cou': 0, 'position': []},
    {'kw': 'tableau', 'cou': 0, 'position': []},
    {'kw': 'etl', 'cou': 0, 'position': []},
    {'kw': 'excel', 'cou': 0, 'position': []},
    {'kw': '其它', 'cou': 0, 'position': []},
]

# Education buckets. Position matters: [-2] is the "other" bucket, [-1] the
# "no requirement" bucket (see education_handle).
education_li = [
    {'kw': '大专', 'cou': 0},
    {'kw': '本科', 'cou': 0},
    {'kw': '硕士', 'cou': 0},
    {'kw': '博士', 'cou': 0},
    {'kw': '其它', 'cou': 0},
    {'kw': '无学历要求', 'cou': 0},
]
# City buckets; rows from other cities are ignored by city_handle.
city_li = [
    {'kw': '北京', 'cou': 0},
    {'kw': '上海', 'cou': 0},
    {'kw': '广州', 'cou': 0},
    {'kw': '深圳', 'cou': 0},
    {'kw': '武汉', 'cou': 0},
    {'kw': '西安', 'cou': 0},
]
# Python position-name buckets; last entry is the catch-all (see python_position_handle).
position_li = [
    {'kw': '爬虫', 'cou': 0, 'position': []},
    {'kw': 'web', 'cou': 0, 'position': []},
    {'kw': '机器学习', 'cou': 0, 'position': []},
    {'kw': '算法', 'cou': 0, 'position': []},
    {'kw': '数据分析', 'cou': 0, 'position': []},
    {'kw': '测试', 'cou': 0, 'position': []},
    {'kw': '运维', 'cou': 0, 'position': []},
    {'kw': '工程师', 'cou': 0, 'position': []},
    {'kw': '其它', 'cou': 0, 'position': []},
]

# Data-analysis position buckets; [-2] ('excel') gets post-filtered, [-1] is the catch-all.
analysis_position_li = [
    {'kw': 'tableau', 'cou': 0, 'position': []},
    {'kw': 'spss', 'cou': 0, 'position': []},
    {'kw': 'etl', 'cou': 0, 'position': []},
    {'kw': 'vba', 'cou': 0, 'position': []},
    {'kw': 'bi', 'cou': 0, 'position': []},
    {'kw': 'excel', 'cou': 0, 'position': []},
    {'kw': '其它', 'cou': 0, 'position': []},
]

# Work-experience buckets: each 'kw' lists all textual aliases the sites use.
# work_year_handle reports each bucket under its first alias; unmatched values
# fall into the last bucket.
work_year_li = [
    {'kw': ['1-3年', '1-3年经验', '1年经验', '2年经验', '2-3年经验', '1-3年'], 'cou': 0},
    {'kw': ['3-5年', '3-5年经验', '3年经验', '3-4年经验', '4-5年经验'], 'cou': 0},
    {'kw': ['5-7年', '5-7年经验'], 'cou': 0},
    {'kw': ['1年以下'], 'cou': 0},
    {'kw': ['无经验', '无工作经验', '不限', '经验不限'], 'cou': 0},
    {'kw': ['7年以上', '7年以上经验'], 'cou': 0},
]

def get_py_all_results(from_site='all'):
    """Load every python-keyword posting (description lower-cased).

    :param from_site: site code; 'all' disables the per-site filter.
    :return: rows from get_all with description/position_name/education/city/work_year.
    """
    site_filter = '' if from_site == 'all' else f" and from_site='{from_site}'"
    sql = (
        "SELECT lower(description) as description, position_name, education, city, work_year "
        "FROM job_position where description!=''"
        + site_filter
        + " and keyword like 'python%'"
    )
    return get_all(sql)

def get_java_all_results(from_site='all'):
    """Load every java posting (description lower-cased).

    :param from_site: site code; 'all' disables the per-site filter.
    :return: rows from get_all with description/education/city/work_year.
    """
    site_filter = '' if from_site == 'all' else f" and from_site='{from_site}'"
    sql = (
        "SELECT lower(description) as description, education, city, work_year "
        "FROM job_position where description!=''"
        + site_filter
        + " and keyword = 'java'"
    )
    return get_all(sql)

def get_html5_all_results(from_site='all'):
    """Load every html5 posting (description lower-cased).

    :param from_site: site code; 'all' disables the per-site filter.
    :return: rows from get_all with description/education/city/work_year.
    """
    site_filter = '' if from_site == 'all' else f" and from_site='{from_site}'"
    sql = (
        "SELECT lower(description) as description, education, city, work_year "
        "FROM job_position where description!=''"
        + site_filter
        + " and keyword = 'html5'"
    )
    return get_all(sql)

def get_analysis_all_results(from_site='all'):
    """Load every data-analysis-tool posting (description lower-cased).

    :param from_site: site code; 'all' disables the per-site filter.
    :return: rows from get_all with description/position_name/education/city/work_year.
    """
    site_filter = '' if from_site == 'all' else f" and from_site='{from_site}'"
    sql = (
        "SELECT lower(description) as description, position_name, education, city, work_year "
        "FROM job_position where description!=''"
        + site_filter
        + " and keyword in ('excel', 'vba', 'spss', 'tableau', 'etl', 'bi')"
    )
    return get_all(sql)

def contains_handle(li, kw_li):
    """Count, per bucket in kw_li, how many rows' descriptions contain its keyword.

    A row can increment several buckets (one per matching keyword).

    :param li: iterable of row mappings with a 'description' field.
    :param kw_li: list of {'kw': str, 'cou': int} dicts, mutated in place.
    :return: kw_li (same object, counts updated).
    """
    for des in pd.DataFrame(li)['description']:
        for bucket in kw_li:
            if bucket['kw'] in des:
                bucket['cou'] += 1
    return kw_li

def education_handle(li, kw_li):
    """Tally rows into education buckets.

    Blank/falsy education goes to the last bucket ('无学历要求'); a value that
    matches no bucket goes to the second-to-last bucket ('其它').

    :param li: iterable of row mappings with an 'education' field.
    :param kw_li: education bucket list (see education_li), mutated in place.
    :return: kw_li (same object, counts updated).
    """
    for education in pd.DataFrame(li)['education']:
        if not education:
            kw_li[-1]['cou'] += 1
            continue
        matched = next((b for b in kw_li if b['kw'] == education), None)
        if matched is not None:
            matched['cou'] += 1
        else:
            kw_li[-2]['cou'] += 1
    return kw_li

def city_handle(li, kw_li):
    """Tally rows into city buckets; cities not listed in kw_li are ignored.

    :param li: iterable of row mappings with a 'city' field.
    :param kw_li: city bucket list (see city_li), mutated in place.
    :return: kw_li (same object, counts updated).
    """
    for city in pd.DataFrame(li)['city']:
        for bucket in kw_li:
            if bucket['kw'] == city:
                bucket['cou'] += 1
                break
    return kw_li

def position_handle_by_des(row, kw_li):
    """Classify one row into the first description-matched bucket (indexes 0-5).

    Checks the bucket rules in order and stops at the first hit, so a
    description mentioning both '爬虫' and 'web' counts only as 爬虫.

    :param row: row object exposing a 'description' attribute.
    :param kw_li: position bucket list (see position_li), mutated in place.
    :return: True when a bucket matched (row counted and stored), else False.
    """
    des = getattr(row, "description")

    # (bucket index, substrings that route a description into it) — order matters.
    rules = (
        (0, ('爬虫', 'scrapy', 'requests', 'urllib')),
        (1, ('django', 'flask', 'web')),
        (2, ('机器学习', 'tensorflow', 'sk-learn', 'sklearn')),
        (3, ('算法',)),
        (4, ('数据分析', 'pandas', 'numpy')),
        (5, ('测试用例', '自动化测试')),
    )
    for index, needles in rules:
        if any(needle in des for needle in needles):
            kw_li[index]['cou'] += 1
            kw_li[index]['position'].append(row)
            return True
    return False

def python_position_handle(li, kw_li):
    """Bucket python postings by position name, falling back to the description.

    A row whose position name contains the generic '工程师' keyword is first
    re-routed through position_handle_by_des; only when that finds nothing does
    it stay in the '工程师' bucket. Rows matching nothing at all land in the
    last (catch-all) bucket.

    :param li: iterable of row mappings with 'position_name' and 'description'.
    :param kw_li: position bucket list (see position_li), mutated in place.
    :return: kw_li (same object, counts and positions updated).
    """
    for row in pd.DataFrame(li).itertuples():
        name = getattr(row, "position_name").lower()

        matched = None
        for bucket in kw_li:
            if bucket['kw'] in name:
                matched = bucket
                break

        if matched is None:
            # Name gave no hint — try the description, else the catch-all.
            if not position_handle_by_des(row, kw_li):
                kw_li[-1]['cou'] += 1
                kw_li[-1]['position'].append(row)
            continue

        # Generic '工程师' titles are reclassified by description when possible.
        if matched['kw'] == '工程师' and position_handle_by_des(row, kw_li):
            continue

        matched['cou'] += 1
        matched['position'].append(row)

    return kw_li

def analysis_position_handle(li, kw_li):
    """Bucket data-analysis postings by position name, then description.

    Each row goes to the first bucket whose keyword appears in the lower-cased
    position name; failing that, the first whose keyword appears in the
    description; failing that, the last (catch-all) bucket. Afterwards the
    'excel' bucket (kw_li[-2]) is narrowed to rows whose description also
    mentions '数据分析'.

    :param li: iterable of row mappings with 'position_name' and 'description'.
    :param kw_li: bucket list (see analysis_position_li); kw_li[-2] is replaced.
    :return: kw_li (same object, updated).
    """
    def _first_match(text):
        # First bucket whose keyword is a substring of `text`, or None.
        for bucket in kw_li:
            if bucket['kw'] in text:
                return bucket
        return None

    for row in pd.DataFrame(li).itertuples():
        bucket = _first_match(getattr(row, "position_name").lower())
        if bucket is None:
            bucket = _first_match(getattr(row, "description"))
        if bucket is None:
            bucket = kw_li[-1]
        bucket['cou'] += 1
        bucket['position'].append(row)

    # Keep only the excel rows that are genuinely about data analysis.
    filtered = [r for r in kw_li[-2]['position'] if '数据分析' in getattr(r, "description")]
    kw_li[-2] = {'kw': 'excel', 'cou': len(filtered), 'position': filtered}

    return kw_li

def work_year_handle(li, kw_li):
    """Tally rows into experience buckets; each bucket lists several aliases.

    A work_year value matching no bucket's alias list falls into the last
    bucket. The result flattens each bucket to its first alias as the label.

    :param li: iterable of row mappings with a 'work_year' field.
    :param kw_li: bucket list (see work_year_li), mutated in place.
    :return: new list of {'kw': first-alias, 'cou': count} dicts.
    """
    for work_year in pd.DataFrame(li)['work_year']:
        bucket = next((b for b in kw_li if work_year in b['kw']), kw_li[-1])
        bucket['cou'] += 1
    return [{'kw': b['kw'][0], 'cou': b['cou']} for b in kw_li]

def analysis_bar(kw_li, title):
    """Render kw_li counts as a red bar chart, save it under ./imgs, and show it.

    :param kw_li: list of {'kw': label, 'cou': count} dicts.
    :param title: chart title; also used as the image file name.
    """
    counts = [entry['cou'] for entry in kw_li]
    labels = [entry['kw'] for entry in kw_li]

    figure = plt.figure()
    axes = figure.add_subplot(111)
    pd.Series(counts, index=labels).plot.bar(ax=axes, color='red', alpha=0.7, title=title)

    # Keep the x-axis labels horizontal instead of matplotlib's default tilt.
    pl.xticks(rotation=360)

    plt.savefig(f'./imgs/{title}.jpg')
    plt.show()

def analysis_pie(kw_li, title):
    """Render kw_li as a labelled pie chart (largest slice first), save and show.

    NOTE: sorts kw_li in place, descending by count — callers see the reorder.

    :param kw_li: list of {'kw': label, 'cou': count} dicts.
    :param title: chart title; also used as the image file name.
    """
    kw_li.sort(key=lambda entry: entry['cou'], reverse=True)

    figure = plt.figure(figsize=(8, 8))
    axes = figure.add_subplot(111)

    counts = [entry['cou'] for entry in kw_li]
    labels = [f"{entry['kw']}:{entry['cou']}" for entry in kw_li]

    pl.xticks(rotation=360)
    axes.set_title(title)
    axes.pie(counts, labels=labels, autopct='%1.2f%%')
    plt.legend(loc='upper left')

    plt.savefig(f'./imgs/{title}.jpg')
    plt.show()

def create_pie(result, title_kw):
    """Draw today's pie chart for `result`, titling it with the date and `title_kw`."""
    analysis_pie(result, f'{str(date.today())} 的 {title_kw} 饼图分析')

def handle_special_excel_data(r, citys, work_years, educations):
    """Flatten bucketed positions into per-(bucket, city) report rows.

    Each output row is [kw, city, total-in-city, experience-matched-count,
    edu1, count1, edu2, count2, ...]. Education counts only include positions
    that also matched one of `work_years`.

    :param r: bucket dicts with 'kw' and 'position' (rows exposing city/
        work_year/education attributes; cities are assumed to lie in `citys`).
    :param citys: cities to report on (one output row per bucket per city).
    :param work_years: accepted work_year alias values.
    :param educations: education levels broken out per city.
    :return: list of report rows ready for write_to_excel.
    """
    final_li = []

    for bucket in r:
        totals = {city: 0 for city in citys}
        experienced = {city: 0 for city in citys}
        by_education = {city: {edu: 0 for edu in educations} for city in citys}

        for position in bucket['position']:
            city = getattr(position, "city")
            totals[city] += 1

            if getattr(position, "work_year") in work_years:
                experienced[city] += 1
                # Education is only broken out for experience-matched rows.
                education = getattr(position, "education")
                if education in educations:
                    by_education[city][education] += 1

        for city in citys:
            line = [bucket['kw'], city, totals[city], experienced[city]]
            for edu in educations:
                line.extend((edu, by_education[city][edu]))
            final_li.append(line)

    return final_li

def create_special_excel(from_site):
    """Build per-city excel reports (python & data-analysis) for one site.

    Does nothing for 'all' — these reports are per-site only.

    :param from_site: site code (key of site_name, not 'all').
    """
    if from_site == 'all':
        return

    work_years = tuple(work_year_li[0]['kw'])
    citys = tuple(city['kw'] for city in city_li)
    educations = ('本科', '大专')

    titles = ['方向', '岗位', '城市', '岗位总数', '1-3年经验岗位数(占比)', '文凭', '岗位数(占比)', '文凭', '岗位数(占比)']

    # Shared SELECT for both report directions; only the keyword filter differs.
    base_sql = (
        "select city, work_year, education, lower(position_name) as position_name, "
        "lower(description) as description from job_position "
        f"where from_site='{from_site}' and city in {str(citys)}"
    )

    # python direction (drop the '工程师' and catch-all buckets).
    results = get_all(base_sql + " and keyword like 'python%'")
    buckets = python_position_handle(results, copy.deepcopy(position_li))
    rows = handle_special_excel_data(buckets[:-2], citys, work_years, educations)
    write_to_excel(f'./excel/{site_name[from_site]}_python方向_{work_years[0]}_各城市分析.xlsx', titles, rows, 'python')

    # data-analysis direction (drop the catch-all bucket).
    results = get_all(base_sql + " and keyword in ('excel', 'vba', 'spss', 'tableau', 'etl', 'bi')")
    buckets = analysis_position_handle(results, copy.deepcopy(analysis_position_li))
    rows = handle_special_excel_data(buckets[:-1], citys, work_years, educations)
    write_to_excel(f'./excel/{site_name[from_site]}_数据分析方向_{work_years[0]}_各城市分析.xlsx', titles, rows, '数据分析')

def write_to_excel(file_name, titles, rows, first_col=''):
    """Write report rows to an .xlsx file, formatting count columns as 'n(p.p%)'.

    Each row in `rows` is [kw, city, total, exp_count, edu, edu_count, edu,
    edu_count] (see handle_special_excel_data). Column 3 becomes exp_count as
    a percentage of the city total; columns 5 and 7 become education counts as
    a percentage of exp_count.

    NOTE: mutates the lists inside `rows` in place.

    :param file_name: output .xlsx path.
    :param titles: header labels; bold and centered when non-empty.
    :param rows: data rows, mutated then appended after `first_col`.
    :param first_col: value prepended to every data row (the "direction").
    """
    wb = Workbook()
    sheet = wb.active

    if titles:
        # Bold, centered header row.
        for col, title in enumerate(titles):
            cell = sheet.cell(row=1, column=col + 1)
            cell.font = Font('黑体', bold=True)
            cell.alignment = Alignment(horizontal='center')
            cell.value = title

    for li in rows:
        total = li[2]  # renamed from `all` — the original shadowed the builtin
        count = li[3]

        if total == 0:
            li[3] = '{}({:.1f}%)'.format(0, 0)
        else:
            li[3] = '{}({:.1f}%)'.format(count, count / total * 100)

        if count == 0:
            li[5] = '{}({:.1f}%)'.format(0, 0)
            li[7] = '{}({:.1f}%)'.format(0, 0)
        else:
            # Education shares are relative to the experience-matched count,
            # not the city total — presumably intentional; confirm with the
            # report owner.
            li[5] = '{}({:.1f}%)'.format(li[5], li[5] / count * 100)
            li[7] = '{}({:.1f}%)'.format(li[7], li[7] / count * 100)

        sheet.append([first_col] + li)

    wb.save(file_name)

def position_handle_by_city(r):
    """Group each bucket's positions by city under a new 'city' key.

    Adds row['city'] = {city_name: [position, ...]} to every bucket in `r`.

    :param r: bucket dicts with a 'position' list of row objects exposing
        a 'city' attribute; each dict gains/overwrites its 'city' key.
    """
    for row in r:
        row['city'] = {}
        for position in row['position']:
            city = getattr(position, "city")
            # BUG FIX: the original appended the whole bucket dict (`row`)
            # instead of the position. Downstream code only called len() on
            # these lists, so counts were unaffected, but the stored contents
            # were wrong (and self-referential).
            row['city'].setdefault(city, []).append(position)

def create_pie_city(r, title):
    """Draw one pie per city showing how its positions split across buckets.

    Requires position_handle_by_city to have populated row['city'] first.

    :param r: bucket dicts with 'kw' and a 'city' grouping dict.
    :param title: format string containing one %s placeholder for the city.
    """
    per_city = {}
    for bucket in r:
        for city_info in city_li:
            city = city_info['kw']
            members = bucket['city'].get(city) or []
            per_city.setdefault(city, []).append({'kw': bucket['kw'], 'cou': len(members)})

    for city, slices in per_city.items():
        create_pie(slices, title % city)



if __name__ == '__main__':
    # Active pipeline: for every site, bucket python postings by position name,
    # draw the overall pie, then one pie per city. Everything else below is
    # previous analyses left commented out.
    # print(test_get_results())

    # keywords = ['django', 'scrapy', '爬虫']
    #
    # for from_site in from_sites:
    #     for keyword in keywords:
    #         analysis_thread(from_site, keyword)

    # for from_site in from_sites[:1]:
    for from_site in from_sites:
        li_python = get_py_all_results(from_site)
    #     li_java = get_java_all_results(from_site)
    #     li_analysis = get_analysis_all_results(from_site)
    #     li_html5 = get_html5_all_results(from_site)
    #
    #     # job skills
    #     r = education_handle(li_java, copy.deepcopy(education_li))
    #     title = f'{ str(date.today())}{ site_name[from_site] }的java岗位 学历 饼图分析'
    #     analysis_pie(r, title)
    #
    #     r = education_handle(li_html5, copy.deepcopy(education_li))
    #     title = f'{ str(date.today())}{ site_name[from_site] }的html5岗位 学历 饼图分析'
    #     analysis_pie(r, title)
    #     #
    #     # # city
    #     r = city_handle(li_java, copy.deepcopy(city_li))
    #     title = f'{ str(date.today())}{ site_name[from_site] }的java岗位 城市 饼图分析'
    #     analysis_pie(r, title)
    #
    #     r = city_handle(li_html5, copy.deepcopy(city_li))
    #     title = f'{ str(date.today())}{ site_name[from_site] }的html5岗位 城市 饼图分析'
    #     analysis_pie(r, title)
    #
    #     r = work_year_handle(li_java, copy.deepcopy(work_year_li))
    #     title = f'{ str(date.today())}{ site_name[from_site] }的java岗位 工作经验 饼图分析'
    #     analysis_pie(r, title)
    #
    #     r = work_year_handle(li_html5, copy.deepcopy(work_year_li))
    #     title = f'{ str(date.today())}{ site_name[from_site] }的html5岗位 工作经验 饼图分析'
    #     analysis_pie(r, title)
    #     #
    #     #
    #     # # python analysis
        r = python_position_handle(li_python, copy.deepcopy(position_li))
        create_pie(copy.deepcopy(r), f'{ site_name[from_site] } 所有 python 岗位')

        # Re-split the python buckets by city and draw one pie per city
        position_handle_by_city(r)
        create_pie_city(r, f'{ site_name[from_site] } %s 所有 python 岗位')

    #
    #     for position in r[:-2]:
    #         positions_li = position['position']
    #         name = position['kw']
    #
    #         r = work_year_handle(positions_li, copy.deepcopy(work_year_li))
    #         create_pie(r, f'{ site_name[from_site] } python ' + name + ' 工作经验 ')
    #
    #         r = city_handle(positions_li, copy.deepcopy(city_li))
    #         create_pie(r, f'{ site_name[from_site] } python ' + name + ' 城市 ')
    #
    #         r = education_handle(positions_li, copy.deepcopy(education_li))
    #         create_pie(r, f'{ site_name[from_site] } python ' + name + ' 学历 ')
    #
    #     # data analysis
    #     r = analysis_position_handle(li_analysis, copy.deepcopy(analysis_position_li))
    #     create_pie(copy.deepcopy(r), f'{ site_name[from_site] } 所有 数据分析 岗位')
    #
    #     for position in r[:-1]:
    #         positions_li = position['position']
    #         name = position['kw']
    #
            # r = work_year_handle(positions_li, copy.deepcopy(work_year_li))
            # create_pie(r, f'{ site_name[from_site] } 数据分析 ' + name + ' 工作经验 ')
            #
            # r = city_handle(positions_li, copy.deepcopy(city_li))
            # create_pie(r, f'{ site_name[from_site] } 数据分析 ' + name + ' 城市 ')
            #
            # r = education_handle(positions_li, copy.deepcopy(education_li))
            # create_pie(r, f'{ site_name[from_site] } 数据分析 ' + name + ' 学历 ')


        # per-city / education / work-experience excel report (disabled)
        # create_special_excel(from_site)


        pass


