#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
import os
import json
import string
import jieba
import matplotlib
import pandas as pd
import numpy as np
from collections import Counter, OrderedDict
import matplotlib.pyplot as plt
# NOTE(review): matplotlib.use() is called AFTER pyplot was already imported
# above, so the backend switch may be ignored -- consider moving this call
# before the pyplot import.
matplotlib.use('TkAgg')
# Load the custom segmentation dictionary for jieba at import time.
jieba.load_userdict(os.path.join(os.path.abspath(
    os.path.dirname(__file__)), 'data/mydict.txt'))
import sys
# Python 2-only default-encoding hack: reload(sys)/setdefaultencoding do not
# exist in Python 3, so this module requires Python 2.
reload(sys)
sys.setdefaultencoding('utf8')

# Use a font that contains CJK glyphs so Chinese labels render correctly.
matplotlib.rcParams['font.sans-serif'] = "SimHei"
# pdnames = ['position_name', 'salary', 'job_request',
#            'description', 'advantage', 'company_name', 'address', 'url', 'company_homepage']


def work_path(file_path):
    curent_path = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(curent_path, file_path)


df = pd.read_csv(work_path('data/jobs.csv'))


def salary_plot(dataframe=None, of=work_path('data/salary.json')):
    '''Scatter plot of salary levels (薪资水平散点图).

    Loads cached aggregates from the JSON cache file ``of`` when present;
    otherwise derives them from ``dataframe['salary']`` (strings such as
    "10k-20k"), writes the cache, then renders the plot.

    :param dataframe: pandas DataFrame with a ``salary`` column, or None.
    :param of: path of the JSON cache file.
    '''
    try:
        # EAFP: prefer the cached aggregation if it exists and parses.
        # ``with`` guarantees the cache file is closed (the original leaked it).
        with open(of, 'r') as cache:
            result = json.load(cache)
        counts = result['counts']
        unique = result['salary_range']
    except (IOError, ValueError):
        if dataframe is None:
            # No cache and no raw data: report and bail out BEFORE plotting.
            # (The original plotted from a ``finally`` block, which raised a
            # NameError on ``unique``/``counts`` and masked the SystemExit.)
            print('failed to obtain data')
            sys.exit()
        salary_val = dataframe['salary'].values
        # "10k-20k" -> ['10', '20']; a single value yields a one-item list.
        seq = [re.sub(r'[^\d-]', '', s).split('-') for s in salary_val]
        # Collect both the lower and the upper bound of every range.
        salary = [int(pair[0]) for pair in seq] + \
            [int(pair[-1]) for pair in seq]
        unique = set(salary)
        counts = [salary.count(item) for item in unique]
        # Use a distinct name so the ``of`` parameter is not shadowed.
        with open(of, 'w') as cache:
            json.dump(
                {'salary_range': list(unique),
                 'counts': counts}, cache)
    x = np.array(list(unique))
    y = np.array(counts)
    plt.scatter(x, y, alpha=0.4)
    plt.ylabel(u'quantity')
    plt.xlabel(u'salary(K)')
    plt.grid(True)
    plt.title('salary standard')
    plt.show()


def city_plot(dataframe=None, of=work_path('data/city.json')):
    '''Bar chart of job counts per city (城市分布柱状图).

    Loads cached counts from the JSON cache file ``of`` when present;
    otherwise takes the first space-separated token of each
    ``job_request`` as the city, keeps the 20 most common, caches them,
    then renders the chart.

    :param dataframe: pandas DataFrame with a ``job_request`` column, or None.
    :param of: path of the JSON cache file.
    '''
    try:
        # ``with`` guarantees the cache file is closed (the original leaked it).
        with open(of, 'r') as cache:
            result = OrderedDict(
                sorted(json.load(cache).items(),
                       key=lambda t: t[1], reverse=True))
        counts = list(result.values())
        cities = list(result.keys())
    except (IOError, ValueError):
        if dataframe is None:
            # Bail out BEFORE plotting; the original plotted from ``finally``
            # and crashed with a NameError that masked the SystemExit.
            print('failed to obtain data')
            sys.exit()
        content = dataframe['job_request'].values
        # First space-separated token of job_request is the city name.
        city_words = [req.split(' ')[0] for req in content]
        city_counts = OrderedDict(
            sorted(Counter(city_words).most_common(20),
                   key=lambda t: t[1], reverse=True))
        cities = list(city_counts.keys())
        counts = list(city_counts.values())
        # Distinct name so the ``of`` parameter is not shadowed.
        with open(of, 'w') as cache:
            json.dump(city_counts, cache)
    x = range(len(cities))
    y = np.array(counts)
    plt.bar(x, y, tick_label=cities, color='rgb')
    plt.ylabel(u'quantity')
    plt.xlabel(u'city')
    plt.grid(False)
    plt.title('city distribution')
    plt.show()


def experience_plot(dataframe=None, of=work_path('data/experience.json')):
    '''Bar chart of required work experience (经验要求情况柱状图).

    Loads cached counts from the JSON cache file ``of`` when present;
    otherwise parses the second token of each ``job_request``, normalizes
    it, caches the counts, then renders the chart.

    :param dataframe: pandas DataFrame with a ``job_request`` column, or None.
    :param of: path of the JSON cache file.
    '''
    def clean(word):
        # Strip the characters 经/验/年, then merge the "5-7" bucket into
        # "5-10". NOTE(review): ``word.decode`` assumes a Python 2 byte
        # string; this helper is not Python 3 compatible.
        first = re.sub(u'[经验年]', '', word.decode('utf-8'))
        result = re.sub(u'5-7', u'5-10', first)
        return result

    try:
        # ``with`` guarantees the cache file is closed (the original leaked it).
        with open(of, 'r') as cache:
            result = OrderedDict(
                sorted(json.load(cache).items(),
                       key=lambda t: t[1], reverse=True))
        counts = list(result.values())
        experiences = list(result.keys())
    except (IOError, ValueError):
        if dataframe is None:
            # Bail out BEFORE plotting; the original plotted from ``finally``
            # and crashed with a NameError that masked the SystemExit.
            print('failed to obtain data')
            sys.exit()
        content = dataframe['job_request'].values
        # Second space-separated token of job_request is the experience field.
        exper_words = [clean(req.split(' ')[1]) for req in content]
        exper_counts = OrderedDict(
            sorted(Counter(exper_words).items(),
                   key=lambda t: t[1], reverse=True))
        experiences = list(exper_counts.keys())
        counts = list(exper_counts.values())
        # Distinct name so the ``of`` parameter is not shadowed.
        with open(of, 'w') as cache:
            json.dump(exper_counts, cache)
    x = range(len(experiences))
    y = np.array(counts)
    plt.bar(x, y, tick_label=experiences, color='rgb')
    plt.ylabel(u'quantity')
    plt.xlabel(u'experience(Year)')
    plt.grid(False)
    plt.title('experience distribution')
    plt.show()


def education_plot(dataframe=None, of=work_path('data/education.json')):
    '''Pie chart of education requirements (学历要求情况饼图).

    Loads cached counts from the JSON cache file ``of`` when present;
    otherwise takes the third token of each ``job_request``, keeps the 5
    most common, caches the counts, then renders the chart.

    :param dataframe: pandas DataFrame with a ``job_request`` column, or None.
    :param of: path of the JSON cache file.
    '''
    try:
        # ``with`` guarantees the cache file is closed (the original leaked it).
        with open(of, 'r') as cache:
            result = OrderedDict(
                sorted(json.load(cache).items(),
                       key=lambda t: t[1], reverse=True))
        counts = list(result.values())
        edu = list(result.keys())
    except (IOError, ValueError):
        if dataframe is None:
            # Bail out BEFORE plotting; the original plotted from ``finally``
            # and crashed with a NameError that masked the SystemExit.
            print('failed to obtain data')
            sys.exit()
        content = dataframe['job_request'].values
        # Third space-separated token of job_request is the education field.
        edu_words = [req.split(' ')[2] for req in content]
        edu_counts = OrderedDict(
            sorted(Counter(edu_words).most_common(5),
                   key=lambda t: t[1], reverse=True))
        edu = list(edu_counts.keys())
        counts = list(edu_counts.values())
        # Distinct name so the ``of`` parameter is not shadowed.
        with open(of, 'w') as cache:
            json.dump(edu_counts, cache)
    slices = np.array(counts)
    smallest = slices.min()
    # Pull the smallest slice(s) slightly out of the pie for emphasis.
    explode = [0.1 if item == smallest else 0 for item in slices]
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
    plt.pie(slices, labels=edu, explode=explode, colors=colors,
            autopct='%1.3f%%', startangle=90)
    plt.title('education distribution')
    plt.show()


def job_desc_kw(dataframe=None):
    '''Segment every job description with jieba and print all tokens in
    descending order of frequency, one per line.'''
    descriptions = [d for d in dataframe['description'].values
                    if d is not np.nan]
    corpus = clean_text(' '.join(descriptions))
    tokens = jieba.cut(corpus)
    freq = OrderedDict(
        sorted(Counter(tokens).items(),
               key=lambda pair: pair[1], reverse=True))
    for word in freq:
        print(word)

def position_desc_kw(dataframe=None):
    '''Segment every position name with jieba and print the 200 most
    frequent tokens in descending order of frequency, one per line.'''
    names = [n for n in dataframe['position_name'].values
             if n is not np.nan]
    corpus = clean_text(' '.join(names))
    tokens = jieba.cut(corpus)
    freq = OrderedDict(
        sorted(Counter(tokens).most_common(200),
               key=lambda pair: pair[1], reverse=True))
    for word in freq:
        print(word)


def clean_text(text):
    '''Collapse newlines/spaces and replace digits, whitespace and both
    ASCII and full-width CJK punctuation with single spaces.

    NOTE(review): Python 2 only -- ``text.decode('utf-8')`` assumes a UTF-8
    byte string; Python 3 ``str`` has no ``decode``.
    '''
    content = re.sub(u'\n+', '', text.decode('utf-8'))
    content = re.sub(u' +', '', content)
    # Character class built from string.punctuation plus full-width CJK
    # punctuation. Some inserted characters ([, ], \, -) are regex
    # metacharacters inside a class -- presumably the resulting pattern
    # still parses as intended, but verify against re on the target runtime.
    punct = "[\d\s{}——！；：•▲．→一【】，。？、~@  # ￥%……&*（）]+".format(
        string.punctuation).decode('utf8')
    data = re.sub(punct, ' ', content)
    return data


if __name__ == '__main__':
    # Only the position-name keyword report runs by default; the plotting
    # functions and job_desc_kw are invoked manually as needed.
    # pass
    # job_desc_kw()
    position_desc_kw(df)

    # print workpath