# -*- coding:utf8 -*-
import importlib
import re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
from lxml import etree
try:
    from scrapy.spiders import Spider
except:
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http import HtmlResponse

from gaokaopai.items import *
from gaokaopai.dao import *
from gaokaopai.util import *
from bs4 import BeautifulSoup
import requests
import lxml.html
import logging
import demjson

# Python-2-era remnant: `reload(sys)` used to restore the hidden
# sys.setdefaultencoding() (see the commented-out call below). Under
# Python 3 this reload is effectively a no-op kept for historical parity.
importlib.reload(sys)
# sys.setdefaultencoding("utf-8")

class DiyigaokaoMajorSpider(Spider):
    """Crawl diyigaokao.com associate-degree (专科) major pages and persist
    the scraped details plus employment statistics into the t_major table.

    Flow: start_requests -> parse_list (major index page) -> parse_major
    (detail page; updates the descriptive columns) -> parse_job
    (就业解析 employment-analysis page; updates the job_* columns of the
    same row, whose id is passed along in request meta).
    """

    name  = 'diyigaokao_major'
    # FIX: was 'diyigaokao.com.com' (duplicated TLD) — an allow-list entry
    # that could never match the site actually being crawled.
    allow = ['diyigaokao.com']

    def __init__(self, *args, **kwargs):
        # type=1 selects the associate-degree variant in the
        # getMajorByNameAndCode lookup (project DAO).
        self.type = 1
        # Records majors that could not be matched to an existing DB row.
        # NOTE(review): this handle is never closed explicitly; consider
        # closing it in a spider_closed signal handler.
        self.file_object = open('log.txt', 'w+')
        super(DiyigaokaoMajorSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        """Seed the crawl with the associate-degree major index page."""
        yield Request("http://www.diyigaokao.com/major/zk/",
                      callback=self.parse_list, dont_filter=True)

    def parse_list(self, response):
        """Follow every major link on the index page to its detail page."""
        base_url = get_base_url(response)

        for link in response.xpath(u"//section/ul[contains(@class, 'cf')]/li/a"):
            url = ''.join(link.xpath(u"./@href").extract())
            yield Request(urljoin_rfc(base_url, url),
                          callback=self.parse_major, dont_filter=True)

    def parse_major(self, response):
        """Scrape one major's detail page and update its t_major row.

        Majors with no matching DB row are appended to log.txt; matched
        ones get their descriptive columns updated, and the 就业解析 page
        (if linked) is queued for parse_job with the row id in meta.
        """
        base_url = get_base_url(response)

        code = ''.join(response.xpath(u"//li[contains(text(), '专业代码：')]/text()").extract()).strip().replace('专业代码：', '')
        name = ''.join(response.xpath(u"//h3[contains(@id, 'specaltlyDetails')]/text()").extract()).replace('专业详情', '')

        major = getMajorByNameAndCode(code, name, self.type)

        # FIX: identity comparison with None (was `major == None`).
        if major is None:
            self.file_object.write(name + ":" + code + "\n")
            return

        occupation = '||'.join(response.xpath(u"//div[contains(@class, 'details')]/ol[1]/li/text()").extract())
        objective  = ''.join(response.xpath(u"//div[contains(@class, 'inner')]/p/strong[contains(text(), '专业培养目标')]/parent::p/text()").extract())
        require    = ''.join(response.xpath(u"//div[contains(@class, 'inner')]/p/strong[contains(text(), '专业培养要求')]/parent::p/text()").extract())
        core       = ''.join(response.xpath(u"//div[contains(@class, 'inner')]/p/strong[contains(text(), '主干学科')]/parent::p/text()").extract())
        kernel     = ''.join(response.xpath(u"//div[contains(@class, 'inner')]/p/strong[contains(text(), '主要课程')]/parent::p/text()").extract())
        knowledge  = ''.join(response.xpath(u"//div[contains(@class, 'inner')]/p/strong[contains(text(), '知识和能力')]/parent::p/node()").extract())

        # SECURITY(review): SQL built by string interpolation from scraped
        # (untrusted) page text — a single quote in any field breaks the
        # statement and opens an injection vector. Switch executeSql to
        # parameterized queries if the DAO supports placeholders.
        sql = "update t_major set `occupation`='%s', `objective`='%s', `require`='%s', `core`='%s', `kernel`='%s', `knowledge`='%s' where id='%s'" % \
              (occupation, objective, require, core, kernel, knowledge, major['id'])

        executeSql(sql)

        # Queue the employment-analysis (就业解析) page, if the detail
        # page links to one, carrying the row id forward.
        job_url = ''.join(response.xpath(u"//li/a[contains(text(), '就业解析')]/@href").extract())
        if job_url != '':
            yield Request(urljoin_rfc(base_url, job_url),
                          callback=self.parse_job, dont_filter=True,
                          meta={'id': major['id']})

    def parse_job(self, response):
        """Scrape the employment-analysis page and update the job_* columns.

        The industry / city / salary distributions are embedded in inline
        Highcharts JavaScript, so they are pulled out with regexes and
        stored as 'label:value||label:value' strings.
        """
        meta = response.meta
        # FIX: the regexes below use str patterns, but under Python 3
        # response.body is bytes and would raise TypeError — match against
        # the decoded response.text instead.
        body = response.text

        job_industry = ''
        job_city     = ''
        job_salary   = ''

        # Industry distribution (pie chart).
        pie = ''.join(response.xpath(u"//div[contains(@id, 'pie')]/@id").extract())
        # FIX: was `if pie != None`, always true because ''.join() never
        # returns None; test for a non-empty id so a missing chart is
        # skipped instead of crashing on the regex indexing below.
        if pie:
            # The pie data is the second-to-last `data: [...]` literal.
            data = re.findall(r'data: (.*?)]', body, re.S)
            list_data = demjson.decode(data[-2] + ']')

            pairs = []
            for item in list_data:
                pairs.append(item['name'] + ":" + str(item['y']))
            job_industry = '||'.join(pairs)

        # City distribution (bar chart).
        bar = ''.join(response.xpath(u"//div[contains(@id, 'bar')]/@id").extract())
        if bar:  # FIX: was `!= None` (always true), same as above.
            data  = re.findall(r'data: (.*?)]', body, re.S)
            data2 = re.findall(r'categories:(.*?)]', body, re.S)

            values = data[-1].replace('[', '').strip().split(',')
            cities = data2[-1].replace('[', '').strip().split(',')

            city_pairs = []
            for key, city in enumerate(cities):
                city = city.replace("'", '')
                # Pad with 0 when there are fewer values than cities.
                num = 0 if key > len(values) - 1 else values[key]
                city_pairs.append(city + ":" + str(num))
            job_city = '||'.join(city_pairs)

        # Salary over seniority (line chart).
        line = ''.join(response.xpath(u"//div[contains(@id, 'line')]/@id").extract())
        if line:  # FIX: was `!= None` (always true), same as above.
            categories = re.findall(r'categories:(.*?)]', body, re.S)
            arr_categories = categories[0].replace('[', '').strip().split(',')

            # The final point is emitted separately as `y : <n>,marker`.
            last_num = re.findall(r'y :(.*?),marker', body, re.S)

            arr_categories_val = []
            series = re.findall(r'data: (.*?)]', body, re.S)
            if len(series) > 3:
                # The last three `data:` literals belong to other charts;
                # only the leading ones carry salary points.
                for index, val in enumerate(series):
                    if index <= len(series) - 4:
                        arr_val = val.replace('[', '').split(',')
                        num = 0 if index > len(arr_val) - 1 else arr_val[index]
                        arr_categories_val.append(num)
                    if index == len(series) - 4:
                        # One extra point taken from the same series.
                        arr_categories_val.append(arr_val[index + 1])
                if last_num:
                    arr_categories_val.append(last_num[0])

            salary_pairs = []
            for idx, age in enumerate(arr_categories):
                # Pad with 0 when fewer values than category labels.
                num2 = 0 if idx > len(arr_categories_val) - 1 else arr_categories_val[idx]
                salary_pairs.append(age.replace("'", '') + ":" + str(num2))
            job_salary = '||'.join(salary_pairs)

        if job_industry != '' or job_city != '' or job_salary != '':
            # SECURITY(review): string-built SQL from scraped text — see
            # the note in parse_major; prefer parameterized queries.
            sql = "update t_major set `job_industry`='%s', `job_city`='%s', `job_salary`='%s' where id='%s'" % \
                  (job_industry, job_city, job_salary, meta['id'])

            print("========" * 10)
            print(sql, meta['id'])
            print("========" * 10)

            executeSql(sql)
