import scrapy
import time
import random
import requests
from bs4 import BeautifulSoup
import re
import datetime
from tutorial.items import LiePinItem

def parseTime(time) -> str:
    """Normalize a posting's publication timestamp to a date string.

    Three input shapes are handled:
      * ends with '天发布' ("N days ago")  -> yesterday's date, ISO format.
      * ends with '日发布' ("M月D日发布")   -> 'YYYY-M-D'; a month later than
        the current one can only refer to the previous year, otherwise the
        current year is used (the original hard-coded 2019/2020).
      * anything else                      -> today's date, ISO format.

    All branches now return str, matching the declared return type (the
    original returned datetime.date objects on two branches).
    """
    today = datetime.date.today()
    suffix = time[-3:]

    if suffix == '天发布':
        # "N days ago" style: approximate with yesterday, as before.
        return (today - datetime.timedelta(days=1)).isoformat()
    elif suffix == '日发布':
        m_index = time.index('月')
        d_index = time.index('日')
        month = time[:m_index]
        day = time[m_index + 1:d_index]
        # A posting month greater than the current month must be last year.
        year = today.year - 1 if int(month) > today.month else today.year
        # No zero-padding, to match the original 'YYYY-M-D' output shape.
        return '{}-{}-{}'.format(year, month, day)
    else:
        return today.isoformat()

def parseMonthToDay(salary) -> str:
    """Normalize a salary string to a per-day figure.

    '面议' (negotiable) is returned unchanged.  A value quoted per month
    ('N元/月') is divided by 30 and re-labelled 'N元/日'.  Anything else
    (already per day, etc.) is returned as-is.
    """
    if salary == '面议':
        return salary
    if salary.endswith('月'):
        amount = salary[:-3]  # strip the trailing '元/月' unit
        # Floor division: the py2-style '/ 30' became true division in py3
        # and produced strings like '100.0元/日'; whole yuan is all the
        # downstream bucketing needs.
        per_day = int(float(amount)) // 30
        return str(per_day) + '元/日'
    return salary
    
def parseSalary(salary) -> int:
    """Bucket a day-normalized salary string for later visualization.

    Returns 0 for '面议' (negotiable), 1 for a daily wage of at most 100,
    and 2 for a daily wage above 100.
    """
    if salary == '面议':
        return 0
    # Drop the 3-character unit suffix ('元/日'), truncate to whole yuan.
    amount = int(float(salary[:-3]))
    return 2 if amount > 100 else 1
        


# Pool of desktop browser User-Agent strings; a random entry is attached to
# every outgoing request.  NOTE(review): currently holds a single entry, so
# random.choice always yields the same UA — consider adding more.
user_Agent = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
]

# Baseline request headers for campus.liepin.com.  The Cookie is a captured
# session snapshot — presumably short-lived; verify it is still valid before
# reuse.  A 'User-Agent' entry is added at request time from user_Agent.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': '__uuid=1594095363646.43; __tlog=1594095363647.08%7C00000000%7C00000000%7C00000000%7C00000000; __s_bid=7c6bbe59cb9db7df034824b85bbae7caf66b; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1594095364; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1594100944; _fecdn_=1; gr_user_id=b7ccbb63-74e9-45a2-aa01-70b4af125b7a; bad1b2d9162fab1f80dde1897f7a2972_gr_session_id=3ed98598-8184-43fe-870e-cfe4385da723; grwng_uid=3d2f5d74-6014-41d7-89f0-0a9c09e31c37; Hm_lvt_9bd1bf44f02b38cab5926e780f362426=1594103526; bad1b2d9162fab1f80dde1897f7a2972_gr_session_id_3ed98598-8184-43fe-870e-cfe4385da723=true; JSESSIONID=7DFB62E2E64B75FFFA814C24936A4911; __session_seq=9; __uv_seq=9; Hm_lpvt_9bd1bf44f02b38cab5926e780f362426=1594104019',
    'Host': 'campus.liepin.com',
    'Upgrade-Insecure-Requests': '1'
}



class MySpider(scrapy.Spider):
    """Spider for job listings on campus.liepin.com.

    start_requests() seeds one search-results request per target region
    code; parse() scrapes up to 15 listing rows per page and follows
    pagination up to 50 pages.
    """
    name = 'liepin'
    url1 = ''  # base listing URL of the region currently being paginated
    # Zero-based index of the last page requested.  NOTE(review): shared
    # across all regions and never reset, so the 50-page budget is global
    # rather than per region — confirm whether that is intended.
    page = 0

    @staticmethod
    def _first(parts):
        """Return the first extracted text, stripped, or None when empty."""
        return parts[0].strip() if parts else None

    def parse(self, response):
        """Extract one LiePinItem per listing row, then queue the next page."""
        item = LiePinItem()
        row_xpath = '/html/body/div[2]/div[1]/ul[2]/li[{}]'
        for i in range(1, 16):  # at most 15 rows per results page
            row = row_xpath.format(i)
            job = response.xpath(row + '/div[1]/p[1]/span/a/text()').extract()
            where = response.xpath(row + '/div[1]/p[2]/span[1]//text()').extract()
            when = response.xpath(row + '/div[1]/p[2]/span[2]//text()').extract()
            who = response.xpath(row + '/div[1]/p[2]/span[3]//text()').extract()
            salary = response.xpath(row + '/div[1]/p[2]/span[4]//text()').extract()
            company = response.xpath(row + '/div[2]/div[1]/a/text()').extract()
            industry = response.xpath(row + '/div[2]/div[1]/p/a/text()').extract()

            item['job'] = self._first(job)
            item['where'] = self._first(where)
            t = self._first(when)
            item['when'] = parseTime(t) if t is not None else None
            item['who'] = self._first(who)
            m = self._first(salary)
            if m is not None:
                # BUG FIX: the original used `if (m.find('-')): break`, but
                # str.find returns -1 (truthy) when '-' is absent, so the
                # loop aborted on almost every row.  Ranged salaries
                # ('a-b元/月') are the ones to skip.
                if '-' in m:
                    break
                m = parseMonthToDay(m)
                item['salary'] = m
                item['salaryType'] = parseSalary(m)
            else:
                item['salary'] = None
                item['salaryType'] = None
            item['company'] = self._first(company)
            item['industry'] = self._first(industry)
            yield item

        next_link = self.url1 + '&curPage=' + str(self.page + 1)
        # BUG FIX: the key was misspelled 'User_Agent', so paginated requests
        # never carried the rotated User-Agent header.
        headers['User-Agent'] = random.choice(user_Agent)

        if self.page < 50:  # the listing has 50 pages in total
            print("pn：{}运行中请勿打断...".format(self.page + 1))
            time.sleep(1)  # crude politeness delay between page fetches
            self.page += 1
            yield scrapy.http.Request(url=next_link, headers=headers, callback=self.parse)

    def start_requests(self):
        """Seed one search-results request per target region code."""
        # Liepin 'dqs' region codes — presumably specific cities; confirm
        # the mapping against the site before extending the list.
        dqs = ['020', '010', '050090', '050020']
        for code in dqs:
            url = 'https://campus.liepin.com/sojob/search/?keys=&dqs=' + code + '&job_type=1&folded=1'
            self.url1 = url  # remembered for pagination in parse()
            headers['User-Agent'] = random.choice(user_Agent)
            yield scrapy.http.Request(url=url, headers=headers, callback=self.parse)