import re

import pymysql
import requests
from lxml import etree
from bs4 import BeautifulSoup
from database import operation
import threading


class SpiderLiepin(threading.Thread):
    """Worker thread that scrapes job listings from liepin.com into MySQL.

    Call ``start()`` to fetch ``num`` result pages and insert one row per
    listing into the ``recruitment`` table over the supplied connection.

    Parameters
    ----------
    num : int
        Number of search-result pages to fetch (0-based page indices).
    conn : pymysql connection
        Open MySQL connection used for the inserts.
    """

    # Search-results URL; the zero-based page index is appended to it.
    BASE_URL = ('https://www.liepin.com/zhaopin/?headId=89fb59aef232a6dec79fc2b4542b12d3'
                '&ckId=aa68fc662655476b4ad5b58013948369&key=%E4%BA%92%E8%81%94%E7%BD%91'
                '&industry=1$010&currentPage=')

    # Request headers; a desktop UA is enough for the public list pages.
    HEADERS = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }

    # Compiled once: salary range like "10-20" and a lone leading number.
    _RANGE_RE = re.compile(r'\d{1,4}-\d{1,4}')
    _SINGLE_RE = re.compile(r'\d{1,3}')

    def __init__(self, num, conn):
        threading.Thread.__init__(self)
        self.conn = conn
        self.pageNumber = num

    @staticmethod
    def _parse_salary(text):
        """Parse a salary label into a ``(min, max)`` pair of digit strings.

        '面议' ("negotiable") maps to ('0', '99'); a "low-high" range is
        split on '-'; a lone number is used for both bounds.  Unparseable
        labels return ('0', '0') instead of crashing (the old code raised
        AttributeError on a failed regex search).  Units follow the site's
        label (presumably k CNY/month — verify against the page).
        """
        if text == '面议':
            return '0', '99'
        if '-' in text:
            match = SpiderLiepin._RANGE_RE.search(text)
            if match:
                low, high = match.group().split('-')
                return low, high
        match = SpiderLiepin._SINGLE_RE.search(text)
        if match:
            value = match.group()
            return value, value
        return '0', '0'

    def run(self):
        """Fetch ``self.pageNumber`` pages and insert the listings into MySQL."""
        # Common XPath prefix for one job card's primary <a> element.
        card = '/html/body/div[1]/div/section[1]/div/ul/li/div/div/div[1]/div/a[1]'

        links, names, min_pays, max_pays = [], [], [], []
        educations, requirements, locations, companies = [], [], [], []

        for page in range(self.pageNumber):
            url = self.BASE_URL + str(page)
            response = requests.get(url, headers=self.HEADERS)
            html = response.content.decode('utf-8')
            soup = BeautifulSoup(html, 'lxml')   # class-based lookups
            root = etree.HTML(html)              # positional XPath lookups

            names.extend(root.xpath(card + '/div[1]/div/div[1]/text()'))
            locations.extend(root.xpath(card + '/div[1]/div/div[2]/span[2]/text()'))

            for tag in soup.find_all('span', class_='job-salary'):
                low, high = self._parse_salary(str(tag.string))
                min_pays.append(low)
                max_pays.append(high)

            requirements.extend(root.xpath(card + '/div[2]/span[1]/text()'))
            educations.extend(root.xpath(card + '/div[2]/span[2]/text()'))

            for tag in soup.find_all('span', class_='company-name'):
                companies.append(str(tag.string))

            links.extend(root.xpath(card + '/@href'))

        # Parameterized insert: scraped text is untrusted, so never splice it
        # into the SQL string (the old "{:}".format build was injectable and
        # broke on embedded quotes).  zip() truncates to the shortest column,
        # so a partially parsed page cannot raise IndexError.
        sql = ('insert into recruitment '
               '(Web, Url, Job, MinPay, MaxPay, Education, Experience, City, Company) '
               'values (%s, %s, %s, %s, %s, %s, %s, %s, %s)')
        rows = [('猎聘',) + row for row in
                zip(links, names, min_pays, max_pays, educations,
                    requirements, locations, companies)]

        cursor = self.conn.cursor()
        try:
            if rows:
                cursor.executemany(sql, rows)
            self.conn.commit()  # single commit instead of one per row
        finally:
            cursor.close()



# 入口
# if __name__ == '__main__':
#     n = int(input("爬取的页数: "))
#     conn = pymysql.connect(host='localhost',
#                            user='root',
#                            passwd='123456',
#                            db='spider',
#                            charset="utf8")
#     SpiderLiepin=SpiderLiepin(n,conn)
#     SpiderLiepin.start()
