import sqlite3
import requests
from bs4 import BeautifulSoup
from lxml import etree
import sys


# 获取指定页html
# Fetch one listings page and return its HTML.
def open_page(url):
    """Download *url* posing as a logged-in browser and return prettified HTML.

    The request carries a hard-coded session Cookie and a desktop Chrome
    user-agent so the site serves real listing markup instead of an
    anti-bot page.

    NOTE(review): the Cookie expires; when `extract_text` starts returning
    empty lists, refresh it (the caller exits with a reminder in that case).

    :param url: absolute URL of the listings page to fetch.
    :return: the page HTML, re-indented by BeautifulSoup's ``prettify()``
             (downstream XPath extraction was written against this form).
    """
    header = {
        'Cookie':
        'UM_distinctid=179513d29f132e-076a56f77155da-45410429-100200-179513d29f22ee; _bl_uid=tqk8Co02hUm8O6a3spb7dhLt7RRI; lastCity=101010100; wt2=DSF_Dd4xOvtGpBEq__7MvtSOZjG6beqpFuq2cJjQFdlpV2mx7-U2kVlKrQHDs5kONnGZyt0YMS28t36LXxb2y8A~~; __g=-; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1622454608,1624507674,1624684826; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1624721393; __c=1624684826; __l=l=%2Fwww.zhipin.com%2Fc101010100%2F%3Fquery%3D%25E7%2588%25AC%25E8%2599%25AB%25E5%25B7%25A5%25E7%25A8%258B%25E5%25B8%2588%26page%3D2%26ka%3Dpage-2&s=3&friend_source=0&s=3&friend_source=0; __a=13582583.1620565631.1624507616.1624684826.76.5.22.76; __zp_stoken__=c66fcOFpjDXAHRCY5dnNRQTtZfVR7BTR8UlkxVwV6GwBBQzdJZWIIZ398FEJKI1UFF24PWS8cAF1EMUNmB1NHcDIdZj0adyAuDkxyFkQZO0YvTUESH3JTajQVQQgaJmx4J00fZFsGBnZveAZG; CNZZDATA1256793290=462675142-1620563780-%7C1624718180; geek_zp_token=V1RNsiGeb131loVtRvzRkfLCO35TvXzC0~',
        'user-agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
    }
    # timeout= prevents the scraper from hanging forever on a stalled
    # connection (requests has NO default timeout).
    res = requests.get(url, headers=header, timeout=30)
    soup = BeautifulSoup(res.text, 'lxml')
    return soup.prettify()


# 解析html，并将数据入到字典中
# Parse a listings page and gather each field into a list, keyed by name.
def extract_text(html):
    """Extract job-listing fields from *html* via XPath.

    :param html: page HTML as produced by ``open_page``.
    :return: dict mapping field name -> list of string values; all lists are
             index-aligned per job posting.  The key insertion order is
             significant: the caller zips ``values()`` positionally with the
             INSERT column order.
    """
    dom = etree.HTML(html)
    pick = dom.xpath

    # These two node sets interleave a pair of logical fields, so split
    # their even/odd entries apart before assembling the result.
    req_first, req_second = list_split(pick('//*[@class="job-limit clearfix"]/p/text()'))
    inv_first, inv_second = list_split(pick('//*[@class="company-text"]/p/text()'))

    return {
        'titles': pick('//*[@class="job-name"]/a/@title'),
        'areas': pick('//*[@class="job-area"]/text()'),
        'pubtimes': pick('//*[@class="job-pub-time"]/text()'),
        'companies': pick('//*[@class="name"]/a/text()'),
        'salaries': pick('//*[@class="red"]/text()'),
        'requirements_1': req_first,
        'requirements_2': req_second,
        'industries': pick('//*[@class="company-text"]/p/a/text()'),
        'investments_1': inv_first,
        'investments_2': inv_second,
        'tags': pick('//*[@class="tag-item"]/text()'),
        'others': pick('//*[@class="info-desc"]/text()'),
    }


# 拆分list数据
# Split an interleaved list into its even- and odd-indexed halves.
def list_split(items):
    """Separate *items* into even-index and odd-index sublists.

    Used to un-interleave XPath text() results where two logical fields
    alternate in the same node set.  Empty-string entries are dropped.

    :param items: flat list of strings.
    :return: tuple ``(even_indexed, odd_indexed)``.
    """
    # Slicing replaces the original index loops; note the parameter no
    # longer shadows the builtin ``list``.
    evens = [item for item in items[0::2] if item != '']
    odds = [item for item in items[1::2] if item != '']
    return evens, odds


# 清理list数据换行符与空格
# Remove newlines and surrounding whitespace from every element.
def data_clear(items):
    """Return *items* with newlines removed and whitespace stripped.

    Bug fix: the original iterated ``range(1, len(list))`` and therefore
    never cleaned element 0 (the job title), letting prettify() whitespace
    leak into the database.  All elements are cleaned now.

    :param items: list of raw strings scraped from the page.
    :return: list of cleaned strings (same length and order).
    """
    return [item.replace("\n", "").strip() for item in items]


if __name__ == '__main__':

    # Open (or create) the local SQLite database and grab a cursor.
    conn = sqlite3.connect('boss.db')
    cursor = conn.cursor()

    # IF NOT EXISTS lets the script be re-run without crashing on the
    # already-created table (the original CREATE TABLE failed on run #2).
    cursor.execute('''CREATE TABLE IF NOT EXISTS jobs_info
        (ID INTEGER PRIMARY KEY   AUTOINCREMENT,
        titles           TEXT    NOT NULL,
        areas           TEXT    NOT NULL,
        pubtimes           TEXT    NOT NULL,
        companies           TEXT    NOT NULL,
        salaries           TEXT    NOT NULL,
        requirements_1           TEXT    NOT NULL,
        requirements_2           TEXT    NOT NULL,
        industries           TEXT    NOT NULL,
        investments_1           TEXT    NOT NULL,
        investments_2           TEXT    NOT NULL,
        tags           TEXT    NOT NULL,
        others           TEXT    NOT NULL);''')

    # Parameterized INSERT: the original built SQL with %-formatting, which
    # breaks (and is injectable) as soon as any scraped value contains a quote.
    insert_sql = '''INSERT INTO jobs_info(titles, areas, pubtimes, companies, salaries,
        requirements_1, requirements_2, industries, investments_1, investments_2, tags,
        others) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''

    for i in range(1, 5):
        # query=... is the URL-encoded search keyword "爬虫工程师" (crawler engineer).
        url = 'https://www.zhipin.com/c101010100/?query=%E7%88%AC%E8%99%AB%E5%B7%A5%E7%A8%8B%E5%B8%88&page=' + str(
            i) + '&ka=page-' + str(i)
        print(url)
        html = open_page(url)
        info_dict = extract_text(html)
        # An empty result page means the session cookie has expired.
        if len(info_dict['titles']) == 0:
            sys.exit('请重新获取cookies')
        for j in range(len(info_dict['titles'])):
            # Each page holds 30 listings; take only the first 10 from the
            # last page (i == 4) so the total is exactly 100 rows.
            if i == 4 and j == 10:
                break
            # Assemble the j-th row: info_dict preserves insertion order,
            # so values() lines up with the INSERT column order above.
            row = data_clear([values[j] for values in info_dict.values()])
            cursor.execute(insert_sql, row)
        # Commit once per page so a crash loses at most one page of rows.
        conn.commit()
        print("已获取到第" + str(i) + "页")

    # Release the cursor and the connection.
    cursor.close()
    conn.close()
