from distutils.log import info
import sqlite3
import requests
from bs4 import BeautifulSoup
from lxml import etree
import sys

import re
import urllib3

# Suppress the InsecureRequestWarning that urllib3 emits for HTTPS requests
# made with verify=False (certificate checking disabled).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# def getCookie(url):
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'
#     }
#     #urllib或requests在打开https站点是会验证证书。 简单的处理办法是在get方法中加入verify参数，并设为False
#     html = requests.get(url, headers=headers,verify=False)
#     #获取cookie:DZSW_WSYYT_SESSIONID
#     if html.status_code == 200:
#         print(html.cookies)
#         for cookie in html.cookies:
#             print(cookie)
#         return html.cookies
def getCookie():
    """Fetch the Boss Zhipin home page and return its cookies serialized
    as a ``key=value; ...`` string suitable for a ``Cookie`` request header.

    Returns:
        str: the cookie header value; empty string if no cookies were set.

    BUG FIX: the original only printed the cookie jar and returned None,
    so open_page() sent the literal header "Cookie: None".
    """
    url = 'https://www.zhipin.com/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'
    }
    # requests verifies TLS certificates by default; verify=False matches the
    # workaround used by the earlier (commented-out) versions of this helper.
    res = requests.get(url, headers=headers, verify=False)

    # Serialize the RequestsCookieJar into a header-style string.
    cookie_value = '; '.join(f'{k}={v}' for k, v in res.cookies.items())
    print(cookie_value)
    return cookie_value


# 获取指定页html
def open_page(url):
    """Download *url* with a freshly fetched cookie attached and return
    the prettified HTML of the page as a string."""
    headers = {
        'Cookie': getCookie(),
        'user-agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
    }
    response = requests.get(url, headers=headers)
    # Re-serialize through BeautifulSoup so the markup is normalized
    # before it is handed to the lxml-based extractors.
    return BeautifulSoup(response.text, 'lxml').prettify()


# 解析列表页html，并将数据入到字典中
def extract_text(html):
    """Parse a job-list page and return a dict holding two parallel lists:
    ``titles`` (job titles) and ``href`` (detail-page links)."""
    tree = etree.HTML(html)
    return {
        'titles': tree.xpath('//*[@class="job-name"]/a/@title'),
        'href': tree.xpath('//*[@class="job-name"]/a/@href'),
    }

# 解析详情页html,并将数据存到字符串中返回
def extract_html(html):
    """Parse a job-detail page and return the job-description text.

    Exits the program, dumping the raw HTML for inspection, when no
    description node is present (typically an expired cookie / blocked
    request).
    """
    nodes = etree.HTML(html).xpath('//*[@class="text"]')
    if not nodes:
        sys.exit(html)
    # string(.) concatenates all descendant text of the first match.
    return nodes[0].xpath('string(.)')


# 拆分list数据
def list_split(items):
    """Split *items* into its even- and odd-indexed elements, dropping
    empty strings from each half.

    Args:
        items: a sequence of strings.
    Returns:
        tuple[list, list]: (even-index elements, odd-index elements).
    """
    # NOTE: parameter renamed from `list`, which shadowed the builtin;
    # slicing replaces the manual range(start, len, 2) index loops.
    evens = [x for x in items[0::2] if x != '']
    odds = [x for x in items[1::2] if x != '']
    return evens, odds


# 清理list数据换行符与空格
def data_clear(items):
    """Remove newlines, literal '<br/>' tags and surrounding whitespace
    from every string in *items*, mutating the list in place.

    Args:
        items: a list of strings scraped from the page.
    Returns:
        list: the same list, cleaned.

    BUG FIX: the original loop started at index 1, so the first element
    (the job title) was never cleaned. Parameter also renamed from
    `list`, which shadowed the builtin.
    """
    for i in range(len(items)):
        items[i] = items[i].replace("\n", "").replace("<br/>", "").strip()
    return items


if __name__ == '__main__':

    conn = sqlite3.connect('boss.db')
    cursor = conn.cursor()

    # IF NOT EXISTS lets the script be re-run without the CREATE failing
    # (the original crashed with "table jobs_info already exists").
    cursor.execute('''CREATE TABLE IF NOT EXISTS jobs_info
        (ID INTEGER PRIMARY KEY   AUTOINCREMENT,
        titles           TEXT    NOT NULL,
        description           TEXT    NOT NULL);''')

    try:
        for i in range(1, 5):
            url = ('https://www.zhipin.com/c101010100/?query=%E7%88%AC%E8%99%AB%E5%B7%A5%E7%A8%8B%E5%B8%88&page='
                   + str(i) + '&ka=page-' + str(i))
            print(url)
            html = open_page(url)
            info_dict = extract_text(html)
            # An empty title list means the page was served without data,
            # usually because the cookie is stale.
            if len(info_dict['titles']) == 0:
                sys.exit('请重新获取cookies')
            for j in range(len(info_dict['titles'])):
                # A page holds 30 rows; stop after 10 rows on the last
                # fetched page so the total stays bounded (original note
                # said 100 rows — TODO confirm the intended cap).
                if i == 4 and j == 10:
                    break
                title = info_dict['titles'][j]
                detail_html = open_page('https://www.zhipin.com' + info_dict['href'][j])
                description = extract_html(detail_html)
                row = data_clear([title, description])
                # Parameterized query: the original built the INSERT with
                # %-string formatting, which breaks on quotes inside the
                # scraped text and is vulnerable to SQL injection.
                cursor.execute(
                    'INSERT INTO jobs_info(titles, description) VALUES(?, ?)',
                    (row[0], row[1]))
            # Commit once per page so progress survives a crash mid-run.
            conn.commit()
            print("已获取到第" + str(i) + "页")
    finally:
        # Always release the DB handles, even when sys.exit() fires above.
        cursor.close()
        conn.close()
