import requests
from lxml import etree
import DBUtil
import Sha1Util
from concurrent.futures import ThreadPoolExecutor
import time

# Browser-like User-Agent sent with every request — presumably so the
# target site serves normal HTML instead of rejecting the default
# python-requests UA (TODO confirm the site actually requires this).
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
}


def job_createTable():
    """Create the ``job1`` table if it does not already exist.

    Columns: id (primary key), url, titleName, region, cityId,
    browses (view count), times (publish time) — all stored as varchar.
    """
    ddl = """
        create table if not exists job1 (id varchar(255) not null primary key,
        url varchar(255) not null,titleName varchar(50) not null,
        region varchar(255) not null,cityId varchar(255) not null,
        browses varchar(255) not null,times varchar(255) not null);
        """
    DBUtil.createDatabase(ddl)


def getRegionUrl():
    """Return all (areaUrl, cityId) rows from the ``area1`` table.

    :return: whatever DBUtil.queryData yields — an iterable of rows where
             row[0] is the area URL and row[1] is the city id.
    """
    return DBUtil.queryData('select areaUrl,cityId from area1')


def job_data():
    """Scrape part-time job listings for every region and insert them into job1.

    For each (areaUrl, cityId) row from the area1 table, walks pages
    index1.html .. index11.html, extracts per-listing url / title / region /
    view count / publish time, and inserts one row per listing.

    Side effects: HTTP GETs against the target site, INSERTs via
    DBUtil.modifyData, and a 1-second sleep per inserted row (rate limiting).
    """
    for regio_url, cityId in getRegionUrl():
        # Listing hrefs are site-relative; rebuild the scheme+host prefix once
        # per region instead of once per item (loop-invariant).
        base_url = regio_url.split("com/")[0] + "com"
        for page in range(1, 12):
            area_url = regio_url + f"index{page}.html"  # URL of one listing page
            try:
                resp = requests.get(url=area_url, headers=headers, timeout=10)
                resp.raise_for_status()
            except requests.RequestException:
                # Best-effort scrape: skip pages that fail to download
                # rather than aborting the whole run.
                continue
            resp.encoding = "utf-8"
            tree = etree.HTML(resp.text)
            job_url_list = tree.xpath('//ul[@class="content_list_wrap"]/li/a/@href')
            if not job_url_list:
                # Empty page means this region has no more listings;
                # move on to the next region.
                break
            job_name_list = tree.xpath('//ul[@class="content_list_wrap"]/li/a/text()')
            job_region_list = tree.xpath('//ul[@class="content_list_wrap"]/li/div[1]/span/@title')
            job_browse_list = tree.xpath('//ul[@class="content_list_wrap"]/li/div[2]/span/@title')
            job_time_list = tree.xpath('//ul[@class="content_list_wrap"]/li/div[3]/@title')
            # zip() pairs the parallel xpath result lists and stops at the
            # shortest one, so a malformed <li> can no longer raise IndexError.
            for href, titleName, job_region, job_browses, job_time in zip(
                    job_url_list, job_name_list, job_region_list,
                    job_browse_list, job_time_list):
                job_url = base_url + href
                job_id = Sha1Util.jia_mi(job_url)  # don't shadow builtin `id`
                # Double up single quotes so scraped text can't break out of
                # the SQL string literal (e.g. a title containing ').
                # NOTE(review): prefer parameterized queries if DBUtil
                # supports them — this escaping is a stopgap.
                vals = [str(v).replace("'", "''")
                        for v in (job_id, job_url, titleName, job_region,
                                  cityId, job_browses, job_time)]
                sql = ("insert into job1 (id,url,titleName,region,cityId,browses,times) values ("
                       + ",".join(f"'{v}'" for v in vals) + ");")
                DBUtil.modifyData(sql)
                time.sleep(1)  # throttle: one insert (and one listing) per second


if __name__ == "__main__":
    # Create the table, then run the scrape exactly once.
    #
    # The original code submitted job_data to a 100-worker ThreadPoolExecutor
    # (which ran it once on a single worker — the other 99 sat idle), waited
    # for it via the `with` block, and then called job_data() AGAIN
    # synchronously: the entire site was scraped twice and the second pass's
    # inserts collided with the job1 primary key. A single direct call is
    # the intended behavior. The __main__ guard keeps importing this module
    # from triggering a scrape.
    job_createTable()
    job_data()

