# coding=gbk
from datetime import datetime, timedelta
import time
from urllib.parse import urlparse

import requests
from lxml import etree

import DBUtil
import Sha1Util

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
}


class XiangMuShiZhang:
    """Scraper for jianzhimao.com part-time job listings.

    Crawl order (each level persisted to its own MySQL table via DBUtil):
      city()            -> table ``city``            (city id/name/url)
      region_name_url() -> table ``region``          (district links per city)
      region_data()     -> table ``region_release``  (job listings per district)
      details()         -> table ``DetailData``      (per-job detail page)

    NOTE(review): DBUtil only exposes a raw-SQL string interface, so
    statements are still built by interpolation; values are escaped with
    ``_esc`` to keep quotes from breaking the statement, but a parameterized
    API would be the proper fix.
    """

    def time(self):
        """Sleep one second — polite crawl delay between HTTP requests."""
        time.sleep(1)

    @staticmethod
    def _esc(value):
        """Escape backslashes and double quotes so *value* can be embedded
        inside a double-quoted SQL string literal without breaking it."""
        return str(value).replace("\\", "\\\\").replace('"', '\\"')

    def city(self):
        """Crawl the city-selection page and store every city in ``city``.

        :return: list of city homepage URLs (same value the caller used before).
        """
        url = "https://www.jianzhimao.com/ctrlcity/changeCity.html"
        resp = requests.get(url=url, headers=headers)
        self.time()
        resp.encoding = "utf-8"
        tree = etree.HTML(resp.text)
        # City homepage URLs and display names are position-aligned in the DOM.
        url_list = tree.xpath('//ul[@class="city_table"]/li/a/@href')
        data_name = tree.xpath('//ul[@class="city_table"]/li/a/text()')
        dict_data = dict(zip(data_name, url_list))
        self.createTable()
        XiangMuShiZhang.addCiytData(dict_data)
        return url_list

    def createTable(self):
        """Create the ``city`` table if it does not already exist."""
        sql = 'create table if not exists city' \
              '(number int(11) not null primary key AUTO_INCREMENT,city_id varchar(50) ,name varchar(10) ,url varchar(80))'
        DBUtil.createTable(sql=sql)

    @staticmethod
    def addCiytData(dict_data):
        """Insert one ``city`` row per (name -> url) entry.

        :param dict_data: mapping of city display name to city homepage URL.
        """
        for name, url in dict_data.items():
            # Stable surrogate key derived from the row content.
            row_id = Sha1Util.jia_mi(name + url)
            sql = 'insert into city(city_id,name,url) values ("%s","%s","%s")' % (
                XiangMuShiZhang._esc(row_id), XiangMuShiZhang._esc(name), XiangMuShiZhang._esc(url))
            DBUtil.modifyTable(sql=sql)

    def region_name_url(self):
        """For every stored city, crawl its district links into ``region``."""
        city_rows = DBUtil.query(sql="select url,city_id from city")
        self.create_region_Table()
        for city_url, city_id in city_rows:
            respon = requests.get(city_url, headers=headers)
            self.time()
            respon.encoding = "utf-8"  # bug fix: was the typo "uft-8"
            tree = etree.HTML(respon.text)
            # District links live in the third <li> of the filter box.
            region_url = tree.xpath('//ul[@class="box"]/li[3]/a/@href')
            region_name = tree.xpath('//ul[@class="box"]/li[3]/a/text()')
            # hrefs are relative; join them onto the city homepage URL.
            url_complete = [city_url + part.lstrip("/") for part in region_url]
            XiangMuShiZhang.addRegionData(dict(zip(url_complete, region_name)), city_id)

    def create_region_Table(self):
        """Create the ``region`` table if it does not already exist."""
        sql = 'create table if not exists region(number int(11) not null primary key AUTO_INCREMENT,url varchar(100),region_id varchar(60),city_id varchar(60), name varchar(100))'
        DBUtil.createTable(sql=sql)

    @staticmethod
    def addRegionData(dict_data, city_id):
        """Insert one ``region`` row per (url -> name) entry for *city_id*."""
        for url, name in dict_data.items():
            row_id = Sha1Util.jia_mi(url + name)
            sql = 'insert into region(region_id,city_id,url,name) values ("%s","%s","%s","%s")' % (
                XiangMuShiZhang._esc(row_id), XiangMuShiZhang._esc(city_id),
                XiangMuShiZhang._esc(url), XiangMuShiZhang._esc(name))
            DBUtil.modifyTable(sql=sql)

    @staticmethod
    def _resolve_publish_time(label):
        """Convert a relative publish-time label to an absolute timestamp.

        Handles "刚刚", "N分钟前", "N小时前", "昨天", "前天"; any other label
        (already an absolute date on the site) is passed through unchanged.

        Bug fix: the original used ``int(label[0])`` and therefore read only
        the first digit of multi-digit offsets like "15分钟前".
        """
        fmt = "%Y-%m-%d %H:%M:%S"
        now = datetime.now()
        if label == "刚刚":
            return now.strftime(fmt)
        if label.endswith("分钟前"):
            return (now - timedelta(minutes=int(label[:-3]))).strftime(fmt)
        if label.endswith("小时前"):
            return (now - timedelta(hours=int(label[:-3]))).strftime(fmt)
        if label == "昨天":
            return (now - timedelta(days=1)).strftime(fmt)
        if label == "前天":
            return (now - timedelta(days=2)).strftime(fmt)
        return label

    def region_data(self):
        """Crawl up to 12 listing pages per region into ``region_release``."""
        region_rows = DBUtil.query(sql="select url,region_id from region")
        self.create_region_release_Table()
        for region_url, region_id in region_rows:
            # A "region" URL that is just the bare site root has no listings.
            if region_url.endswith("com/"):
                continue
            # Bug fix: listing hrefs are rooted at the SITE, not the region
            # path — the original only set home_page on the skipped branch,
            # so the normal path raised NameError (swallowed by the except).
            parsed = urlparse(region_url)
            home_page = "{}://{}/".format(parsed.scheme, parsed.netloc)
            for page in range(1, 13):
                page_url = region_url + "index{}.html".format(page)
                try:
                    respon = requests.get(url=page_url, headers=headers)
                    self.time()
                    respon.encoding = "utf-8"  # bug fix: was "uft-8"
                    tree = etree.HTML(respon.text)
                    hrefs = tree.xpath('//ul[@class="content_list_wrap"]/li/a/@href')
                    job_urls = [home_page + h.lstrip("/") for h in hrefs]
                    if not job_urls:
                        # Past the last populated page for this region.
                        break
                    # Parallel, position-aligned columns of the listing table.
                    number_name = tree.xpath('//ul[@class="content_list_wrap"]/li/div[@class="left visited"]/span/@title')
                    work_name_list = tree.xpath('//ul[@class="content_list_wrap"]/li/a/text()')
                    region_name_list = tree.xpath(
                        '//ul[@class="content_list_wrap"]/li/div[@class="left area"]/span/@title')
                    time_list = tree.xpath('//ul[@class="content_list_wrap"]/li/div[@class="left date"]/@title')
                    date_list = [self._resolve_publish_time(label) for label in time_list]
                    XiangMuShiZhang.addRegion_Release_Data(
                        region_name_list, work_name_list, job_urls, number_name, date_list, region_id)
                except Exception as ex:
                    # Best-effort crawl: one malformed page must not abort the run.
                    print("数据丢失")

    def create_region_release_Table(self):
        """Create the ``region_release`` table if it does not already exist."""
        sql = 'create table if not exists region_release(number int(11) not null primary key AUTO_INCREMENT,data_id varchar(50),region_id varchar(50),name varchar(50),url varchar(120),views varchar(50),time varchar(50))'
        DBUtil.createTable(sql=sql)

    @staticmethod
    def addRegion_Release_Data(region_name_list, work_name_list, url_list_3, number_name, date_list, region_id):
        """Insert one ``region_release`` row per position-aligned listing.

        ``zip`` truncates to the shortest column, as before.
        """
        for name, title, url, views, posted in zip(region_name_list, work_name_list, url_list_3, number_name, date_list):
            row_id = Sha1Util.jia_mi(str(url))
            sql = 'insert into region_release(data_id,region_id,name,url,views,time) values ("%s","%s","%s","%s","%s","%s")' % (
                XiangMuShiZhang._esc(row_id), XiangMuShiZhang._esc(region_id), XiangMuShiZhang._esc(name),
                XiangMuShiZhang._esc(url), XiangMuShiZhang._esc(views), XiangMuShiZhang._esc(posted))
            DBUtil.modifyTable(sql=sql)

    def details(self):
        """Crawl every stored job page and persist its fields to ``DetailData``."""
        rows = DBUtil.query(sql="select url,data_id from region_release")
        self.create_DetailData_Table()
        for job_url, data_id in rows:
            self.time()
            respon = requests.get(url=job_url, headers=headers)
            respon.encoding = "utf-8"  # bug fix: was "uft-8"
            tree = etree.HTML(respon.text)
            raw_fields = tree.xpath('//div[@class="job_content"]/ul/li/span[@class="con"]/text()')
            # Whitespace-strip each field ("list" builtin no longer shadowed).
            fields = [str(item).strip() for item in raw_fields]
            try:
                number = fields[0]                     # head count
                place = fields[1]                      # district
                type_time = fields[2] + fields[3]      # job type + schedule
                method_of_payment = fields[-2]
                wage = fields[-1]
                # Free-text blocks: strip whitespace, neutralize double quotes.
                detail_parts = tree.xpath('//div[@class="detail"]/text()')
                Work_details = "".join(p.strip().replace('"', " ") + "  " for p in detail_parts)
                company_parts = tree.xpath('//div[@class="company_info"]//text()')
                Company_introduction = "".join(p.strip().replace('"', " ") + "  " for p in company_parts)
                XiangMuShiZhang.add_Detail_Data(job_url, number, type_time, place, method_of_payment, wage,
                                                Work_details, Company_introduction, data_id)
            except Exception as ex:
                # Detail pages missing expected fields are skipped, as before.
                print("数据丢失！")

    def create_DetailData_Table(self):
        """Create the ``DetailData`` table if it does not already exist."""
        sql = 'create table if not exists DetailData(number int(11) not null primary key AUTO_INCREMENT,data_id varchar(60),' \
              'num varchar(60) ,type_time varchar(60),place varchar(60),method_of_payment varchar(80),' \
              'wage varchar(90),Work_details text(0),Company_introduction text(0))'
        DBUtil.createTable(sql=sql)

    @staticmethod
    def add_Detail_Data(url, number, type_time, place, method_of_payment, wage, Work_details, Company_introduction, data_id):
        """Insert one ``DetailData`` row (``url`` kept for signature
        compatibility; the row is keyed by *data_id* as before)."""
        sql = """insert into DetailData(data_id,num,type_time,place,method_of_payment,wage,Work_details,Company_introduction)
        values ("%s","%s","%s","%s","%s","%s","%s","%s")
        """ % (XiangMuShiZhang._esc(data_id), XiangMuShiZhang._esc(number), XiangMuShiZhang._esc(type_time),
               XiangMuShiZhang._esc(place), XiangMuShiZhang._esc(method_of_payment), XiangMuShiZhang._esc(wage),
               XiangMuShiZhang._esc(Work_details), XiangMuShiZhang._esc(Company_introduction))
        DBUtil.modifyTable(sql=sql)