import requests
import pandas as pd
from lxml import etree
import pymysql
import mysql
import numpy as np
import re
from concurrent.futures import ThreadPoolExecutor
from area_info_html import area_info
from 兼职吗.垃圾堆 import area_link, citylink
import time
def sheep(seconds=1):
    """Pause between HTTP requests to throttle scraping.

    seconds: length of the pause; defaults to the original 1-second delay,
             so existing ``sheep()`` call sites behave exactly as before.
    """
    time.sleep(seconds)
# Desktop-browser User-Agent sent with every request — presumably so the
# site does not reject obvious non-browser traffic (TODO confirm required).
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36 Edg/95.0.1020.44'
}
# NOTE: this and the commented-out functions below are obsolete earlier
# attempts, kept only for reference.
# def areainfo():#从mysql拿出垃圾
#     bd = pymysql.connect(host="localhost", user="root", password="zs20211015", database="py_sql_jianzhimao")
#     cursor1 = bd.cursor()
#     sql = "SELECT * FROM areainfo"
#     cursor1.execute(sql)
#     reallylink = cursor1.fetchall()
#     print(reallylink)
#     bd.close()
#     return areainfo
# areainfo()
#我也不知道是什么垃圾,哦想起来了是西八的切割最后一个西八，tm的看楼上
# def cut_really_arealink():#切割列表的链接
#     global reallylink#？
#     arealinklist = []
#     for i in reallylink():
#         print(reallylink)

#         reallylink = i[-1]#？
#         arealinklist.append(reallylink)
#         # print(arealinklist)
#     return arealinklist
# cut_really_arealink()


# def chapter ():
#     for a1 in cut_really_arealink:
#         rrep = requests.get(url=a1, headers=headers)
#         all_chapter_Html=rrep.text
#         tree = etree.HTML(all_chapter_Html)
#         first_page=tree.xpath('')
#         other_page=tree.xpath('//ul[@class="content_page_wrap"]/li/a/@href')


        #我是错的#really_other_other_page=['https://guangzhou.jianzhimao.com/'+i for i in other_page]





#感觉对了
# def chapter():
#     global a, combine_link
#     for a2 in area_link():#对
#         reps = requests.get(url=a2, headers=headers)
#         areaHtml2 = reps.text
#         is_next_page = True if "下一页" in areaHtml2 else False
#         tree = etree.HTML(areaHtml2)
#         sheep()
#         job_page=tree.xpath('//ul[@class="content_page_wrap"]/li/a/@href')
#         # print(job_page)#//*[@id="content_page_wrap"]/li[2]/a
#         # for a3 in job_title:
#
#         if len(job_page)>=2:
#             if is_next_page:
#                 job_page=job_page[0:-1]
#             else:
#                 pass
#
#         result=''.join(job_page)
#         print(result)

        # half_job_link=[]
        # for zz in job_page:
        #     half_job_link.append(job_page)
        # print(half_job_link)
        # print(job_page)

        #废弃
        # a_job_page=[]
        # for a3 in job_page:
        #     a_job_page.append(job_page)
        # print(a_job_page)
#合体
        # for a3 in job_page():
        #     combine_link=["1" + a for a in half_job_link]
#         # print(combine_link)
# chapter()
# Working version (mostly); supersedes the commented-out attempts above.
def chapter2(a2):
    """Scrape every job listing reachable from one area index page.

    a2: absolute URL of an area's listing page, e.g.
        http://anshan.jianzhimao.com/tiedong_zbx_0/index1.html

    Fetches the index page, collects its pagination links, then for each
    pagination page extracts every job's title, poster, post time and
    detail-page link, handing each row to tianjiasuju() for DB insertion.
    """
    resp = requests.get(url=a2, headers=headers)
    tree = etree.HTML(resp.text)
    sheep()  # throttle between requests
    page_links = tree.xpath('//ul[@class="content_page_wrap"]/li/a/@href')
    # When pagination exists, the last link is the "next page" button —
    # drop it so the same page is not queued twice.
    if len(page_links) > 1:
        page_links.pop(-1)
    # Site root, e.g. "http://anshan.jianzhimao.com".  The original pattern
    # ('http:.*?.com') missed https URLs and left the dot unescaped.
    roots = re.findall(r'https?://.*?\.com', a2)
    base = roots[0] if roots else a2  # fall back instead of IndexError
    full_page_links = [base + page for page in page_links]

    for page_url in full_page_links:
        page_resp = requests.get(url=page_url, headers=headers)
        page_tree = etree.HTML(page_resp.text)
        sheep()
        job_hrefs = page_tree.xpath("//ul[@id='content_list_wrap']//a/@href")
        job_hrefs = [base + href for href in job_hrefs]
        # NOTE(review): hrefs are selected by @id but titles by @class for
        # what looks like the same <ul> — confirm against the live markup.
        job_titles = page_tree.xpath('//ul[@class="content_list_wrap"]/li/a/text()')
        job_persons = page_tree.xpath('//div[@class="left visited"]/span/@title')
        job_times = page_tree.xpath('//div[@class="left date"]/@title')
        for title, person, post_time, href in zip(job_titles, job_persons,
                                                  job_times, job_hrefs):
            tianjiasuju(x=title, w=person, y=post_time, z=href)

            # for x,y,z in area_job_title,r_area_job_person,r_area_job_time:
            #     tianjiasuju(x,y,z)



            # dict_data = dict(zip(area_job_title, r_area_job_person))
            # for x,y in dict_data.items():

        # for a3 in job_page:
        #     job_page = job_page(set(job_page))

            # if len(job_page)>1:
                # job_page = job_page[0:-1]
                # job_page.pop()
                # del(job_page[-1])
                # job_page.remove("index2")
            # else:
            #     pass


def tianjiasuju(x, w, y, z):  # insert one scraped row into the database
    """Insert one job posting into the job_company_info table.

    x: job title
    w: poster / company name
    y: posting time
    z: absolute URL of the job's detail page
    """
    # Scraped page content is untrusted; raw %-interpolation into SQL is an
    # injection risk (a single quote in a title already breaks the query).
    # Escape each value before formatting.  A parameterized query would be
    # better still, but mysql.gengxinbiao() only accepts a SQL string here.
    from pymysql.converters import escape_string
    sql = """
    insert into job_company_info(title,player,time,job_title_href)
    values
    ('%s','%s','%s','%s')
    """ % (escape_string(x), escape_string(w), escape_string(y), escape_string(z))
    mysql.gengxinbiao(sql)


def multi_thread():
    """Scrape every area concurrently with up to 10 worker threads.

    Submits one chapter2() job per link returned by area_link().  The
    original declared ``global job_page`` here, but that name is never
    assigned in this function (nor defined at module level) — removed.
    """
    with ThreadPoolExecutor(10) as th:
        for a2 in area_link():
            th.submit(chapter2, a2)

# Entry point: fan out one scraping thread per area, then report completion.
if __name__ == '__main__':
    multi_thread()
    print("finish")

#insert into tblname values(1,2,3)