
import requests
from bs4 import BeautifulSoup
from database import operation
import threading
# Spider for qlrc.com (Qilu Talent), a Chinese job-listing site.


class qiLuSpider(threading.Thread):
    """Crawler thread for qlrc.com (Qilu Talent) job listings.

    Scrapes the first ``pageNumber`` result pages of two category-search
    URLs and inserts every posting found into the ``recruitment`` table
    through the supplied database connection.
    """

    def __init__(self, conn, pageNumber):
        """
        :param conn: open DB-API connection; ``cursor()`` and ``commit()``
            are called on it from this thread.
        :param pageNumber: number of result pages to crawl per search URL.
        """
        threading.Thread.__init__(self)
        self.conn = conn
        self.pageNumber = pageNumber

    def run(self):
        cur = self.conn.cursor()

        headers = {
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
        }
        # qlrc.com only exposes 100 result pages. The two URLs cover nine
        # job-type tags: the first five (back-end, mobile/front-end,
        # operations, testing, product manager) and the remaining four.
        urls = ['https://www.qlrc.com/personal/js/search/{:}?1regionId=32&dcJobTypeId=24%2060%2061%2023%2063',
                'https://www.qlrc.com/personal/js/search/{:}?2regionId=32&dcJobTypeId=28%2025%2062%2064'
                ]
        for url in urls:
            for page in range(1, self.pageNumber + 1):
                # BUG FIX: the original passed the string 'parsehtml' as the
                # `params` argument of requests.get and never sent `headers`,
                # so the User-Agent spoofing had no effect. Also add a
                # timeout so a dead server cannot hang the thread forever.
                try:
                    response = requests.get(url.format(page),
                                            headers=headers, timeout=10)
                except requests.RequestException:
                    continue
                if response.status_code != 200:
                    continue
                soup = BeautifulSoup(response.text, 'lxml')

                for item in soup.find_all(attrs={'class': 'main_top'}):
                    jobrequire = item.find(attrs={'class': 'jobrequire'}).text.split()
                    html_url = 'https://www.qlrc.com' + (item.find('a').attrs['href'])
                    jobname = item.find(attrs={'class': 'jobname'}).text.replace('\n             ', '')
                    company = item.find(attrs={'class': 'cpname'}).text

                    # "面议" (salary negotiable) has no numeric range.
                    # NOTE(review): the original comment said the fallback
                    # range is 0-99k but the code used [0, 9]; the code's
                    # value is kept here — confirm which was intended.
                    if jobrequire[0] == '面议':
                        salary = [0, 9]
                    else:
                        # e.g. "5-8K" -> ['5', '8']
                        salary = jobrequire[0][:-1].split('-')

                    education = jobrequire[-1]
                    exp = jobrequire[-3]

                    # Parameterized query instead of str.format-built SQL:
                    # fixes injection/quoting bugs for fields containing
                    # quotes. NOTE(review): assumes a %s-paramstyle driver
                    # (pymysql/MySQLdb) — confirm against the `database`
                    # module.
                    try:
                        cur.execute(
                            'insert into recruitment'
                            '(Web,Url,Job,MinPay,MaxPay,Education,Experience,Company) '
                            'values(%s,%s,%s,%s,%s,%s,%s,%s)',
                            ('齐鲁人才网', html_url, jobname, salary[0],
                             salary[1], education, exp, company))
                        self.conn.commit()
                    except Exception:
                        # Best-effort insert: skip malformed rows, matching
                        # the original's silent-continue behavior (but no
                        # longer swallowing KeyboardInterrupt/SystemExit).
                        continue




