import threading
import requests
from queue import Queue
from lxml import etree
from pymysql import connect
from BossZhipin.proxy_ip import ProxyIpSpider


class BossSpider:
    """Multi-threaded crawler for job listings on zhipin.com.

    Pipeline (producer/consumer over three queues):
      1. ``produce_area`` seeds ``job_list_page_quene`` with one entry per
         district found on the start page.
      2. ``produce_job_company`` drains the list pages, inserts stub rows for
         unseen jobs/companies and feeds the two detail queues.
      3. ``consume_job`` / ``consume_company`` scrape the detail pages and
         fill in the stub rows.

    All rows are persisted in the local ``boss_spider`` MySQL database.
    """

    def __init__(self):
        # Seed page — presumably Python jobs (position=100109) in Shanghai
        # (scity=101020100); TODO confirm against zhipin.com's URL scheme.
        self.start_url = 'https://www.zhipin.com/job_detail/?query=&scity=101020100&industry=&position=100109'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"
        }
        # NOTE(review): credentials are hard-coded; move them to config/env.
        self.conn = connect(host='localhost', port=3306, user='root', password='root',
                            database='boss_spider', charset='utf8')
        self.cursor = self.conn.cursor()
        self.domain = 'https://www.zhipin.com'
        # The misspelled '...quene' attribute names are kept as-is for
        # backward compatibility with existing callers.
        self.job_list_page_quene = Queue()  # list pages waiting to be scanned
        self.job_quene = Queue()            # job detail pages waiting to be scraped
        self.company_quene = Queue()        # company detail pages waiting to be scraped
        self.proxy_ip = ProxyIpSpider()
        # A single pymysql connection/cursor pair is shared by all worker
        # threads; pymysql is not thread-safe, so every DB round-trip is
        # serialized through this lock.
        self._db_lock = threading.Lock()

    # Area producer: seed the list-page queue, one entry per district.
    def produce_area(self):
        """Fetch the start page and enqueue every district's first list page."""
        html_str = requests.get(self.start_url, headers=self.headers,
                                proxies=self.proxy_ip.get_random_ip()).content.decode()
        element = etree.HTML(html_str)
        area_list = element.xpath('//dl[@class="condition-district show-condition-district"]/dd/a')
        # The first anchor is the "all districts" filter — skip it.
        for area in area_list[1:]:
            self.job_list_page_quene.put({
                'area_name': area.xpath('./text()')[0],
                'page_url': self.domain + area.xpath('./@href')[0],
            })
        print('地区队列已经全部添加')

    # Producer for the job and company queues (jobs await consume_job,
    # companies await consume_company).
    def produce_job_company(self):
        """Endless worker: drain ``job_list_page_quene`` page by page.

        For every listing on a page, persist a stub row via
        ``save_job_base_info`` and enqueue the detail URLs that were not in
        the database yet.  Pagination is followed by re-queueing the "next
        page" link, so this worker eventually covers every page.
        """
        while True:
            item = self.job_list_page_quene.get()
            print(item)
            area_name = item['area_name']
            page_url = item['page_url']
            html_str = requests.get(page_url, headers=self.headers,
                                    proxies=self.proxy_ip.get_random_ip()).content.decode()
            element = etree.HTML(html_str)
            for job in element.xpath('//div[@class="job-list"]/ul/li'):
                job_url = self.domain + job.xpath('./div[@class="job-primary"]/div[@class="info-primary"]/h3/a/@href')[0]
                company_url = self.domain + job.xpath('.//div[@class="company-text"]/h3/a/@href')[0]
                # Returns None for ids that were already in the database, so
                # known jobs/companies are never re-queued.
                job_id, company_id = self.save_job_base_info(job_url, company_url, area_name)
                if job_id is not None:
                    self.job_quene.put({'job_id': job_id, 'job_url': job_url})
                if company_id is not None:
                    self.company_quene.put({'company_id': company_id, 'company_url': company_url})
            self.job_list_page_quene.task_done()
            # Follow pagination: the last pager anchor is "next"; it degrades
            # to 'javascript:;' on the final page.
            next_url = element.xpath('.//div[@class="page"]/a[last()]/@href')
            if next_url and next_url[0] != 'javascript:;':
                self.job_list_page_quene.put({
                    'area_name': area_name,
                    'page_url': self.domain + next_url[0],
                })

    # Persist the bare job/company rows; detail fields are filled in later
    # by the consumers.
    def save_job_base_info(self, job_url, company_url, area_name):
        """Insert stub rows for an unseen job (and its company if unseen).

        :param job_url: absolute URL of the job detail page (unique key).
        :param company_url: absolute URL of the company detail page.
        :param area_name: district label the listing was found under.
        :returns: ``(new_job_id, new_company_id)`` — each element is ``None``
                  when the corresponding row already existed.
        """
        # All statements are parameterized: job_url/company_url/area_name are
        # scraped, untrusted strings and must never be interpolated into SQL.
        with self._db_lock:
            self.cursor.execute('select * from job where job_url=%s', (job_url,))
            if self.cursor.fetchone() is not None:
                # Job already captured — nothing to enqueue.
                return None, None
            self.cursor.execute('select * from company where company_url=%s', (company_url,))
            db_company = self.cursor.fetchone()
            if db_company is not None:
                # Company already captured — reuse its primary key.
                new_company_id = None
                db_company_id = db_company[0]
            else:
                self.cursor.execute('insert into company (company_url) values (%s)', (company_url,))
                self.conn.commit()
                db_company_id = new_company_id = self.cursor.lastrowid
            self.cursor.execute('insert into job (company,area,job_url) values (%s,%s,%s)',
                                (db_company_id, area_name, job_url))
            new_job_id = self.cursor.lastrowid
            self.conn.commit()
        return new_job_id, new_company_id

    # Consumer: job detail pages.
    def consume_job(self):
        """Endless worker: scrape job detail pages and complete their rows."""
        while True:
            job = self.job_quene.get()
            job_id = job['job_id']
            job_url = job['job_url']
            html_str = requests.get(job_url, headers=self.headers).content.decode()
            element = etree.HTML(html_str)
            # [3:] drops the three-character prefix (presumably "发布于")
            # from the timestamp text — TODO confirm against live markup.
            publish_time = element.xpath('.//span[@class="time"]/text()')[0][3:]
            job_name = element.xpath('//div[@class="job-primary detail-box"]/div[2]/div[2]/h1/text()')[0]
            job_salary = element.xpath('//div[@class="job-primary detail-box"]/div[2]/div[2]/span/text()')[0]
            # Salary renders as a range like "15k-30k": split it and drop the
            # trailing unit character from each bound.
            salary_low, salary_high = job_salary.split('-')
            job_salary_low = salary_low[:-1]
            job_salary_high = salary_high[:-1]
            # Condition lines render as "label：value" (full-width colon).
            job_condition = element.xpath('//div[@class="job-primary detail-box"]/div[2]/p/text()')
            job_city = job_condition[0].split('：')[1]
            job_experience = job_condition[1].split('：')[1]
            job_education = job_condition[2].split('：')[1]
            job_tags = ';'.join(element.xpath('//div[@class="job-primary detail-box"]//div[@class="job-tags"]/span/text()'))
            job_desc = ';'.join(element.xpath('//div[@class="detail-content"]/div[1]/div/text()')).strip()
            # Parameterized update — scraped values must not be interpolated
            # into the SQL string (quotes in descriptions would break it).
            sql = ('update job set name=%s,salary_low=%s,salary_high=%s,'
                   'work_experience=%s,education=%s,city=%s,description=%s,'
                   'tags=%s,publish_time=%s where id=%s')
            with self._db_lock:
                self.cursor.execute(sql, (job_name, job_salary_low, job_salary_high,
                                          job_experience, job_education, job_city,
                                          job_desc, job_tags, publish_time, job_id))
                self.conn.commit()
            print('职位' + job_url + '已采集')
            self.job_quene.task_done()

    # Consumer: company detail pages.
    def consume_company(self):
        """Endless worker: scrape company detail pages and complete their rows."""
        while True:
            company = self.company_quene.get()
            company_id = company['company_id']
            company_url = company['company_url']
            html_str = requests.get(company_url, headers=self.headers).content.decode()
            element = etree.HTML(html_str)
            company_name = element.xpath('//div[@class="job-sec company-business"]/h4/text()')[0]
            company_info = element.xpath('//div[@class="company-banner"]//div[@class="info-primary"]/p/text()')
            # Three lines means financing info is present (finance, size,
            # type in page order); with two lines financing is missing.
            if len(company_info) == 3:
                company_finance, company_size, company_type = company_info
            else:
                company_size = company_info[0]
                company_type = company_info[1]
                company_finance = "暂无信息"
            # Company profile text.
            company_desc = ' '.join(element.xpath('//div[@class="detail-content"]/div[1]/div/text()')).strip()
            # Office photo URLs, joined with ';' into a single column.
            environments = ';'.join(element.xpath('//div[@class="job-sec picture-list"]/div/ul/li/img/@src'))
            # Business-registration facts (fixed list positions on the page).
            reg_principal = element.xpath('//div[@class="job-sec company-business"]/div[1]/ul/li[2]/text()')[0]
            reg_time = element.xpath('//div[@class="job-sec company-business"]/div[1]/ul/li[3]/text()')[0]
            reg_address = element.xpath('//div[@class="job-sec company-business"]/div[1]/ul/li[6]/text()')[0]
            # Parameterized update — see consume_job for rationale.
            sql = ('update company set name=%s,type=%s,company_size=%s,'
                   'company_finance=%s,description=%s,environments=%s,'
                   'reg_principal=%s,reg_time=%s,reg_address=%s where id=%s')
            with self._db_lock:
                self.cursor.execute(sql, (company_name, company_type, company_size,
                                          company_finance, company_desc, environments,
                                          reg_principal, reg_time, reg_address, company_id))
                self.conn.commit()
            print('公司' + company_url + '已采集')
            self.company_quene.task_done()

    # Orchestration.
    def run(self):
        """Seed the queues, start the workers, and block until all work is done.

        The previous sequential version called ``produce_job_company`` (an
        endless loop) directly, so the two consumers and the queue joins were
        unreachable.  The workers now run as daemon threads — they loop
        forever, and daemon status lets the process exit once the main thread
        finishes — while ``Queue.join`` blocks here until every queued item
        has had a matching ``task_done``.
        """
        # Seed the list-page queue synchronously so the workers have input.
        self.produce_area()
        workers = [
            threading.Thread(target=self.produce_job_company),
            threading.Thread(target=self.consume_job),
            threading.Thread(target=self.consume_company),
        ]
        for worker in workers:
            worker.daemon = True  # workers never return on their own
            worker.start()
        for q in [self.job_list_page_quene, self.company_quene, self.job_quene]:
            q.join()  # block until every put() has a matching task_done()
        print('主线程结束，采集结束')


if __name__ == '__main__':
    # Normal entry point — currently disabled in favour of the proxy smoke
    # test below:
    # boss = BossSpider()
    # boss.run()
    # NOTE(review): debug leftover — fetches one random proxy and checks it
    # can reach the site, instead of running the spider.  Restore the two
    # commented lines above to actually crawl.
    proxies = ProxyIpSpider().get_random_ip()
    print(proxies)
    res = requests.get('https://www.zhipin.com/', proxies=proxies)
    print(res.status_code)
