# -*- coding: utf-8 -*-
# @Time: 2023-1-29 9:01
# @File: tools.py
# @IDE: PyCharm

import time
from lxml import etree
from multiprocessing.dummy import Pool
import pymysql

import os
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options

# Directory containing this source file
current_dir = os.path.dirname(os.path.abspath(__file__))
# Absolute path to the bundled chromedriver binary (Windows build)
driver_path = os.path.join(current_dir, 'chromedriver.exe')  # make sure the chromedriver filename matches the actual file


# city, all_page, spider_code
# city, all_page, spider_code
def lieSpider(key_word, city, all_page):
    """
    Entry point: crawl `all_page` pages of liepin.com search results.

    :param key_word: search keyword
    :param city: city name (must be a key of city_dict; unknown cities fall
                 back to '410', i.e. nationwide)
    :param all_page: number of result pages to crawl (int or numeric str)
    """
    # Mapping from city name to the site's internal city code.
    city_dict = {'全国': '410', '北京': '010', '上海': '020', '天津': '030', '重庆': '040', '广州': '050020',
                 '深圳': '050090',
                 '苏州': '060080', '南京': '060020', '杭州': '070020', '大连': '210040', '成都': '280020',
                 '武汉': '170020',
                 '西安': '270020'}
    # Build the list of result-page URLs to crawl (default city: nationwide).
    urls_list = get_urls(key_word, all_page, city_dict.get(city, '410'))
    # Crawl with a small thread pool; keep it small to avoid IP bans.
    pool = Pool(2)
    try:
        pool.map(get_pages, urls_list)
    finally:
        # Always release the pool, even if a worker raised — the original
        # leaked worker threads when pool.map propagated an exception.
        pool.close()
        pool.join()
    print("爬虫执行完成")


def get_urls(key_word, all_page, city_code):
    """
    Build the list of liepin.com search-result URLs to crawl.

    :param key_word: search keyword inserted into the query string
    :param all_page: number of pages (int or numeric str); pages 1..all_page
    :param city_code: liepin internal city code (used for both city and dq)
    :return: list of fully-formed search URLs, one per page
    """
    page_count = int(all_page)
    return [
        f'https://www.liepin.com/zhaopin/?city={city_code}&dq={city_code}&currentPage={page}&pageSize=40&key={key_word}'
        for page in range(1, page_count + 1)
    ]


def get_city():
    """
    Scrape the city filter list (name + site code) from the liepin search page.

    :return: list of [city_name, city_code] pairs; empty list on any failure
    """
    print('开始抓取城市列表...')

    opts = Options()
    # Run Chrome headless so no browser window is required.
    for flag in ('--headless', '--disable-gpu', '--no-sandbox'):
        opts.add_argument(flag)

    browser = webdriver.Chrome(service=Service(driver_path), options=opts)

    try:
        browser.get('https://www.liepin.com/zhaopin/?inputFrom=head_navigation&scene=init&workYearCode=0&ckId=ayvlgrooqq8e4w2b3yoae69sd91dmbq9')
        time.sleep(3)  # crude wait for the JS-rendered filter widgets
        page = etree.HTML(browser.page_source)
        # The <li data-key="dq"> elements carry both the code and the name.
        codes = page.xpath('//li[@data-key="dq"]/@data-code')
        names = page.xpath('//li[@data-key="dq"]/@data-name')
        cities = [[city_name, city_code] for city_name, city_code in zip(names, codes)]
        print('抓取到的城市列表:', cities)
        return cities
    except Exception as exc:
        print('抓取城市列表失败:', exc)
        return []
    finally:
        browser.quit()


def get_pages(url):
    """
    Crawl a single search-result page and insert new job rows into MySQL.

    Rows whose detail-page href (query string stripped) already exists in
    job_data are skipped. The trailing `key=` value of `url` is stored as
    the search keyword for each row.

    :param url: search-result page URL to crawl
    """
    conn, cur = get_mysql()
    # Original code crashed here (and again in finally) when get_mysql()
    # returned (None, None); bail out early instead.
    if conn is None or cur is None:
        print(f'数据库连接不可用，跳过 {url}')
        return

    print(f'开始爬取 {url}...')

    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--no-sandbox')

    service = Service(driver_path)
    driver = webdriver.Chrome(service=service, options=chrome_options)

    try:
        driver.get(url)
        time.sleep(3)  # crude wait for the JS-rendered job list
        req_html = etree.HTML(driver.page_source)

        # Extract one parallel list per job attribute.
        name = req_html.xpath('//div[@class="jsx-2387891236 ellipsis-1"]/text()')
        salary = req_html.xpath('//span[@class="jsx-2387891236 job-salary"]/text()')
        address = req_html.xpath('//span[@class="jsx-2387891236 ellipsis-1"]/text()')
        education = req_html.xpath('//div[@class="jsx-2387891236 job-labels-box"]/span[2]/text()')
        experience = req_html.xpath('//div[@class="jsx-2387891236 job-labels-box"]/span[1]/text()')
        com_name = req_html.xpath('//span[@class="jsx-2387891236 company-name ellipsis-1"]/text()')
        tag_list = req_html.xpath('//div[@class="jsx-2387891236 company-tags-box ellipsis-1"]')
        href_list = req_html.xpath('//a[@data-nick="job-detail-job-info"]/@href')

        # First tag span = company label, last = company scale; pad with ''
        # so these lists stay aligned with the others.
        label_list = []
        scale_list = []
        for tag in tag_list:
            span_list = tag.xpath('./span/text()')
            if span_list:
                label_list.append(span_list[0])
                scale_list.append(span_list[-1])
            else:
                label_list.append('')
                scale_list.append('')

        # Truncate every list to the shortest length so row fields line up.
        lists = [name, salary, address, education, experience, com_name, label_list, scale_list, href_list]
        min_length = min(len(lst) for lst in lists)
        for lst in lists:
            lst[:] = lst[:min_length]

        # Known hrefs, as a set for O(1) duplicate checks per row.
        cur.execute('SELECT href FROM job_data')
        known_hrefs = {x[0] for x in cur.fetchall()}

        insert_sql = '''INSERT INTO job_data(name, salary, place, education, experience, company, label, scale, href, key_word) 
                         VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''

        for i in range(min_length):
            href = href_list[i].split('?')[0]  # drop tracking query string
            if href in known_hrefs:
                print(f'数据已存在，跳过: {href}')
                continue
            # url ends with "...&key=<key_word>", so this recovers the keyword.
            data = (name[i], salary[i], address[i], education[i], experience[i],
                    com_name[i], label_list[i], scale_list[i], href, url.split('=')[-1])
            try:
                cur.execute(insert_sql, data)
                conn.commit()
                print(f'插入数据成功: {name[i]}')
            except Exception as e:
                print(f'插入数据失败: {e}')
                conn.rollback()

    except Exception as e:
        print(f'爬取页面 {url} 失败: {e}')
    finally:
        # Nested so the browser is quit even if closing the DB handles raises
        # (original could leak the Chrome process in that case).
        try:
            cur.close()
            conn.close()
        finally:
            driver.quit()


def get_mysql():
    """
    Open a connection to the local recommend_job MySQL database.

    :return: (connection, cursor) on success, (None, None) on failure
    """
    # NOTE(review): credentials are hard-coded; consider moving to config.
    conn_params = dict(
        host='localhost',
        port=3306,
        user='root',
        passwd='123321qq',
        database='recommend_job',
        autocommit=True,
        charset='utf8mb4',
    )
    try:
        connection = pymysql.connect(**conn_params)
        return connection, connection.cursor()
    except Exception as err:
        print(f'连接数据库失败: {err}')
        return None, None


if __name__ == '__main__':
    # Demo run: crawl 1 page of "java" jobs in Beijing.
    lieSpider('java', '北京', '1')
