import csv
import json
import os.path
from urllib.parse import urlparse, parse_qs

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "boss直聘数据可视化分析.settings")
django.setup()
import pandas as pd

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from myApp.models import *


def normalize_url(url):
    """Return *url* reduced to its static part: scheme://host/path.

    Query strings and fragments (the volatile parts of listing URLs) are
    discarded so that two links to the same page compare equal.
    """
    parts = urlparse(url)
    return "{0}://{1}{2}".format(parts.scheme, parts.netloc, parts.path)

class Spider(object):
    """Selenium crawler for zhipin.com (BOSS直聘) job listings.

    Workflow: ``init()`` writes the CSV header once, ``main(max_page)``
    scrapes listing pages into ./temp.csv, and ``save_to_sql()`` cleans the
    CSV and inserts previously-unseen rows into the Django ``JobInfo`` model.
    """

    def __init__(self, type, page):
        # NOTE(review): the parameter name ``type`` shadows the builtin but
        # is kept for backward compatibility with existing callers.
        self.type = type  # job search keyword
        self.page = page  # first page number to crawl
        # %s placeholders are (keyword, page); city=100010000 presumably
        # means "nationwide" -- TODO confirm against the site's city codes.
        self.spiderUrl = "https://www.zhipin.com/web/geek/job?query=%s&city=100010000&page=%s"

    def startBrowser(self):
        """Create and return an Edge WebDriver with anti-automation-detection tweaks."""
        options = webdriver.EdgeOptions()
        # Hide the "browser is being controlled" banner / automation switches.
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument('--disable-blink-features=AutomationControlled')
        # Spoof a regular desktop user agent.
        options.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0')
        browser = webdriver.Edge(options=options)
        return browser

    @staticmethod
    def _parse_salary(text):
        """Parse a salary label into ``(salary, salary_month, internship)``.

        ``salary`` is a list of ints (CNY per month for 'K' labels, raw CNY
        otherwise), ``salary_month`` keeps the '13薪'-style bonus suffix
        ('0薪' when absent), ``internship`` is 1 only for per-day pay.

        Raises:
            ValueError: when no known pattern matches.  The caller's per-job
                ``except`` block then logs and skips the listing.  (The old
                inline code left ``salary`` unbound in that case, producing
                an opaque NameError instead.)
        """
        if 'K' in text:  # regular position, e.g. '15-30K·13薪'
            parts = text.split('·')
            salary = [int(x) * 1000 for x in parts[0].replace('K', '').split('-')]
            salary_month = parts[1] if len(parts) > 1 else '0薪'
            return salary, salary_month, 0
        # Internship (per day) and part-time (per hour/week/month) labels.
        for unit in ('元/天', '元/时', '元/周', '元/月'):
            if unit in text:
                salary = [int(x) for x in text.replace(unit, '').split('-')]
                return salary, '0薪', 1 if unit == '元/天' else 0
        raise ValueError('无法识别的薪资格式: %s' % text)

    @staticmethod
    def _parse_people(company_peoples):
        """Convert a head-count label like '100-499人' into ``[low, high]``."""
        if company_peoples != '10000人以上':
            return list(map(int, company_peoples.replace('人', '').split('-')))
        return [0, 10000]

    def main(self, max_page):
        """Crawl listing pages ``self.page`` .. ``max_page`` into temp.csv.

        One row per job card is appended via ``save_to_csv``; a failure on a
        single card is logged and skipped so the rest of the page survives.
        """
        if self.page > max_page:
            print("所有页面已爬取完毕")
            return

        browser = self.startBrowser()
        try:
            while self.page <= max_page:
                print(f"正在爬取页面的路径：{self.spiderUrl % (self.type, self.page)}")
                browser.get(self.spiderUrl % (self.type, self.page))

                # Wait until at least one job card has rendered.
                try:
                    WebDriverWait(browser, 30).until(
                        EC.presence_of_element_located((By.XPATH, '//ul[@class="job-list-box"]/li'))
                    )
                except TimeoutException:
                    print(f"页面加载超时，跳过第 {self.page} 页")
                    self.page += 1
                    continue

                job_list = browser.find_elements(by=By.XPATH, value='//ul[@class="job-list-box"]/li')

                for index, job in enumerate(job_list):
                    try:
                        print(f'正在爬取第{index + 1}个数据')

                        # job title
                        title = job.find_element(by=By.XPATH,
                                                 value='.//a[@class="job-card-left"]/div[@class="job-title clearfix"]/span[@class="job-name"]').text

                        # 'city·district' -- the district part may be absent
                        addresses = job.find_element(by=By.XPATH,
                                                     value='.//a[@class="job-card-left"]/div[@class="job-title clearfix"]/span[@class="job-area-wrapper"]/span[@class="job-area"]').text.split(
                            '·')
                        address = addresses[0]
                        dist = addresses[1] if len(addresses) > 1 else ''

                        type_ = self.type

                        # Regular jobs carry [experience, education]; internship
                        # cards put education at index 2.
                        tag_list = job.find_elements(by=By.XPATH,
                                                     value='.//a[@class="job-card-left"]/div[@class="job-info clearfix"]/ul[@class="tag-list"]/li')
                        if len(tag_list) == 2:  # regular position
                            work_experience = tag_list[0].text
                            educational = tag_list[1].text
                        elif len(tag_list) >= 3:  # internship
                            work_experience = ''
                            educational = tag_list[2].text
                        else:
                            # Fewer tags than expected: degrade gracefully
                            # instead of raising IndexError and losing the job.
                            work_experience = ''
                            educational = tag_list[-1].text if tag_list else ''

                        # recruiter name and their role
                        hr_name = job.find_element(by=By.XPATH,
                                                   value='.//a[@class="job-card-left"]/div[@class="job-info clearfix"]/div[@class="info-public"]').text
                        hr_work = job.find_element(by=By.XPATH,
                                                   value='.//a[@class="job-card-left"]/div[@class="job-info clearfix"]/div[@class="info-public"]/em').text

                        # benefit/skill tags, serialized as a JSON array
                        work_tag = job.find_elements(by=By.XPATH,
                                                     value='.//div[@class="job-card-footer clearfix"]/ul[@class="tag-list"]/li')
                        work_tag = json.dumps([tag.text for tag in work_tag], ensure_ascii=False)

                        salaries = job.find_element(by=By.XPATH,
                                                    value='.//a[@class="job-card-left"]/div[@class="job-info clearfix"]/span[@class="salary"]').text
                        salary, salary_month, internship = self._parse_salary(salaries)

                        company_title = job.find_element(by=By.XPATH,
                                                         value='.//div[@class="job-card-right"]/div[@class="company-info"]/h3').text

                        # company logo is optional
                        try:
                            company_avatar = job.find_element(by=By.XPATH,
                                                              value='.//div[@class="job-card-right"]/div[@class="company-logo"]/a/img').get_attribute(
                                'src')
                        except NoSuchElementException:
                            company_avatar = '无'

                        # [nature, funding status, head count]; the funding
                        # status element is sometimes missing.
                        company_infos = job.find_elements(by=By.XPATH,
                                                          value='.//div[@class="job-card-right"]/div[@class="company-info"]/ul[@class="company-tag-list"]/li')
                        company_nature = company_infos[0].text
                        if len(company_infos) == 3:
                            company_status = company_infos[1].text
                            company_people = self._parse_people(company_infos[2].text)
                        else:
                            company_status = '未融资'
                            company_people = self._parse_people(company_infos[1].text)

                        # free-text company description, split on the
                        # full-width comma into a JSON array
                        try:
                            company_tags = job.find_element(by=By.XPATH,
                                                            value='.//div[@class="job-card-footer clearfix"]/div[@class="info-desc"]').text
                            if not company_tags:
                                company_tags = '无'
                            else:
                                company_tags = json.dumps(company_tags.split('，'), ensure_ascii=False)
                        except NoSuchElementException:
                            company_tags = '无'

                        detail_url = job.find_element(by=By.XPATH, value='.//a[@class="job-card-left"]').get_attribute(
                            'href')
                        company_url = job.find_element(by=By.XPATH,
                                                       value='.//div[@class="company-info"]/h3/a').get_attribute('href')

                        # Column order must match the header written by init().
                        job_data = [title, address, type_, educational, work_experience,
                                    work_tag, salary, salary_month, company_tags, hr_work,
                                    hr_name, internship, company_title, company_avatar,
                                    company_nature, company_status, company_people,
                                    detail_url, company_url, dist]
                        self.save_to_csv(job_data)
                    except Exception as e:
                        # One broken card must not abort the whole page.
                        print(f"Error processing job {index + 1} on page {self.page}: {e}")

                self.page += 1
        finally:
            browser.quit()

    def clear_csv(self):
        """Load temp.csv, drop incomplete and duplicate rows, return the rows.

        Returns:
            numpy.ndarray: cleaned rows in the CSV column order.
        """
        df = pd.read_csv('./temp.csv')
        print("清理前数据量:", df.shape[0])

        # Rows missing any required field are useless downstream.
        required_columns = ['title', 'address', 'type', 'salary']
        df.dropna(subset=required_columns, inplace=True)
        print("删除空值后的数据量:", df.shape[0])

        df.drop_duplicates(inplace=True)
        print("删除重复值后的数据量:", df.shape[0])

        # Strip the '薪' suffix ('13薪' -> '13'); astype(str) guards against
        # pandas having read the column back with a numeric dtype, which
        # would make the old str-only .replace lambda raise.
        df['salaryMonth'] = df['salaryMonth'].fillna('').astype(str).str.replace('薪', '', regex=False)

        print("最终清理后的数据量:", df.shape[0])
        return df.values

    def save_to_sql(self):
        """Insert cleaned CSV rows into ``JobInfo``, skipping known records.

        Duplicate detection keys on (title, address, type, companyTitle,
        normalized detailUrl); failures are logged per row and do not stop
        the import.
        """
        data = self.clear_csv()
        for job in data:
            try:
                # Serialize list-valued fields for storage (values read back
                # from CSV are usually already strings, so this is a guard).
                salary = json.dumps(job[6], ensure_ascii=False) if isinstance(job[6], list) else job[6]
                company_people = json.dumps(job[16], ensure_ascii=False) if isinstance(job[16], list) else job[16]
                internship = str(job[11])
                # Drop volatile query parameters so re-crawled links match.
                normalized_detail_url = normalize_url(job[17])

                exists = JobInfo.objects.filter(
                    title=job[0],
                    address=job[1],
                    type=job[2],
                    companyTitle=job[12],
                    detailUrl=normalized_detail_url
                ).exists()

                if not exists:
                    new_job = JobInfo.objects.create(
                        title=job[0],
                        address=job[1],
                        type=job[2],
                        educational=job[3],
                        workExperience=job[4],
                        workTag=job[5],
                        salary=salary,
                        salaryMonth=job[7],
                        companyTags=job[8],
                        hrWork=job[9],
                        hrName=job[10],
                        internship=internship,
                        companyTitle=job[12],
                        companyAvatar=job[13],
                        companyNature=job[14],
                        companyStatus=job[15],
                        companyPeople=company_people,
                        detailUrl=normalized_detail_url,
                        companyUrl=job[18],
                        dist=job[19] if len(job) > 19 else ''
                    )
                    print(f"新添加: {new_job}")
                else:
                    print(f"记录已存在，跳过: {job[0]}, {job[1]}, {job[2]}, {job[12]}, {normalized_detail_url}")
            except Exception as e:
                print(f"数据库写入失败: {job}, 错误信息: {e}")

    def save_to_csv(self, row_data):
        """Append one row to temp.csv (UTF-8, no extra blank lines)."""
        with open('./temp.csv', 'a', newline='', encoding='utf-8') as fp:
            writer = csv.writer(fp)
            writer.writerow(row_data)

    def init(self):
        """Create temp.csv with its header row if it does not exist yet."""
        if not os.path.exists('./temp.csv'):
            with open('./temp.csv', 'a', newline='', encoding='utf-8') as fp:
                writer = csv.writer(fp)
                writer.writerow(['title', 'address', 'type', 'educational', 'workExperience', 'workTag',
                                 'salary', 'salaryMonth', 'companyTags', 'hrWork', 'hrName', 'internship',
                                 'companyTitle','companyAvatar', 'companyNature', 'companyStatus',
                                 'companyPeople','detailUrl', 'companyUrl', 'dist'])


if __name__ == '__main__':
    # Typical workflow: init() creates the CSV header once, main(n) crawls
    # listing pages into ./temp.csv, save_to_sql() imports the CSV into the
    # database.  Here only the DB import step is enabled.
    spider_obj = Spider('go实习', 1)
    # spider_obj.init()
    # spider_obj.main(2)
    spider_obj.save_to_sql()



