import scrapy
from ..items import BossItem
import time
import json

class BossSpider(scrapy.Spider):
    """Crawl Java job listings from zhipin.com (BOSS直聘).

    ``parse`` scrapes each listing page for job cards and builds a partially
    filled :class:`BossItem`; when a card has a detail link, the item travels
    via ``meta`` to :meth:`parse_information_miao`, which fills the item's
    ``content`` field with a JSON blob of detail-page data and yields it.
    """

    name = "boss"
    allowed_domains = ["www.zhipin.com"]
    start_urls = ["https://www.zhipin.com/"]
    # Last listing page to request; pagination stops after this page.
    MAX_PAGE = 10
    # Current listing page; incremented before each pagination request.
    page = 1

    def parse(self, response):
        """Parse one listing page of job cards.

        Yields one detail-page ``scrapy.Request`` per card (carrying the
        partially built item in ``meta['book']``) — or the item itself when
        the card has no detail link — plus a request for the next listing
        page up to ``MAX_PAGE``.
        """
        job_cards = response.xpath('//li[@class="job-card-wrapper"]')
        self.logger.info("Number of job cards found: %d", len(job_cards))

        for card in job_cards:
            title = card.xpath(".//span[@class='job-name']/text()").extract_first() or ''
            salary = card.xpath(".//span[@class='salary']/text()").extract_first() or ''
            area = card.xpath(".//span[@class='job-area']/text()").extract_first() or ''
            detail_path = card.xpath(".//a[@class='job-card-left']/@href").extract_first() or ''

            # The first two tags on the card are experience and education;
            # anything less is treated as missing entirely.
            job_labels = card.xpath(".//ul[@class='tag-list']//text()").extract()
            if len(job_labels) >= 2:
                experience, education = job_labels[0], job_labels[1]
            else:
                experience = education = ''

            company = card.xpath(".//h3[@class='company-name']/a/text()").extract_first() or ''

            company_tags = card.xpath(".//ul[@class='company-tag-list']//text()").extract()
            company_type = company_tags[0] if company_tags else ''

            skills = card.xpath(
                ".//div[@class='job-card-footer clearfix']//ul[@class='tag-list']/li/text()"
            ).extract() or []

            # Build the absolute detail URL once; it is reused as the request
            # URL, the meta identity, and the placeholder content value.
            detail_url = f"https://www.zhipin.com{detail_path}"
            book = BossItem(
                title=title,
                address=area,
                salary=salary,
                experience=experience,
                education=education,
                company=company,
                companyType=company_type,
                skill_list="|".join(skills),
                # Placeholder; replaced with a JSON blob of detail-page data
                # in parse_information_miao when a detail link exists.
                content=detail_url,
            )

            if detail_path:
                self.logger.debug("Requesting detail page: %s", detail_url)
                yield scrapy.Request(
                    url=detail_url,
                    callback=self.parse_information_miao,
                    # Pass the partially built item along with the request.
                    meta={
                        'page_number': self.page,
                        'first_request': False,
                        'book': book,
                        'identity': detail_url,
                    },
                )
            else:
                # No detail link: yield the listing-level data as-is.
                yield book

        # Paginate the Java-in-Hangzhou (city 101210100) search results.
        if self.page < self.MAX_PAGE:
            self.page += 1
            next_url = (
                "https://www.zhipin.com/web/geek/job"
                f"?query=java&city=101210100&page={self.page}"
            )
            yield scrapy.Request(
                url=next_url,
                callback=self.parse,
                meta={'page_number': self.page, 'first_request': False},
            )

    def parse_information_miao(self, response):
        """Parse a job detail page and yield the completed item.

        Reads the partially built item from ``response.meta['book']``,
        scrapes the detail fields, and stores them as a JSON string
        (non-ASCII preserved) in the item's ``content`` field.
        """
        if response.status == 200:
            self.logger.debug("Detail page fetched: %s", response.url)
        else:
            self.logger.warning(
                "Detail request failed with status %d: %s",
                response.status, response.url,
            )

        book = response.meta['book']

        post_title = response.xpath(
            ".//div[@class='info-primary']/div[@class='name']/h1/text()"
        ).extract_first() or ''
        position = response.xpath(
            ".//a[@class='text-desc text-city']/text()"
        ).extract_first() or ''
        # NOTE: 'text-experiece' matches the site's own misspelled CSS class.
        seniority = response.xpath(
            ".//span[@class='text-desc text-experiece']/text()"
        ).extract_first() or ''
        degree = response.xpath(
            ".//span[@class='text-desc text-degree']/text()"
        ).extract_first() or ''
        # string(...) concatenates all text inside the job description block.
        description = response.xpath(
            "string(.//div[@class='job-sec-text'])"
        ).extract_first() or ''
        people_number = response.xpath(
            ".//div[@class='sider-company']/p[3]/text()"
        ).extract_first() or ''
        industry = response.xpath(
            ".//div[@class='sider-company']/p[4]/a/text()"
        ).extract_first() or ''
        location = response.xpath(
            ".//div[@class='location-address']/text()"
        ).extract_first() or ''

        # '无' ("none") is the sentinel for fields the page did not provide.
        content = {
            "post_title": post_title or '无',
            "position_content": position or '无',
            "seniority_content": seniority or '无',
            "degree_content": degree or '无',
            "information_content": description or '无',
            "people_number": people_number or '无',
            "industry_content": industry or '无',
            "location_content": location or '无',
        }
        book['content'] = json.dumps(content, ensure_ascii=False)

        yield book