import os
from typing import Generator

import requests
from lxml import etree

from xunter_utils.xunter_requests import ChromeClient

"""
方法二
做两次请求，先抓取登录请求，传入账号和密码，得到响应的Set-Cookie或token
再抓取请求数据得到需要抓取的数据
"""

class MovieItem:
    """Container for one scraped movie entry (title plus description text)."""

    # Default to empty strings so the attributes exist even if a parse step
    # fails to populate a field (the originals were bare annotations, which
    # create no attribute and would raise AttributeError on read).
    title: str = ''
    content: str = ''


class Login2Spider(ChromeClient):
    """Spider that logs in with a JSON POST and then scrapes listing/detail pages.

    The session maintained by ChromeClient stores the Set-Cookie returned by
    the login response, so subsequent requests are authenticated automatically.
    """

    host = 'https://www.vcg.com'

    def login(self, username: str, password: str) -> None:
        """POST the credentials to the login endpoint.

        The server expects a Request Payload (JSON body), so the parameters
        are passed via ``json=``; a Form Data payload would be passed via
        ``data=`` instead.  The session keeps the Set-Cookie from the
        response, so no token handling is needed here.

        :param username: account name
        :param password: account password
        """
        self.send_request_post(f'{self.host}/graphql/login', json={
            'host': 'www.vcg.com',
            'ip': '218.75.229.71',
            'username': username,
            'password': password,
        })

    def parse(self, response: requests.Response) -> Generator:
        """Yield the detail-page href of every movie card on a listing page.

        Uses contains() rather than an exact @class match so extra classes on
        the card element do not break the query.

        :param response: listing-page response
        :return: generator of relative detail-page URLs
        """
        html = etree.HTML(response.text)
        cards = html.xpath('//div[contains(@class,"m-t is-hover-shadow")]')
        for card in cards:
            hrefs = card.xpath('.//a[@class="name"]/@href')
            # Skip cards that carry no detail link instead of raising IndexError.
            if hrefs:
                yield hrefs[0]

    def parse_detail(self, response: requests.Response) -> Generator:
        """Yield one MovieItem scraped from a detail page.

        :param response: detail-page response
        :raises IndexError: if the expected elements are missing from the page
        """
        html = etree.HTML(response.text)
        content_div = html.xpath('//div[@class="item el-row"]')[0]

        item = MovieItem()
        item.title = content_div.xpath('.//h2[@class="m-b-sm"]/text()')[0]
        item.content = content_div.xpath('.//div[@class="drama"]/p/text()')[0].strip()
        yield item

    def save_item(self, filename: str, item: MovieItem) -> None:
        """Append one item (title then content, one line each) to a text file.

        :param filename: path of the output file (not a directory)
        :param item: scraped data to persist
        """
        with open(filename, 'a', encoding='utf-8') as f:
            f.write(item.title + '\n')
            f.write(item.content + '\n')

if __name__ == '__main__':
    # NOTE(review): credentials were hard-coded in source; read them from the
    # environment instead so they are not committed to version control.  The
    # original literals remain as fallbacks for backward compatibility.
    username = os.environ.get('VCG_USERNAME', '15116366954')
    password = os.environ.get('VCG_PASSWORD', 'wxy123456')

    client = Login2Spider()
    client.login(username, password)
    res = client.send_request(
        'https://www.vcg.com/workstation/overview/?pageNo=1&pageSize=10'
        '&downloadedType=1&downloadBeginDate=2024-09-07&downloadEndDate=2024-12-07'
    )
    # for detail_url in client.parse(res):
    #     res = client.send_request(client.host + detail_url)
    #     for i in client.parse_detail(res):
    #         client.save_item('details.txt', i)
    print(res.text)
