import scrapy
from scrapy import Request, FormRequest
from scrapy.http import JsonRequest

from scrapy_selenium import SeleniumRequest

# from scrapy.selector import HtmlXPathSelector
from scrapy import Spider
import json


"""
爬取 闲鱼 网站的信息
command: scrapy crawl goofish-spider -a title="prs se nf3" -o output.csv
"""


class GoofishSpider(scrapy.Spider):
    """Spider that queries the Goofish (闲鱼) mtop search API.

    Usage:
        scrapy crawl goofish-spider -a title="prs se nf3" -o output.csv

    The search keyword is supplied via the ``-a title=...`` crawl argument
    and substituted into the POST form payload before requests are issued.
    """

    name = "goofish-spider"

    # Search keyword; overridden by the `-a title=...` command-line argument.
    title = ""

    # POST form payload for the mtop search endpoint.
    formData = {
        "pageNumber": 1,
        "keyword": "prs se nf3",
        "fromFilter": "false",
        "rowsPerPage": 30,
        "sortValue": "",
        "sortField": "",
        "customDistance": "",
        "gps": "",
        "propValueStr": "{}",
        "customGps": "",
        "searchReqFromPage": "pcSearch",
        "extraFilterValue": "{}",
        "userPositionJson": "{}",
    }

    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
    }

    # NOTE(review): these session cookies are hard-coded captures and will
    # expire — refresh them (or inject via settings) before each crawl.
    cookies = {
        "cna": "Ac6QIBhyJCoCAQAAAAB44Q54",
        "t": "02f5f2017120955781197619f9eb4f44",
        "cookie2": "1e74b6b295850bbec1e44e9eeb2b8fb5",
        "xlly_s": "1",
        "_samesite_flag_": "true",
        "_tb_token_": "ee54bbe64e3e3",
        "tracknick": "guitay",
        "unb": "71576485",
        "havana_lgc2_77": "eyJoaWQiOjcxNTc2NDg1LCJzZyI6ImJjOGM3MDI1YTJjMWM0Zjk4MmExMDFhNDI3YjY4Y2Q5Iiwic2l0ZSI6NzcsInRva2VuIjoiMVdoQWptbmNNeldnaHZBZFhwS0xGbncifQ",
        "_hvn_lgc_": "77",
        "havana_lgc_exp": "1749780257847",
        "_m_h5_tk": "ad54ba996a1f51d706f2da4e9bb0bc60_1747284412532",
        "_m_h5_tk_enc": "1dafb633bb2be9ce4f86cddab40fffd8",
        "sgcookie": "E1008s4h8AMLVo8D3tohcko7bUC32fG3QZs4NXnFjPAfAl3oQzKMLBWZSvL2RQv8Krrt2U8EIeNup3DtZpN%2FciGclmNeFLq2WyO0QtQlXKAjzNcDnJzwDfphnAs4YxakQS1K",
        "csg": "9872201e",
        "sdkSilent": "1747365731363",
        "tfstk": "gk0sax42HOX1LN4AGlAUNkXOwE4XTB8PGsNxZjQNMPUOHth8LAya7IYbHA2E7RlaW-Qj3juZ3qGclShxnC22sHlisr4vULkXzfcguwLrUpzvMXU4G9cqfTGisrfnXj0pUffbUDgUkrHYJkFbwrFYBGdCvS2dWSQTM9dQKJFAB-EAv6Fuw-QtHrdI9JVYk5HYXBGKTrC7Ghwi1CqfE_O6L7hTdZQxREq_FfIVkZ3Q1lGq6Jisf2N_f8EYe-eqWvlx7VqHqgzmT0MjDvLC5xGt2xeZJLQb30VjM7aJr_wK5jgQjDWOX7a_Cu3Tv_Ox_onIRPu9ngcEORE78D-HQozsC02uXHvnhXwZHVZdCpyq4XuTAYpFRxqjXANLygJOz8tdIi1QqZNQUBOCmiYvtI0FjXWOPlF31kRBOOn0XWVQUBOCmiqTt5weOB6tm",
    }

    custom_settings = {
        "ITEM_PIPELINES": {
            # 'goofisher.pipelines.TutorialPipeline': 300,
        },
        # Removed the original "middelwares" key: it was a typo and not a
        # recognized Scrapy setting, so it never had any effect.
    }

    start_urls = [
        "https://h5api.m.goofish.com/h5/mtop.taobao.idlemtopsearch.pc.search/1.0/?jsv=2.7.2&appKey=34839810&t=1747279332158&sign=47ed51008e9fc05092508f5f63f44f08&v=1.0&type=originaljson&accountSite=xianyu&dataType=json&timeout=20000&api=mtop.taobao.idlemtopsearch.pc.search&sessionOption=AutoLoginOnly&spm_cnt=a21ybx.search.0.0&spm_pre=a21ybx.home.searchInput.0",
        # "https://www.goofish.com/search?q=prs+se+nf3&spm=a21ybx.home.searchInput.0",
    ]

    def __init__(self, name=None, **kwargs):
        """Accept the search keyword via the `-a title=...` crawl argument.

        Only overrides the default `formData["keyword"]` when a title was
        actually supplied, so a missing argument no longer clobbers the
        keyword with ``None``.
        """
        super().__init__(name, **kwargs)
        # Lazy %-formatting: the string is only built if DEBUG is enabled.
        self.logger.debug("params==>%s", kwargs)
        title = kwargs.get("title")
        if title:
            self.title = title
            self.formData["keyword"] = title

    def start_requests(self):
        """Issue the search POST for each start URL.

        Fixes vs. the original:
        * The method was named ``start_request`` — Scrapy never calls that,
          so the spider produced no requests; the framework hook is
          ``start_requests``.
        * ``scrapy.Request`` has no ``formdata`` parameter (passing it raised
          TypeError), and the JSON body contradicted the declared
          ``application/x-www-form-urlencoded`` content type. ``FormRequest``
          url-encodes the payload correctly.
        """
        self.logger.debug("start_requests........")
        for url in self.start_urls:
            yield FormRequest(
                url=url,
                # FormRequest requires string values in formdata.
                formdata={key: str(value) for key, value in self.formData.items()},
                headers=self.headers,
                cookies=self.cookies,
                meta={"dont_redirect": True},
                callback=self.parse,
            )

            # yield SeleniumRequest(
            #     url=url,
            #     callback=self.parse,
            #     wait_time=10,  # wait for the page to load
            #     # script='document.getElementById("username").value = "admin";',  # run JS
            # )

    def parse(self, response):
        """Handle the search response.

        Currently only logs the response object; TODO: decode the JSON
        payload (``response.json()``) and yield items for the pipeline.
        """
        # Use the spider logger instead of print() so output respects the
        # configured LOG_LEVEL and log handlers.
        self.logger.info("response===> %s", response)
        # driver = response.meta['driver']
        # dynamic_content = driver.find_element_by_css_selector('div.result').text
        # yield {'content': dynamic_content}


#       yield Request(url=url, method='POST', meta=response.meta, headers=response.headers,cookies=response.headers['Set-Cookie'], data=body, callback=self.parseQueryData)


# if __name__ == "__main__":
#    # 示例 API（可替换为您需要请求的 URL）
#    test_url = "https://www.baidu.com"

#    # 调用函数获取数据
#    result = fetch_url(test_url)

#    # 可添加后续处理逻辑
#    if result:
# 示例：提取并展示特定字段
#        if isinstance(result, dict):
#            print("\n提取的字段：")
#            print(f"标题: {result.get('title', '')}")
#            print(f"内容: {result.get('body', '')[:50]}...")
