import json
import os
from concurrent.futures.thread import ThreadPoolExecutor
from queue import Empty, Queue
from random import choice
from threading import Lock
from time import time

import execjs
import requests
from jsonpath import jsonpath

class XianYu:
    """Multithreaded crawler for Goofish (Xianyu) keyword-search results.

    Pipeline, connected by queues:
        getParameter -> queue1 -> sendRequest -> queue2 -> parse_data
                     -> queue3 -> save_data (one JSON file per result page)
    """

    def __init__(self, name, page):
        """
        :param name: product keyword to search for
        :param page: number of result pages to fetch
        """
        self.name = name
        self.page = page
        # Output directories; makedirs returns None, attributes kept for
        # backward compatibility with the original interface.
        self.dirs = os.makedirs("d:/咸鱼", exist_ok=True)
        self.dir = os.makedirs("d:/咸鱼/{}".format(self.name), exist_ok=True)
        # Serializes the page counter / file writes across saver threads.
        self.lock = Lock()
        # _m_h5_tk token (first half of the cookie value). It expires —
        # replace it when the API starts rejecting requests.
        self.token = '6f2b3bf593af973773bd2fd80373d591'
        # appKey, part of the mtop sign material.
        self.h = '34839810'
        # Kept for compatibility; each request now signs with a fresh stamp.
        self.my_time = int(time() * 1000)
        # Proxy pool placeholder (unused — paid proxies deliberately omitted).
        self.proxy = []
        # A few User-Agents rotated per request to soften rate-based anti-crawling.
        self.userAgent = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0',
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36 SLBrowser/9.0.5.9101 SLBChan/105 SLBVPV/64-bit"
        ]
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "Referer": "https://www.goofish.com/",
            "Cookie": "t=81aa15a2e6b982780ecb1017fc3c80bb; cna=KpzZH+wytAECAXUWa5PRyxF8; xlly_s=1; tracknick=xy260453002738; unb=2217763581785; havana_lgc2_77=eyJoaWQiOjIyMTc3NjM1ODE3ODUsInNnIjoiNzVmMTY4ODFlZjRmMDIwYjRkOGNlYjY0MzFhMDQwMjkiLCJzaXRlIjo3NywidG9rZW4iOiIxREdNUnVlQjVpSjNDM0ZOUk43TjZOdyJ9; _hvn_lgc_=77; havana_lgc_exp=1736157502528; isg=BIyMXnWkB4v-GhPxIqZrtdnQXeq-xTBvVHX8V-ZM_DfacS17DtUY_5eKEXnJOWjH; mtop_partitioned_detect=1; _m_h5_tk=6f2b3bf593af973773bd2fd80373d591_1733638923701; _m_h5_tk_enc=b45883959a9709650e7aa2a1bccb209a; cookie2=1bc1e3892c3927d0184d94ad28515049; _samesite_flag_=true; sgcookie=E100V5t3e8xbivhgp8MCUGY%2FOIWEeffxbrHY9Z%2B9xYw5VD%2F0k7TBOU%2Fpq%2BImqz5iQuQPZlW7w6KvaLONosl8kxOAVKhp8BukcQas0vey0CI%2FGV0%3D; csg=31c984c5; _tb_token_=3fbb83e83a3d5; sdkSilent=1733717765666; tfstk=fbNS_61pJ3x7ro6PCeQqcGsdo0GINk1wOegLSydyJbhJvHUt0uorawfIvum4a0zrwU20bPdzaTceObcn9GSN_1zzrXcdBBs_C04Yo2d8pRnarzcHo8U3MGzkARkbCkhLvjHx-VhpvvnRlj3KPXd-pQpvDmmxvXH-2rdxr2R-J4nRlrnmJXnKwKCjuwo7PZrtnXMhk04-cCa0M4FHEznX9BFbhYi9VmOp9Sg7uPJZhC6EXRmrQcqAgQGQk2ZjQ8s6NkaLSJHbO3QzXyEuGbc1LaD_H-yZF-sJOmPrRAhKhedLlji0fSlO6ng0HzyLZuTB94Vz_viihwdnLjFZBREW-N4xw2EnIWSMZ0ULS52Z1_tiP-UsGg7X_c6JepTjIBgjbZ_XKp4qx1NEjDa3NY3muh7fl3Gxeq0jbZ_XKpD-oq7NlZtSM",
            "content-type": "application/x-www-form-urlencoded"
        }
        self.url = 'https://h5api.m.goofish.com/h5/mtop.taobao.idlemtopsearch.pc.search/1.0/'
        self.queue1 = Queue()  # request-parameter queue
        self.queue2 = Queue()  # raw JSON response queue
        self.queue3 = Queue()  # parsed-result queue
        # Page counter for output file names; guarded by self.lock.
        self.count = 1

    def getParameter(self):
        """Build one search payload per page and feed them into queue1."""
        for n in range(self.page):
            payload = json.dumps(
                {"pageNumber": n + 1, "keyword": f"{self.name}", "fromFilter": False, "rowsPerPage": 30, "sortValue": "",
                 "sortField": "", "customDistance": "", "gps": "", "propValueStr": {}, "customGps": "",
                 "searchReqFromPage": "pcSearch"}, ensure_ascii=False)
            # One parameter set per queue item, consumed by the fetcher threads.
            self.queue1.put({"data": payload})

    def getSign(self, complete_compose_data):
        """Run the site's JS signing routine (./sign.js) over the composed string.

        Sign material layout: token + '&' + timestamp + '&' + appKey + '&' + data.

        :param complete_compose_data: the '&'-joined material described above
        :return: the md5-style signature string produced by sign.js
        """
        with open('./sign.js', 'r', encoding='utf8') as f:
            source = f.read()
        ctx = execjs.compile(source)
        return ctx.call('getSign', complete_compose_data)

    def sendRequest(self):
        """Worker: pull params from queue1, sign, POST, push the JSON body to queue2."""
        while True:
            # The timeout is essential: without it the worker would block
            # forever on an empty queue and the pool could never shut down.
            try:
                data = self.queue1.get(timeout=5)
            except Empty:
                break
            # The timestamp in the query string MUST equal the one inside the
            # signed material, or the server rejects the request. Using a
            # fresh stamp per request (consistently in both places) avoids
            # the whole run sharing one increasingly stale value.
            my_time = int(time() * 1000)
            complete_compose_data = self.token + '&' + str(my_time) + '&' + self.h + '&' + data["data"]
            sign = self.getSign(complete_compose_data)
            paramers = {
                "jsv": "2.7.2",
                "appKey": "34839810",
                "t": my_time,
                "sign": sign,
                "v": "1.0",
                "type": "originaljson",
                "accountSite": "xianyu",
                "dataType": "json",
                "timeout": "20000",
                "api": "mtop.taobao.idlemtopsearch.pc.search",
                "sessionOption": "AutoLoginOnly",
                "spm_cnt": "a21ybx.search.0.0",
                "spm_pre": "a21ybx.home.searchInput.0"
            }
            # Per-request header copy: rotating the User-Agent on the shared
            # dict would race between the two fetcher threads.
            headers = dict(self.headers)
            headers['User-Agent'] = choice(self.userAgent)
            try:
                res = requests.post(self.url, data=data, params=paramers, headers=headers)
                # .json() raises ValueError on a non-JSON body; treat that the
                # same as a network failure instead of killing the thread.
                body = res.json()
            except (requests.RequestException, ValueError):
                self.queue1.task_done()
                continue
            self.queue2.put(body)
            self.queue1.task_done()

    def parse_data(self):
        """Worker: extract the resultList payload from each raw response."""
        while True:
            try:
                json_text = self.queue2.get(timeout=5)
            except Empty:
                break
            # jsonpath returns False (not a list) when nothing matches, so a
            # blind [0] would raise — treat "no match" as an empty page.
            matches = jsonpath(json_text, "$..resultList")
            self.queue2.task_done()
            if not matches:
                continue
            self.queue3.put(matches[0])

    def save_data(self):
        """Worker: write each parsed page to its own JSON file on disk."""
        while True:
            try:
                parse_text = self.queue3.get(timeout=5)
            except Empty:
                break
            # Reserve a page number atomically: two saver threads share
            # self.count, and without the lock they could both read the same
            # value and overwrite each other's file.
            with self.lock:
                page_no = self.count
                self.count += 1
            with open(f"d:/咸鱼/{self.name}/第{page_no}页.json", 'w', encoding='utf8') as f:
                f.write(json.dumps(parse_text))
            print(f"第{page_no}页已经完成!")
            self.queue3.task_done()

    def run(self):
        """Launch the pipeline: 1 producer, 2 fetchers, 1 parser, 2 savers."""
        with ThreadPoolExecutor(max_workers=10) as pool:
            pool.submit(self.getParameter)
            pool.submit(self.sendRequest)
            pool.submit(self.sendRequest)
            pool.submit(self.parse_data)
            pool.submit(self.save_data)
            pool.submit(self.save_data)
if __name__ == '__main__':
    # Entry point: ask for a keyword and a page count, then start the crawler.
    keyword = input("请输入你要爬取的商品名字:")
    page_count = int(input("请输入你的页数:"))
    XianYu(keyword, page_count).run()



