import json
import re
import time
import hashlib
from requests_html import HTMLSession

'''
使用方法：先从淘宝使用开发者工具抓包搜索?jsv包，以复制bash格式复制包到https://curlconverter.com/#，获取cookies中的_m_h5_tk和_m_h5_tk_enc

'''
# Create the shared HTTP session object used for every request in this script.
session = HTMLSession()

class TbSpider(object):
    """Scrape Taobao search results through the mtop WirelessRecommend JSONP API.

    Workflow (one method per crawling stage):
      1. ``__init__``            - collect the search keyword / sort order and the
                                   token cookies needed to sign requests.
      2. ``parse_start_url``     - build a signed request per page and send it.
      3. ``parse_response_data`` - strip the JSONP wrapper, extract item fields.
      4. ``parse_save_data``     - append each item to a local text file.

    NOTE(review): the ``_m_h5_tk`` / ``_m_h5_tk_enc`` cookies expire and must be
    refreshed from a live browser session (see the module-level instructions).
    """

    # Crawling step 1: prepare the request data.
    def __init__(self):
        # Search keyword, entered interactively (simulates a user search).
        self.user_input = input('请输入需要采集的目标：<示例：家具>')
        # Sort-order menu choice ('0'..'4'); mapped through self.sort below.
        self.user_input_sort = input('请输入排序方式：<示例：1>  0:_sale销售量 1:_coefp综合排序 2:_ratesum信用 3:bid价格从低到高 4:_bid价格从高到低')
        self.sort = {'0': '_sale', '1': '_coefp', '2': '_ratesum', '3': 'bid', '4': '_bid'}
        self.search_url = 'https://h5api.m.taobao.com/h5/mtop.relationrecommend.WirelessRecommend.recommend/2.0'

        # Token cookies copied from a live browser capture; the first half of
        # _m_h5_tk is the signing token and it expires periodically.
        self.cookies = {
            '_m_h5_tk': '537970a9c6f00c3b375adb717a02d7c0_1692098361774',
            '_m_h5_tk_enc': 'aec143d5ae575143e58dd3f03f60d1f2',
        }

        # Request headers. Kept empty on purpose; the browser values below are
        # retained as reference and can be uncommented if the API starts
        # rejecting headerless requests.
        self.headers = {
            # 'authority': 'h5api.m.taobao.com',
            # 'accept': '*/*',
            # 'accept-language': 'zh-CN,zh;q=0.9',
            # 'referer': 'https://s.taobao.com/',
            # 'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            # 'sec-ch-ua-mobile': '?0',
            # 'sec-ch-ua-platform': '"Windows"',
            # 'sec-fetch-dest': 'script',
            # 'sec-fetch-mode': 'no-cors',
            # 'sec-fetch-site': 'same-site',
            # 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
        }

    # Crawling step 2: send the request, get the response.
    def parse_start_url(self):
        """Iterate pages 1..30, signing and sending one search request each.

        The ``sign`` query parameter is ``md5(token & t & appKey & data)``
        where ``data`` is the exact JSON string sent in the request, so the
        timestamp and the serialized payload must be identical in both places.
        """
        for page in range(1, 31):
            # Inner search payload (mirrors what the Taobao mobile client sends).
            data3 = {
                "device": "HMA-AL00",
                "isBeta": "false",
                "grayHair": "false",
                "from": "nt_history",
                "brand": "HUAWEI",
                "info": "wifi",
                "index": "4",
                "rainbow": "",
                "schemaType": "auction",
                "elderHome": "false",
                "isEnterSrpSearch": "true",
                "newSearch": "false",
                "network": "wifi",
                "subtype": "",
                "hasPreposeFilter": "false",
                "prepositionVersion": "v2",
                "client_os": "Android",
                "gpsEnabled": "false",
                "searchDoorFrom": "srp",
                "debug_rerankNewOpenCard": "false",
                "homePageVersion": "v7",
                "searchElderHomeOpen": "false",
                "search_action": "initiative",
                "sugg": "_4_1",
                "sversion": "13.6",
                "style": "list",
                "ttid": "600000@taobao_pc_10.7.0",
                "needTabs": "true",
                "areaCode": "CN",
                "vm": "nw",
                "countryNum": "156",
                "m": "pc",
                "page": str(page),  # pagination parameter
                "n": 48,
                "q": self.user_input,  # search keyword
                "tab": "all",
                "pageSize": 48,
                "totalPage": 100,
                "totalResults": 4800,
                "sourceS": "0",
                # BUGFIX: was hard-coded to '_bid'; now honours the sort menu
                # choice (_sale sales / _coefp combined / _ratesum credit /
                # bid price asc / _bid price desc), defaulting to combined
                # ranking on unrecognised input.
                "sort": self.sort.get(self.user_input_sort, '_coefp'),
                "bcoffset": "",
                "ntoffset": "",
                "filterTag": "",
                "service": "",
                "prop": "",
                "loc": "",
                "start_price": "",
                "end_price": "",
                "startPrice": "",
                "endPrice": "",
                "itemIds": "",
                "p4pIds": "",
            }
            # BUGFIX: the payload must be a JSON string (the old f'{dict}'
            # produced single-quoted Python repr, which is not valid JSON).
            # Compact separators match what the browser sends.
            data2 = json.dumps(
                {
                    "appId": "34385",
                    "params": json.dumps(data3, ensure_ascii=False, separators=(',', ':')),
                },
                ensure_ascii=False,
                separators=(',', ':'),
            )

            # The signing token is the first half of the _m_h5_tk cookie
            # (it expires; refresh it when requests start failing).
            token = self.cookies['_m_h5_tk'].split('_')[0]
            t = str(int(time.time()))
            app_key = '12574478'
            # sign = md5("token&timestamp&appKey&data")
            sign_source = f'{token}&{t}&{app_key}&{data2}'
            e_md5 = hashlib.md5(sign_source.encode('utf-8')).hexdigest()

            # Assemble the query-string parameters around the signed payload.
            params = {
                'jsv': '2.6.2',
                'appKey': app_key,
                # BUGFIX: reuse the exact timestamp baked into the sign; a
                # second time.time() call could cross a second boundary and
                # invalidate the signature.
                't': t,
                'sign': e_md5,
                'api': 'mtop.relationrecommend.WirelessRecommend.recommend',
                'v': '2.0',
                'type': 'jsonp',
                'dataType': 'jsonp',
                'callback': f'mtopjsonp{page}',
                'data': data2,
            }

            # Retry the same page until it is accepted (the token may be stale).
            while True:
                response = session.get(
                    url=self.search_url,
                    params=params,
                    cookies=self.cookies,
                    headers=self.headers
                )
                if self.parse_response_data(response, page) == False:
                    print('非法请求')
                    time.sleep(1)  # brief pause so failed retries don't hammer the server
                else:
                    print(f'采集第{page}页成功')
                    break

    # Crawling step 3: parse the response, extract the data.
    def parse_response_data(self, response, page):
        """Extract the items from one page of results.

        Returns False when the response is unparseable or reports an illegal
        request (expired token); returns True after all items were saved.
        """
        print(f'正在爬取第{page}页数据')
        # Decode the raw body (decode() defaults to utf-8).
        html = response.content.decode()
        # Strip the JSONP callback wrapper: keep the outermost {...} payload.
        matched = re.match(r".*?({.*}).*", html, re.S)
        if matched is None:
            # BUGFIX: previously crashed with AttributeError on a body that
            # doesn't look like JSONP; treat it as a failed request instead.
            return False
        r_json = json.loads(matched.group(1))
        if r_json['ret'] == ['FAIL_SYS_ILLEGAL_ACCESS::非法请求']:
            return False
        r2_json = r_json['data']['itemsArray']
        print('\n')
        # Build the record to be saved for each item.
        for data in r2_json:
            item = {
                '店铺名': data['nick'],
                '显示最低价格': data['priceWap'],
                '月销售': data['realSales'],
                'title': data['title'],
                '地区': data['procity']
            }
            print(item)
            self.parse_save_data(item)
        # BUGFIX: previously fell through returning None; the caller compares
        # the result against False, so signal success explicitly.
        return True

    # Crawling step 4: save the data.
    def parse_save_data(self, item):
        """Append one item (as its dict repr) to the per-keyword output file."""
        with open(f'淘宝{self.user_input}销售数据.txt', 'a', encoding='utf-8') as f:
            f.write(f'{item}')
            f.write('\n')
        return True


# Script entry point: build the spider (prompts for keyword and sort order)
# and start paging through the search results.
if __name__ == '__main__':
    spider = TbSpider()
    spider.parse_start_url()
