# -*- coding: utf-8 -*-
# @Author : Ace
# @File : day02_centos_mongodb数据库.py
# @Software: PyCharm
"""
课题：
  Mongodb数据库的常规使用

知识点：
    1.centos操作系统
    2.centos操作系统安装mongodb数据库
    3.mongodb的常规命令
    4.mongodb与python的交互
    5.爬虫采集数据存储mongodb数据库
"""


"""
分析：
    商品的名称，商品的价格，发货地，销量，
    用户的输入：

报价：1500
"""


"""
if (d.H5Request === !0) {
    var f = "//" + (d.prefix ? d.prefix + "." : "") + (d.subDomain ? d.subDomain + "." : "") + d.mainDomain + "/h5/" + c.api.toLowerCase() + "/" + c.v.toLowerCase() + "/"
      , g = c.appKey || ("waptest" === d.subDomain ? "4272" : "12574478")
      , i = (new Date).getTime()
      , j = h(d.token + "&" + i + "&" + g + "&" + c.data)
      , k = {
        jsv: w,
        appKey: g,
        t: i,
        sign: j
    }
    
后端：会话保持：保持登录状态访问每一个不同页面

h(d.token + "&" + i + "&" + g + "&" + c.data)
d.token: token值来自于cookie中
i: 时间戳
g: 12574478
c.data: 请求参数中的data

h： 加密手段：hashlib--md5加密方式

一个url地址的构成：
# 1.协议部分：https， http
# 2.域名部分：h5api.m.taobao.com
# 3.路径资源部分：/h5/mtop.alimama.union.xt.en.api.entry/1.0/
              D:\SpiderMp4\Python爬虫开发进阶
# 4.参数部分：?jsv=2.5.1&appKey=12574478&t=1646742042945&sign=3cb3fa5d614af86d773bfee89461
            从？开始，后面的数据都是参数部分
"""
# Standard-library imports.
import hashlib
import os
import re
import time
from urllib.parse import quote, unquote

# Third-party imports.
import xlrd
import xlwt
from requests_html import HTMLSession
from xlutils.copy import copy

# Single shared HTTP session used for every request the spider sends.
session = HTMLSession()

# Raw Cookie header copied from a logged-in browser session; the
# _m_h5_tk value inside it is the token consumed by the sign algorithm.
cookie_str = """tracknick=%5Cu9A6C%5Cu5934%5Cu5C71%5Cu603B%5Cu7EDF; thw=cn; enc=fs86FfqmzoFD6NVLmu83Ek65vpyeYjkkIjO5fiZqJEXVdiwwAdv95JZDc%2F0Dz0qA3bE2zslYYNwXyTtyQ6pwBw%3D%3D; miid=5144610761588153279; t=0036afa14cfaa3edb1d99dbe4493e6bc; sgcookie=E100%2Bh%2BqZxHnFKuZOaZ223h3T9B9w4Rtx9Xh5EBPshk9vPq0evwlrzUhONbs3I%2BRJd2kRfF9jpYpP2lVzcSjUAWt%2Bob3nX1QI93ljvKkgHMZyeySat7idqvMihDs662OYiY9; uc3=nk2=odNi1ugSIEsG1Q%3D%3D&lg2=UtASsssmOIJ0bQ%3D%3D&vt3=F8dCvUFht7pP0IsGg98%3D&id2=UU20sOVQOL42mQ%3D%3D; lgc=%5Cu9A6C%5Cu5934%5Cu5C71%5Cu603B%5Cu7EDF; uc4=nk4=0%40o5sMsILjzh9Nu2uetVn566Dbcnyp&id4=0%40U2%2Fz9fEqZEDrEaYBXUVervyjgKsx; _cc_=VT5L2FSpdA%3D%3D; mt=ci=-1_0; _tb_token_=731e6e64639ee; cna=0MGsGgroNmYCAa8IMqrq/P7a; xlly_s=1; _m_h5_tk=8b2c8a7a84de9c8f6c265072f2a532e5_1646750476666; _m_h5_tk_enc=0555257ae1569965507b5e1d907037b7; cookie2=2a30fd6d2e880f9a827d5cb1e04aa1ad; tfstk=c78FBWfeF23elDQSDw_rdBZBFIIdZ06lhP5VxHIymAvgOOjhidwRs-3P__PaEMf..; l=eBMLbg-HgcmgWBPTBOfahurza77OvIRbjuPzaNbMiOCP9s5H522VW6muATYMCnGVHsp9R3yZPcGTBZ0vqyCSnxv9-b293Oex3dC..; uc1=cookie14=UoewBj166kY8KA%3D%3D; isg=BDEx7orO9-f1WFml0tHpE7n1QL3LHqWQaP1PQxNGLfgXOlGMW261YN9cXM5cyT3I"""


class TbSpider(object):
    """Taobao (uland.taobao.com) keyword-search spider.

    Workflow: prompt for a keyword, request each result page from the
    mtop h5 API with a locally computed ``sign``, extract the product
    fields from the JSON response, and append them row by row to a
    local .xls workbook (one sheet per keyword).
    """

    # Header row shared by every sheet; column order matches the row
    # list built in parse_response_data.
    SHEET_HEADERS = ('店铺的名称', '商品的名称', '发货地址', '最低价格', '最高价格', '月销量', '详情页地址')

    def __init__(self):
        """Crawl step 1: prepare the query, endpoint and request headers.

        NOTE: constructing the spider blocks on stdin (``input()``).
        """
        self.user_input = input('请输入要查询的商品名称<例：礼物>:')
        # mtop h5 API endpoint.
        self.start_url = 'https://h5api.m.taobao.com/h5/mtop.alimama.union.xt.en.api.entry/1.0/'
        # The cookie must carry a fresh _m_h5_tk token (see parse_sign_data).
        self.headers = {
            'cache-control': 'no-cache',
            'cookie': cookie_str,
            'pragma': 'no-cache',
            'referer': 'https://uland.taobao.com/',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'
        }

    def parse_start_url(self):
        """Crawl step 2: send a signed request per page, feed JSON to the parser.

        Iterates result pages 0-9.  FIX: the millisecond timestamp is now
        regenerated inside the loop — it used to be computed once for all
        ten requests, so later pages were signed with a stale timestamp.
        :return: None
        """
        for page in range(10):
            print(f'\n\n\n\n正在采集第{page+1}页-------logging！！！')
            # Millisecond timestamp; it appears both in the query string
            # and inside the sign, so the same value must be used for both.
            time_temp = str(int(time.time() * 1000))
            # Page-specific request payload (copied from the site's own request).
            data_dict = {"pNum":page,"pSize":"60","refpid":"mm_26632258_3504122_32538762","variableMap":"{\"q\":\"" + self.user_input + "\",\"navigator\":false,\"clk1\":\"e2dc4599fe505ee9261be1e478469611\",\"union_lens\":\"recoveryid:201_33.51.64.148_18646217_1646741114417;prepvid:201_33.51.64.148_18646217_1646741114417\",\"recoveryId\":\"201_33.8.67.233_19876166_1646742041186\"}","qieId":"36308","spm":"a2e0b.20350158.31919782","app_pvid":"201_33.8.67.233_19876166_1646742041186","ctm":"spm-url:a2e0b.20350158.search.1;page_url:https%3A%2F%2Fuland.taobao.com%2Fsem%2Ftbsearch%3Frefpid%3Dmm_26632258_3504122_32538762%26keyword%3D%25E7%25A4%25BC%25E7%2589%25A9%26clk1%3De2dc4599fe505ee9261be1e478469611%26upsId%3De2dc4599fe505ee9261be1e478469611%26spm%3Da2e0b.20350158.search.1%26pid%3Dmm_26632258_3504122_32538762%26union_lens%3Drecoveryid%253A201_33.51.64.148_18646217_1646741114417%253Bprepvid%253A201_33.51.64.148_18646217_1646741114417"}
            # URL-encode the payload.  The sign below is computed over the
            # same f"{data_dict}" text, keeping payload and sign consistent.
            data_dict_str = quote(f"{data_dict}")
            # Fixed query-string parameters plus the computed sign.
            params = f'?jsv=2.5.1&appKey=12574478&t={time_temp}&sign={self.parse_sign_data(time_temp, data_dict)}&api=mtop.alimama.union.xt.en.api.entry&v=1.0&AntiCreep=true&timeout=20000&AntiFlood=true&data='
            url = self.start_url + params + data_dict_str
            response = session.get(url, headers=self.headers).json()
            self.parse_response_data(response)

    def parse_sign_data(self, time_temp, data_dict):
        """Reproduce the site's JS signature.

        JS equivalent: ``h(d.token + "&" + i + "&" + g + "&" + c.data)``,
        i.e. md5 over token & timestamp & appKey & data.

        :param time_temp: millisecond timestamp string used in the request
        :param data_dict: the request's ``data`` payload (dict)
        :return: hex md5 digest used as the ``sign`` query parameter
        :raises ValueError: if the cookie contains no ``_m_h5_tk`` token
        """
        match = re.search('_m_h5_tk=(.*?)_', cookie_str)
        if match is None:
            # Fail loudly with a clear message instead of the opaque
            # IndexError that findall(...)[0] used to raise.
            raise ValueError('cookie_str does not contain a _m_h5_tk token')
        token = match.group(1)
        result = token + "&" + time_temp + "&" + '12574478' + "&" + f"{data_dict}"
        return hashlib.md5(result.encode()).hexdigest()

    def parse_response_data(self, response):
        """Crawl step 3: extract the product fields from one JSON response.

        :param response: decoded JSON dict returned by the h5 API
        :return: None
        """
        resultList = response['data']['recommend']['resultList']
        for result in resultList:
            shop_name = result['itemName']              # product title
            shop_title = result['shopTitle']            # shop name
            provcity = result['provcity']               # shipping origin
            info_url = result['url']                    # detail-page URL
            promotionPrice = result['promotionPrice']   # lowest price
            price = result['price']                     # highest price
            monthSellCountFuzzyString = result['monthSellCountFuzzyString']  # monthly sales
            # Shape required by parse_save_data: {sheet name: row values},
            # value order matching SHEET_HEADERS.
            data = {
                self.user_input: [shop_title, shop_name, provcity, promotionPrice, price, monthSellCountFuzzyString, info_url]
            }
            self.parse_save_data(data)
            print(f"商品：{shop_name}-------数据采集完成！！！")

    def parse_save_data(self, data):
        """Crawl step 4: append one row to ./TB数据/tb数据.xls.

        :param data: single-key dict {sheet name: [7 cell values]}, e.g.
            ``{'基本详情': ['a', 'b', 'c', 'd', 'e', 'f', 'g']}``
        :return: None
        """
        # The (single) dict key doubles as the sheet name.
        sheet_name = next(iter(data))
        # Ensure the output directory exists under the current working dir.
        os_mkdir_path = os.getcwd() + '/TB数据/'
        os.makedirs(os_mkdir_path, exist_ok=True)
        os_excel_path = os_mkdir_path + 'tb数据.xls'
        if not os.path.exists(os_excel_path):
            # First run: create the workbook with one headed sheet.
            workbook = xlwt.Workbook(encoding='utf-8')
            worksheet1 = workbook.add_sheet(sheet_name, cell_overwrite_ok=True)
            self._write_headers(worksheet1)
            workbook.save(os_excel_path)
        # Add a new headed sheet if this keyword does not have one yet.
        workbook = xlrd.open_workbook(os_excel_path)
        if sheet_name not in workbook.sheet_names():
            # copy() converts the read-only xlrd book into a writable xlwt
            # book while keeping the existing sheets intact.
            work = copy(workbook)
            self._write_headers(work.add_sheet(sheet_name))
            work.save(os_excel_path)
        # Append the row after the matching sheet's current last row.
        # (The workbook is guaranteed to exist at this point.)
        workbook = xlrd.open_workbook(os_excel_path)
        for index, existing_name in enumerate(workbook.sheet_names()):
            if existing_name in data:
                # Current row count of the read-only sheet = append position.
                rows_old = workbook.sheet_by_name(existing_name).nrows
                new_workbook = copy(workbook)
                new_worksheet = new_workbook.get_sheet(index)
                for num, value in enumerate(data[existing_name]):
                    new_worksheet.write(rows_old, num, value)
                new_workbook.save(os_excel_path)

    def _write_headers(self, sheet):
        """Write SHEET_HEADERS and column widths into *sheet* (an xlwt sheet).

        Deduplicates the header-writing loop that previously appeared twice
        in parse_save_data.
        """
        for header_num, header in enumerate(self.SHEET_HEADERS):
            sheet.col(header_num).width = 2560 * 3
            #           row, col,        content
            sheet.write(0, header_num, header)

if __name__ == '__main__':
    # Entry point: prompt for a keyword, then crawl and persist the results.
    TbSpider().parse_start_url()




# aa = 'abcd{}efgh'
# cc = 1
# dd = 2
# def index(cc, dd):
#     ee = cc + dd
#     return ee
#
# f = index(cc, dd)
#
# result = aa.format(f)
# print(result)









































