import requests
import json
from pymongo import MongoClient
class LittleRabbit:
    """Crawler for the car-accessories category of the "Little Rabbit" demo shop.

    Fetches listing pages of the category, then each product's detail page,
    extracts the fields of interest and stores them in a local MongoDB.
    """

    def __init__(self):
        # URL of the car-accessories category listing API.
        self.init_url = 'https://apipc-xiaotuxian-front.itheima.net/category/goods/temporary'
        # Request headers shared by every request.
        self.headers = {
            "Content-Type": "application/json;charset=utf-8",
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
                          'AppleWebKit/537.36 (KHTML, like Gecko)'
                          'Chrome/90.0.4430.212 Safari/537.36'}
        # Client connected to the local MongoDB server.
        self.client = MongoClient('127.0.0.1', 27017)

    def load_category_page(self, page):
        """Fetch one listing page of the car-accessories category.

        :param page: page number to fetch (1-based)
        :return: list of goods summaries found on that page
        """
        # Request body expected by the API; categoryId selects car accessories.
        request_payload = {"page": page, "pageSize": 20, "categoryId": "109243036"}
        # Serialize manually because we set the JSON Content-Type header ourselves.
        json_data = json.dumps(request_payload)
        response = requests.post(url=self.init_url, data=json_data,
                                 headers=self.headers)
        # The goods of the page live under result.items in the JSON answer.
        all_goods = json.loads(response.text)["result"]["items"]
        return all_goods

    def load_detail_page(self, all_goods):
        """Fetch the detail page of every product in *all_goods*.

        :param all_goods: goods summaries returned by load_category_page()
        :return: list of detail-page JSON documents parsed into dicts
        """
        base_url = 'https://apipc-xiaotuxian-front.itheima.net/goods?'
        goods_detail_info = []
        for good_info in all_goods:
            # The detail API takes the product id as a query parameter.
            params = dict(id=good_info['id'])
            response = requests.get(url=base_url, params=params)
            goods_detail_info.append(json.loads(response.text))
        return goods_detail_info

    def parse_page(self, detail_data):
        """Extract the target fields from every product detail document.

        :param detail_data: detail dicts returned by load_detail_page()
        :return: list of dicts holding only the fields to be stored
        """
        all_goods_info = []
        # Base of the user-facing product URL; the product id is appended.
        temp_url = 'http://erabbit.itheima.net/#/product/'
        for info in detail_data:
            dict_data = dict()
            dict_data['商品名称'] = info['result']['name']
            dict_data['商品描述'] = info['result']['desc']
            dict_data['商品链接'] = temp_url + info['result']['id']
            dict_data['商品价格'] = info['result']['price']
            # Keep only the first picture of the detail page.
            dict_data['商品图片'] = info['result']['mainPictures'][0]
            properties = info['result']['details']['properties']
            # One "name:value" line per property entry.  (The comprehension
            # variable used to be named `info`, shadowing the outer loop
            # variable in the reader's eye — renamed for clarity.)
            dict_data['商品详情'] = ''.join(
                [':'.join(prop.values()) + '\n' for prop in properties])
            all_goods_info.append(dict_data)
        return all_goods_info

    def save_data(self, goods_info):
        """Store the extracted product records into MongoDB.

        :param goods_info: list of product dicts produced by parse_page()
        """
        client = self.client
        # BUG FIX: the database was accessed as "rabbi" although the intended
        # (and previously documented) name is "rabbit".
        db = client.rabbit
        try:
            for good in goods_info:
                # insert_one() creates the little_rabbit collection on first use.
                db.little_rabbit.insert_one(good)
            print('保存成功')
            # Echo the stored documents for a quick visual check.
            for doc in db.little_rabbit.find():
                print(doc)
        except Exception as error:
            # Best-effort storage: report the problem instead of crashing.
            print(error)

    def run(self):
        """Drive the crawl: ask for a page range and process every page in it.

        Pages are fetched, parsed and saved one at a time; an end page smaller
        than the begin page simply results in no work.
        """
        begin_page = int(input('起始页码:'))
        end_page = int(input('结束页码:'))
        if begin_page <= 0:
            print('起始页码从1开始')
        else:
            for page in range(begin_page, end_page + 1):
                print(f'正在抓取第{page}页')
                all_goods = self.load_category_page(page)
                goods_detail = self.load_detail_page(all_goods)
                goods_info = self.parse_page(goods_detail)
                self.save_data(goods_info)


if __name__ == '__main__':
    # Entry point: build the crawler and start the interactive crawl loop.
    crawler = LittleRabbit()
    crawler.run()
