# -*- coding: utf-8 -*-
# @Time : 2021/3/24 20:00
# @Author : kk_J
# @File: spider.py
# @Software: PyCharm
import json
import time
import urllib.request
import urllib.error
import requests

import xlwt
from bs4 import BeautifulSoup

# Scraper tuning knobs: request pacing and rows-per-output-file.
param = dict(
    sleep_time=3,     # seconds to pause between product requests
    list_length=10,   # buffered rows before flushing to an .xls file
)


def save_data(datalist, save_path):
    """Write scraped records to an .xls workbook at *save_path*.

    Args:
        datalist: list of rows; each row is a list of cell values in the
            same order as the header columns defined below.
        save_path: destination path of the Excel file.

    The workbook is saved both on success and on failure, so partially
    written data is never lost; any write error is printed.
    """
    data = xlwt.Workbook(encoding="utf-8")
    worksheet = data.add_sheet('sheet1', cell_overwrite_ok=True)
    col = ['数据名称', '价格', '规格(次)', '正面评价次数', '正确返回示例']
    try:
        if datalist:
            # Header row at row 0.
            for index, th in enumerate(col):
                worksheet.write(0, index, th)
            # Data rows start at row 1 (row 0 holds the header).
            for index_i, values in enumerate(datalist):
                for index_j, value in enumerate(values):
                    # NOTE(review): falsy cells (0, '', None) are skipped,
                    # so e.g. a price of 0 is left blank — confirm intended.
                    if value:
                        worksheet.write(index_i + 1, index_j, value)
    except Exception as e:
        # Save whatever was written before the error, then report it.
        data.save(save_path)
        print("-------------------------已保存，保存时有不知名错误，错误信息如下------------------------")
        print(e)
        print("----------------------------------------------------------------------------------")
    else:
        data.save(save_path)
        print("-------------------------已保存，保存时无错误------------------------")


def get_html(link):
    """Fetch *link* with a browser User-Agent and return the parsed page.

    Args:
        link: absolute URL to request.

    Returns:
        A BeautifulSoup tree of the response body; on a URL error the
        code/reason is printed and a soup of the empty string is
        returned, so callers always receive a soup object.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.81 "
    }
    request = urllib.request.Request(link, headers=head)
    html = ""
    try:
        # Context manager closes the response; the original leaked it.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return BeautifulSoup(html, "html.parser")


# Crawl 42 listing pages of the Baidu API Market and export product
# records to ./data/dataN.xls files in batches of param['list_length'].
baseurl = "https://market.baidu.com"
recodes = []        # rows buffered in memory, not yet written to disk
count = 0           # number of rows currently buffered
files_number = 0    # numeric suffix of the next output file

for page in range(1, 43):
    url = baseurl + "/list/0?keyword=&label=&cid=130&priceFrom=0&pageNo=" + str(page)
    bs0 = get_html(url)
    items = bs0.select('div.productList__recommend-1pfHj+div a')
    # Scrape every product linked from this listing page.
    for item in items:
        recode = []
        try:
            # NOTE: the original also fetched the product HTML page here
            # (get_html(baseurl + href)) but never used the result — that
            # wasted one full HTTP request per item and has been removed.
            href = item.get('href')
            jsonUrl = baseurl + '/api/market/web/product' + href[7:] + '/detail'
            itemJson = json.loads(requests.get(jsonUrl).text)['result']['product']['productInfo']

            # Product name
            title = itemJson['title']
            recode.append(title)

            # Price / specification: take the first period with a price set.
            price = 0
            specification = 0
            for item_price in itemJson['priceInfo'][0]['periods']:
                if item_price['price']:
                    price = item_price['price']
                    specification = item_price['time']
                    break
            recode.append(price)
            recode.append(specification)

            # Number of positive ratings
            commentJsonUrl = jsonUrl + '/ratings?pageNo=1'
            commentCount = json.loads(requests.get(commentJsonUrl).text)['result']['totalCount']
            recode.append(commentCount)

            # Sample of a successful API response
            apiIdUrl = baseurl + '/api/market/web/api/groups' + href[7:]
            apiId = json.loads(requests.get(apiIdUrl).text)['result']['apis'][0]['apiId']
            sampleItemUrl = baseurl + '/api/market/web/api/detail/productId=' + href[8:] + '&apiId=' + apiId
            resultSample = json.loads(requests.get(sampleItemUrl).text)['result']['resultSample']
            recode.append(resultSample)

            # Append the finished row to the buffer.
            recodes.append(recode)
            print('page:' + str(page) + ' ' + title + " finished")
            count += 1
            # Flush the buffer once it exceeds list_length rows, to keep
            # memory bounded during a long crawl.
            if count > param['list_length']:
                save_data(recodes, "./data/data" + str(files_number) + ".xls")
                files_number += 1
                count = 0
                recodes = []

            # Sleep between items so the site does not flag us as an attack.
            time.sleep(param['sleep_time'])
        except Exception as e:
            print("------------------运行时不知名错误，保存已有数据，错误信息如下--------------------")
            print(e)
            print("---------------------------------------------------------------------------")
            # Save everything collected so far (including the partial row)
            # and start a fresh buffer/file.
            recodes.append(recode)
            save_data(recodes, "./data/data" + str(files_number) + ".xls")
            recodes = []
            count = 0
            files_number += 1

# Flush any rows left over when the crawl ends — the original dropped
# up to list_length rows here because the final partial batch was
# never written to disk.
if recodes:
    save_data(recodes, "./data/data" + str(files_number) + ".xls")
    files_number += 1