# -*- coding: utf-8 -*-
# @Time : 2021/3/7 13:20
# @Author : kk_J
# @File: spider.py
# @Software: PyCharm
import json
import re
import time
import urllib.error
import urllib.request

import requests
import xlwt
from bs4 import BeautifulSoup
# NOTE: star import kept for compatibility; re/time are now imported
# explicitly above instead of leaking in through scapy's namespace.
from scapy.all import *

# Crawler settings shared by the main loop.
param = {
    'sleepTime': 3  # seconds to pause between API detail-page requests (throttling)
}


def getHtml(link):
    """Fetch *link* over HTTP and return the page parsed as BeautifulSoup.

    On any ``URLError`` the status code / reason and the failing link are
    printed, and a soup parsed from the empty string is returned instead of
    raising, so the caller's crawl loop keeps going.
    """
    head = {
        # Spoof a desktop-browser UA so the site serves the normal page.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.81"
    }
    request = urllib.request.Request(link, headers=head)
    html = ""
    try:
        # Context manager closes the underlying socket; the original
        # never closed the response and leaked one connection per call.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
        print("error link:" + link)  # fixed typo: was "error lins:"
    bs = BeautifulSoup(html, "html.parser")
    return bs


def saveData(datalist, savepath):
    """Write scraped records to an .xls workbook at *savepath*.

    datalist: list of rows, each row aligned with the header columns
        (name, price, specification, fields, downloads, favorites).
    The workbook is saved even when a cell write raises, so partially
    collected data is never lost.
    """
    tdata = xlwt.Workbook(encoding="utf-8")
    worksheet = tdata.add_sheet('sheet1', cell_overwrite_ok=True)
    col = ['数据名称', '价格', '规格(次)', '字段', '下载量', '收藏量']
    try:
        if datalist:  # idiomatic truthiness instead of len(datalist)
            # Header row.
            for index, th in enumerate(col):
                worksheet.write(0, index, th)
            # Data rows; row 0 is the header, so offset by one.
            for index_i, values in enumerate(datalist):
                for index_j, value in enumerate(values):
                    # "is not None" so legitimate falsy cells (price 0,
                    # download count 0) are still written; the original
                    # `if value:` silently dropped them.
                    if value is not None:
                        worksheet.write(index_i + 1, index_j, value)
    except Exception as e:
        tdata.save(savepath)
        print("-------------------------已保存，保存时有不知名错误，错误信息如下------------------------")
        print(e)
        print("----------------------------------------------------------------------------------")
    else:
        tdata.save(savepath)
        print("-------------------------已保存，保存时无错误------------------------")


# ---------------------------------------------------------------------------
# Main crawl: walk every listing page of juhe.cn's API index, extract one
# record per API (name, price, specification, returned fields, download
# count, favorite count) and save a cumulative .xls snapshot per page.
# ---------------------------------------------------------------------------
baseurl = 'https://www.juhe.cn/'
recodes = []       # all scraped rows, accumulated across pages
tempTableId = 0    # sequence number for emergency partial dumps in temp/
# Trailing numeric id of an API page path, e.g. ".../id/39" -> "/39".
# Raw string fixes the invalid "\d" escape of the original; compiled once
# instead of re-parsed on every item.
id_pattern = re.compile(r"/\d+$")
# Iterate over every listing page of the index.
for page in range(1, 13):
    bs1 = getHtml(baseurl + "docs/index/page/" + str(page))
    items = bs1.select("div.api-info>a")
    count = 0
    # Visit each API detail page on this listing page.
    for item in items:
        count += 1

        bs2 = getHtml(baseurl + item.get("href"))
        recode = []  # one row for this API
        if not bs2.select('.api-aid>a'):  # no aid link -> API under maintenance, skip
            continue
        try:
            # --- title ------------------------------------------------------
            title = bs2.select(".title-api h1")[0].string
            recode.append(title)
            print(title + "begin...")

            # --- price & specification -------------------------------------
            # The detail page is JS-rendered, so pricing comes from the JSON
            # "packages" endpoint instead of the HTML.
            price = 0
            specification = 0
            item_id = id_pattern.findall(item.get("href"))
            item_url = baseurl + "docs/api/packages" + item_id[0]
            item_jsContent = requests.get(item_url).text

            item_jsDict = json.loads(item_jsContent)
            packages = item_jsDict['result']['packages']
            # First non-free package wins; an empty list means member-free,
            # encoded as specification -1.
            if packages:
                for package in packages:
                    if package['price']:
                        price = package['price']
                        specification = package['renums']
                        # Bug fix: the original kept looping and recorded the
                        # LAST paid package despite the "first" intent.
                        break
            else:
                price = 0
                specification = -1
            recode.append(price)
            recode.append(specification)

            # --- returned-field names --------------------------------------
            ziduan = ''
            aid = id_pattern.findall(bs2.select('.api-aid>a')[0].get('href'))
            # Reuse item_id instead of re-running the same regex on the same href.
            ziduan_url = baseurl + item.get("href") + "/aid" + aid[0] + item_id[0]
            ziduan_bs = BeautifulSoup(json.loads(requests.get(ziduan_url).text)['result']['html'], 'html.parser')
            simpleTables = ziduan_bs.select('.simpleTable')
            for simpleTable in simpleTables:
                if simpleTable.select('p') and simpleTable.select('p')[0].string == '返回参数说明：':
                    for tr in simpleTable.select('.api-table tr'):
                        # Row must exist and its last cell must carry text.
                        if tr.select('td') and tr.select('td')[-1].string:
                            ziduan += tr.select('td')[-1].string + ','
            recode.append(ziduan)

            # --- download count --------------------------------------------
            apply_count = item_jsDict['result']['apiData']['applycount']
            recode.append(apply_count)

            # --- favorite count --------------------------------------------
            favcount = item_jsDict['result']['apiData']['favcount']
            recode.append(favcount)

            recodes.append(recode)
            print(f"{title}recorded[{page}/{count}/20]")
            time.sleep(param['sleepTime'])  # throttle to be polite to the server
        except Exception as e:
            # Dump whatever was collected so far so a crash never loses data.
            print("------------------运行时不知名错误，保存已有数据，错误信息如下--------------------")
            print(e)
            print("---------------------------------------------------------------------------")
            recodes.append(recode)
            saveData(recodes, "temp/temp" + str(tempTableId) + ".xls")
            tempTableId += 1


    saveData(recodes, "jdata" + str(page) + ".xls")
    print(f"page {page}saved")
