# -*- coding: utf-8 -*-
# @Time : 2021/3/6 14:58
# @Author : kk_J
# @File: spider.py
# @Software: PyCharm
import urllib.request
import urllib.error

import xlwt
from bs4 import BeautifulSoup
import re


def getHtml(link):
    """Fetch *link* over HTTP and return the page parsed as a BeautifulSoup tree.

    A browser-like User-Agent is sent because the target site rejects the
    default urllib UA. On any URLError the HTTP code and/or reason is printed
    and an empty soup (parsed from "") is returned, so callers always receive
    a BeautifulSoup instance and never an exception.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.81"
    }
    request = urllib.request.Request(link, headers=head)
    html = ""
    try:
        # Context manager closes the connection even if read()/decode()
        # raises; the original leaked the response object.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return BeautifulSoup(html, "html.parser")


def saveData(datalist, savepath):
    """Write the scraped records to an .xls workbook at *savepath*.

    datalist: list of rows; each row is a list aligned with the header
    columns (name, price, spec, details, view count).
    Does nothing when datalist is empty, so no empty file is created.
    """
    if not datalist:  # guard clause instead of wrapping the body in `if len(...)`
        return
    workbook = xlwt.Workbook(encoding="utf-8")
    worksheet = workbook.add_sheet('sheet1', cell_overwrite_ok=True)
    # Header row: user-facing Chinese column labels (do not translate).
    col = ['数据名称', '价格', '规格', '详细信息', '浏览量']
    for index, th in enumerate(col):
        worksheet.write(0, index, th)
    # Data rows start at row 1, directly below the header.
    for row, values in enumerate(datalist, start=1):
        for column, value in enumerate(values):
            worksheet.write(row, column, value)
    workbook.save(savepath)
    print("数据已保存")

def _select_text(bs, selector, default=''):
    """Return the text of the first node matching *selector*, or *default*.

    Guards against IndexError when a page variant lacks the element,
    so one malformed page cannot abort the whole multi-page crawl.
    """
    nodes = bs.select(selector)
    return nodes[0].text if nodes else default


def main():
    """Crawl every listing page, scrape each item's detail page, save to .xls."""
    # 数据获取
    baseurl = 'https://www.tdata.cn'
    recodes = []  # 记录总表
    for page in range(1, 59):
        url = baseurl + '/temp/list/index/param/0-0-0/p/' + str(page) + ".html"
        bs0 = getHtml(url)
        a_links = bs0.select(".datalist .col-md-8 h4 a")

        # 进入每一条数据内部进行数据的提取
        for a_link in a_links:
            link = a_link.get('href')
            bs1 = getHtml(baseurl + link)
            recode = []  # 单条记录

            # 获取数据名称
            title = _select_text(bs1, '.datacontent_head .col-md-9 h3')
            recode.append(title)
            print(title + "is getting")

            # 获取价格
            recode.append(_select_text(bs1, '.api_price_frame .col-md-8 b'))

            # 规格 (may be absent; fall back to '无')
            specifications = bs1.select('.api_format_selected')
            specification = specifications[0].text.strip() if specifications else '无'
            recode.append(specification)

            # 获取数据详细信息
            recode.append(_select_text(bs1, '.api_tab_show'))

            # 获取浏览量
            recode.append(_select_text(bs1, '.api_lookthrough span'))

            recodes.append(recode)
            print(title + "recorded")
        print("page" + str(page) + "finished")

    # 数据保存
    saveData(recodes, 'tdata.xls')


if __name__ == "__main__":
    # Guard so importing this module no longer triggers a full 58-page crawl.
    main()