# -*- coding: utf-8 -*-
# @Time : 2021/3/3 13:54
# @Author : kk_J
# @File: spider.py
# @Software: PyCharm

import urllib.error
import urllib.request
from bs4 import BeautifulSoup
import xlwt
import re


def getHtml(link):
    """Fetch *link* over HTTP and return the page parsed as a BeautifulSoup tree.

    On any URLError (DNS failure, HTTP error status, ...) the error code and/or
    reason is printed and an empty soup is returned, so callers can proceed
    best-effort without try/except of their own.
    """
    head = {
        # Desktop Chrome/Edge UA string so the site serves the regular page.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.81"
    }
    request = urllib.request.Request(link, headers=head)
    html = ""
    try:
        # Use a context manager so the HTTP response is always closed,
        # even if .read()/.decode() raises (the original leaked it).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return BeautifulSoup(html, "html.parser")


def saveData(datalist, savepath):
    """Write a 2-D list of records into sheet 'sheet1' of an .xls workbook.

    Each inner list becomes one spreadsheet row. When *datalist* is empty,
    nothing is written and no file is created.
    """
    if not datalist:
        return
    workbook = xlwt.Workbook(encoding="utf-8")
    sheet = workbook.add_sheet('sheet1', cell_overwrite_ok=True)
    for row_idx, record in enumerate(datalist):
        for col_idx, cell in enumerate(record):
            sheet.write(row_idx, col_idx, cell)
    workbook.save(savepath)
    print("数据已保存")

baseurl = "http://www.chinadatastore.cn/"

# Collect the category links from the site's top navigation bar.
bs1 = getHtml(baseurl + "index.html")
classlink = bs1.select("html body div.warpper div#nav ul.tit li.mod_cate h2 a")
for index, class_ in enumerate(classlink):
    recodes = []
    for page in range(1, 4):  # crawl list pages 1-3 of each category
        classlink2 = baseurl + class_.get("href")
        # Rewrite the page number embedded in the URL. Raw string for the
        # pattern: '\d' in a plain literal is an invalid escape sequence.
        classlink2 = re.sub(r'-\d', '-' + str(page), classlink2)
        bs2 = getHtml(classlink2)
        currentClass = bs2.select('ul.nav li')
        # Only the "外贸企业名录" category is scraped; guard against an empty
        # breadcrumb (e.g. when the page failed to load) before indexing [-1].
        if currentClass and currentClass[-1].string == '外贸企业名录':
            items = bs2.select("div.bk_pro>a")
            for item in items:
                classlink3 = baseurl + 'product/' + item.get("href")
                bs3 = getHtml(classlink3)
                currentClass = bs3.select('ul.nav li')
                data = bs3.select('div.pic_description table')
                # The detail page is expected to carry two tables; the second
                # one holds the company records.
                if len(data) == 2:
                    trs = data[1].find_all('tr')
                    for tr in trs[1:]:  # skip the header row
                        tds = tr.find_all('td')
                        recode = [currentClass[-1].string]
                        for td in tds:
                            recode.append(td.string)
                        # Append every data row. The original rebuilt `recode`
                        # per row but appended only once per item, keeping just
                        # the last row (and appended empty placeholder rows for
                        # items without the expected two tables).
                        recodes.append(recode)
                # str() guard: .string is None when the <li> has nested markup,
                # and None + "finished" would raise TypeError.
                print(str(currentClass[-1].string) + "finished")
        print("-----------------" + str(page) + "--------------------")
    saveData(recodes, "china_data_city.xls")
    print("class" + str(index) + "已经完成搜索")