# -*- coding: utf-8 -*-
# 开发者：LKM
# 开发日期：2022/6/09 0010 22:39

import functools
import time
import warnings

import requests
import pandas as pd
from bs4 import BeautifulSoup

warnings.filterwarnings("ignore")


def timmer(func):
    """Decorator that prints *func*'s wall-clock run time after each call.

    :param func: the function to wrap; its return value is passed through
                 unchanged.
    :return: the wrapped function.
    """
    @functools.wraps(func)  # preserve the wrapped function's __name__/__doc__
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic; time.time() can jump on clock adjustments
        start_time = time.perf_counter()
        res = func(*args, **kwargs)
        stop_time = time.perf_counter()
        print('Func %s, run time: %s' % (func.__name__, stop_time - start_time))
        return res
    return wrapper


class CrawlingInfo:
    def __init__(self, save_xlsx=False, sheet=848):
        """
        Crawl industrial-park listings from qianzhan.com and persist them.

        :param save_xlsx: if True, write results to an .xlsx file;
                          otherwise append them to a plain-text file.
        :param sheet: exclusive upper bound of the page range to crawl
                      (pages 1..sheet-1); defaults to 848.
        """
        self.url = r"https://f.qianzhan.com/yuanqu/diqu/44/?pg={}"
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE',
            'Content-Type': 'application/json'}
        self.sheet = sheet
        self.data_list = list()  # one 8-field record (list of str) per park
        self.xml_transform_structure()

        if save_xlsx:
            # BUG FIX: the original transposed the frame (.T) before
            # assigning 8 column names, which only works when exactly 8
            # rows were scraped. Each entry of data_list is already one
            # record, so build the frame directly.
            data = pd.DataFrame(
                self.data_list,
                columns=["序号", "名称", "省份", "城市", "地区", "详细地址", "大约面积(亩)", "企业数"],
            )
            data.to_excel("产业园区信息表.xlsx", index=False)
        else:
            # Explicit UTF-8 so the Chinese text round-trips regardless of
            # locale; write one record per line (the original ran all
            # records together with no separator).
            with open(r"产业园区信息表.txt", "a+", encoding="utf-8") as f:
                for info_list in self.data_list:
                    f.write(" ".join(info_list) + "\n")

    @timmer
    def xml_transform_structure(self):
        """Fetch pages 1..sheet-1 and append one record per table row.

        Failures on a page are logged to error.txt and crawling continues
        with the next page (best-effort).
        """
        for page_number in range(1, self.sheet):
            print('正在采集第{}页，请稍后！'.format(page_number))
            url = self.url.format(page_number)
            try:
                response = requests.get(url, headers=self.headers, timeout=5)
                if response.status_code == 200:
                    html_comment = response.content.decode("utf-8")
                    soup = BeautifulSoup(html_comment, 'lxml')
                    # Slice instead of fixed indices 1..20: a short page no
                    # longer raises IndexError and aborts the whole page.
                    # Row 0 is the table header; at most 20 data rows follow.
                    for info in soup.find_all(name="tr")[1:21]:
                        # Hoisted: query the <td> cells once per row instead
                        # of eight times.
                        cells = info.find_all(name='td')
                        if len(cells) < 8:
                            continue  # malformed/short row — skip it
                        # Fields: 序号, 名称, 省份, 城市, 地区, 详细地址,
                        # 大约面积(亩), 企业数
                        self.data_list.append([cells[i].get_text() for i in range(8)])
            except Exception as e:
                # Best-effort crawler: record the failing page and move on.
                with open("error.txt", "a+", encoding="utf-8") as f:
                    f.write(str(e) + "   错误页码{}".format(page_number) + "\n")

            time.sleep(1.5)  # throttle: 1.5 s between page requests


if __name__ == "__main__":
    CrawlingInfo()
