# -*- coding: UTF-8 -*-
# Author: Damon(wuud1)
# CreateDate: 
# Message:

from untils.requestsTools import requestsTools
import os
from lxml import etree
import pymongo

class dpacSpider:
    """Spider for vehicle-recall announcements on wap.dpac.gov.cn.

    Walks the paginated announcement index, downloads each notice's detail
    page (via the project's ``requestsTools`` wrapper, which also caches the
    HTML under ``basePath``), parses the recall table with lxml XPath and
    stores one structured document per notice into MongoDB.
    """

    def __init__(self):
        # Local cache directory for downloaded HTML pages.
        # NOTE(review): 'dapcHtml' looks like a transposition of 'dpacHtml'
        # — confirm the intended directory name before renaming.
        self.basePath = os.getcwd() + os.sep + '../dapcHtml'
        if not os.path.exists(self.basePath):
            os.makedirs(self.basePath)
        # Index-page URL template: page 1 uses an empty suffix,
        # page N (N > 1) uses "_N" (see run()).
        self.indexUrl = 'http://wap.dpac.gov.cn/qczh/qczhgg1/index{}.html'
        self.rTools = requestsTools(self.basePath)
        self.rTools.headers = {
            "Host": "wap.dpac.gov.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9",
        }
        # Target collection: database "dpac", collection "dpac".
        self.mon = pymongo.MongoClient(host='192.168.10.222', port=27017)["dpac"]["dpac"]
        # URLs whose detail page failed to parse; reported at the end of run().
        self._set = []

    def parse_save(self, doc):
        """Parse one recall-notice detail page and insert it into MongoDB.

        :param doc: lxml HTML element tree of the detail page.
        :raises IndexError: when the page layout does not match the expected
            table structure (caller catches and records the URL).
        """
        tail_tr_list = doc.xpath('//div[@class="table"]/table/tbody/tr')
        # Rows [0:3] are the summary header, the last 9 rows are the
        # contact/footer fields; everything in between is the model list.
        loop_tr = tail_tr_list[4:-9]
        make = tail_tr_list[0].xpath('./td[2]/text()')[0].strip()  # manufacturer
        print(make)
        # Production period, normalized to "YYYYMMDD-YYYYMMDD".
        date = tail_tr_list[1].xpath('./td[2]/text()')[0].strip().replace("-", "").replace("至", "-")
        print(date)
        # Recalled-vehicle count with the trailing "辆" (vehicles) unit dropped.
        count = tail_tr_list[2].xpath('./td[2]/text()')[0].strip().replace("辆", "")
        print(count)
        info_l = []
        for tr in loop_tr:
            model = tr.xpath('./td[1]/text()')[0].strip()
            name = tr.xpath('./td[2]/text()')[0].strip()
            years = tr.xpath('./td[3]/text()')[0].strip()
            qty = tr.xpath('./td[4]/text()')[0].strip()
            # Production time range; cell text is "起：<start>" / "止：<end>".
            time_start_end = tr.xpath('./td[5]//text()')
            time_s = time_start_end[0].replace("起：", "").strip().replace("-", "")
            time_e = time_start_end[1].replace("止：", "").strip(' \n').replace("-", "")
            # VIN range, same "起：/止：" layout.
            vin_start_end = tr.xpath('./td[6]//text()')
            vin_s = vin_start_end[0].replace("起：", "").strip()
            vin_e = vin_start_end[1].replace("止：", "").strip(' \n')
            _dict = {
                "model": model,
                "name": name,
                "years": years,
                "qty": qty,
                "time_s": time_s,
                "time_e": time_e,
                "vin_s": vin_s,
                "vin_e": vin_e,
            }
            info_l.append(_dict)
        A = tail_tr_list[-9].xpath('./td[2]/text()')[0].strip()
        print(A)
        B = tail_tr_list[-8].xpath('./td[2]/text()')[0].strip()
        print(B)
        C = tail_tr_list[-7].xpath('./td[2]/text()')[0].strip()
        print(C)
        D = tail_tr_list[-6].xpath('./td[2]/text()')[0].strip()
        print(D)
        E = tail_tr_list[-5].xpath('./td[2]/text()')[0].strip()
        print(E)
        F = tail_tr_list[-4].xpath('./td[2]/text()')[0].strip()
        print(F)
        G = tail_tr_list[-3].xpath('./td[2]/text()')[0].strip()
        print(G)
        H = tail_tr_list[-2].xpath('./td[2]/text()')[0].strip()
        print(H)
        I = tail_tr_list[-1].xpath('./td[2]/text()')[0].strip()
        print(I)
        # Footer fields are kept positionally; their labels are not parsed,
        # so they are stored under opaque keys A..I.
        other = {
            "A": A,
            "B": B,
            "C": C,
            "D": D,
            "E": E,
            "F": F,
            "G": G,
            "H": H,
            "I": I,
        }
        _fina = {
            "make": make,
            "date": date,
            "count": count,
            "info": info_l,
            "other": other,
        }
        # insert() was deprecated and removed in pymongo 4; insert_one()
        # has the same single-document semantics on pymongo >= 3.
        self.mon.insert_one(_fina)

    def parseDoc(self, li):
        """Fetch and process one announcement linked from an index <li>.

        Failed parses are recorded in ``self._set`` (best-effort: index
        walking continues) instead of aborting the run.
        """
        title = li.xpath('./a/@title')[0]
        docUrl = li.xpath('./a/@href')[0]
        docDate = li.xpath('./span/text()')[0]
        # hrefs are relative ("./xxxx/yyy.html"); strip the leading dot.
        url = 'http://wap.dpac.gov.cn/qczh/qczhgg1' + docUrl.strip('.')
        print(title, url, docDate)
        fileName = docUrl.split("/")[-1].replace(".html", "")
        print(fileName)
        resInfo = self.rTools.parseUrl(url=url, fileName=fileName)
        doc = etree.HTML(resInfo["content"])
        try:
            self.parse_save(doc)
        except Exception:
            # Narrowed from a bare except: do not swallow KeyboardInterrupt/
            # SystemExit; layout mismatches are collected for review.
            self._set.append(url)

    def run(self):
        """Crawl all index pages and every announcement they list."""
        # NOTE(review): the break below fires before fetching page 59,
        # so only pages 1..58 are crawled — confirm whether page countPage
        # itself should be included.
        countPage = 59
        nextPage = 1
        while nextPage:
            print("第{}页".format(nextPage))
            # Page 1 is "index.html"; later pages are "index_<n>.html".
            if nextPage == 1:
                url = self.indexUrl.format("")
            else:
                url = self.indexUrl.format("_{}".format(nextPage))
            fileName = "index_{}".format(nextPage)
            if nextPage >= countPage:
                break
            else:
                nextPage += 1
            resInfo = self.rTools.parseUrl(url=url, fileName=fileName)
            doc = etree.HTML(resInfo["content"])
            # Announcement title list on the index page.
            titleList = doc.xpath('//div[@class="boxl_ul"]/ul/li')
            for li in titleList:
                self.parseDoc(li)

        print("end...")
        print(self._set)


if __name__ == '__main__':
    # Script entry point: build the spider and crawl everything.
    spider = dpacSpider()
    spider.run()
