# -*- coding: UTF-8 -*-
# Author: Damon(wuud1)
# CreateDate: 
# Message: 公告网数据

import requests
import time
import os
from collections import OrderedDict
from lxml import etree

class ChinaCarWap:
    """Scraper for vehicle announcement data on wap.chinacar.com.cn.

    Searches the announcement site for one brand, walks the paginated
    result list, and saves every vehicle detail page as a timestamped
    HTML file under ``../chinacarHtml/<brand>/<car code>/``.
    """

    def __init__(self, brand):
        # *brand* is the Chinese brand name used as the search term, e.g. "长安".
        self.brand = brand
        self.basePath = os.getcwd()+'/../chinacarHtml'
        self.brandPath = os.getcwd()+'/../chinacarHtml/{}'.format(self.brand)
        # makedirs(exist_ok=True) creates base and brand directories in one
        # race-safe call (replaces the two exists()+mkdir pairs).
        os.makedirs(self.brandPath, exist_ok=True)
        self.baseUrl = 'http://wap.chinacar.com.cn'
        self.searchUrl = 'http://wap.chinacar.com.cn/search.html'
        self.listUrl = 'http://wap.chinacar.com.cn/ggcx_new/list.html'
        self.headers = {
            "Host": "wap.chinacar.com.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
        }
        # Session cookies captured from a browser session; presumably the
        # PHPSESSID expires server-side -- TODO confirm they still work.
        self.cookies = {
            "Hm_lvt_6c1a81e7deb77ce536977738372f872a": "1547462413,1547692754",
            "Hm_lpvt_6c1a81e7deb77ce536977738372f872a": "1547703886",
            "PHPSESSID": "0s3sn1n8l4cqsecardgueaaoa4",
            "Hm_lvt_4643e19c6c22e7974ad707294b823b53": "1547703922",
            "clcp_list": "743214%7C743809%7C743893%7C",
            "Hm_lpvt_4643e19c6c22e7974ad707294b823b53": "",  # set to the current timestamp before each request
        }

    def get_nowTimeStr(self, len=10):
        """Return the current unix time as a string truncated to *len* characters.

        NOTE(review): the parameter shadows the builtin ``len``; the name is
        kept so any existing keyword callers keep working.
        """
        return str(time.time())[:len]

    def getCookies(self, cookies):
        """Refresh the Hm_lpvt_* cookie with the current timestamp.

        Mutates *cookies* in place and returns the same dict.
        """
        cookies['Hm_lpvt_4643e19c6c22e7974ad707294b823b53'] = self.get_nowTimeStr()
        return cookies

    def get_searchPostData(self):
        """Build the announcement-search form payload.

        Only ``s1`` (the brand) is filled in; every other field is submitted
        empty. An OrderedDict keeps the field order the site's form uses.
        """
        _postData = OrderedDict(
            s2="",
            s0="",
            s1=self.brand,
            s12="",
            s9="",
            s4="",
            s11="",
            s18="",
            s3="",
            s7="",
            s8="",
            s5="",
            s6="",
            s10="",
            ss_1="1",
            s_1="",
            ss_2="1",
            s_2="",
            ss_3="1",
            s_3="",
            ss_4="1",
            s_4="",
            ss_5="1",
            s_5="",
            ss_6="1",
            s_6="",
            ss_7="1",
            s_7="",
            ss_8="1",
            s_8="",
            ss_9="1",
            s_9="",
            s15="",
            s16="",
            s17="",
        )
        return _postData

    def get_postHeaders(self, referer):
        """Set Origin/Referer on the shared header dict and return it.

        BUGFIX: the original had no return statement, so callers that used
        its result passed ``headers=None`` to requests.
        """
        self.headers['Origin'] = 'http://wap.chinacar.com.cn'
        self.headers['Referer'] = referer
        return self.headers

    def handle_dataList(self, dataList):
        """Download and save the detail page for each result row in *dataList*.

        *dataList* holds lxml elements, one per search-result row.
        """
        for info in dataList:
            carCode = info.xpath('.//p[contains(text(), "整车型号")]/text()')
            carUrl = info.xpath('./a/@href')
            carCode = carCode[0] if carCode else ""
            carUrl = carUrl[0] if carUrl else ""
            if not carCode or not carUrl:
                continue
            # The text looks like "整车型号：XXX"; skip rows without the
            # fullwidth colon instead of raising IndexError (BUGFIX).
            parts = carCode.split('：')
            if len(parts) < 2:
                continue
            carCode = parts[1]
            carUrl = self.baseUrl + carUrl
            carPath = self.brandPath + '/{}'.format(carCode)
            os.makedirs(carPath, exist_ok=True)
            print(carCode, carUrl)
            response = requests.get(url=carUrl, headers=self.get_postHeaders(self.listUrl), cookies=self.getCookies(self.cookies), timeout=30)
            # Explicit encoding so the saved file is UTF-8 on every platform
            # (the default text encoding is platform-dependent).
            with open(carPath+'/'+self.get_nowTimeStr(10)+'.html', 'w', encoding='utf8') as h:
                h.write(response.content.decode('utf8'))

    def getData(self):
        """Fetch every result page for the brand and save all detail pages."""
        # Warm-up request: load the search page first (establishes session state).
        _ = requests.get(url=self.searchUrl, headers=self.headers, cookies=self.getCookies(self.cookies), timeout=30)
        allPage = 10
        nowPage = 1
        while True:
            # BUGFIX: build the page URL in a local variable; the original
            # appended '?page=N' to self.listUrl itself each iteration,
            # yielding 'list.html?page=2?page=3' from the third page onward.
            pageUrl = self.listUrl
            if nowPage != 1:
                pageUrl = '{}?page={}'.format(self.listUrl, nowPage)
            # Consistency fix: refresh the timestamp cookie and set a timeout,
            # as every other request in this class does.
            searchResponse = requests.post(url=pageUrl, data=self.get_searchPostData(), headers=self.get_postHeaders(self.searchUrl), cookies=self.getCookies(self.cookies), timeout=30)
            searchResponse_html = etree.HTML(searchResponse.content.decode('utf8'))
            # Total page count from the pagination widget; fall back to 71
            # (the value observed when this scraper was written) if missing.
            allPage = searchResponse_html.xpath('//ul[contains(@class, "mui-pagination")]/li[9]//text()')
            allPage = int(allPage[0]) if allPage else 71
            dataList = searchResponse_html.xpath('//li[@class="bb1"]')
            print("第{}页".format(nowPage))
            self.handle_dataList(dataList)
            nowPage += 1
            if nowPage > allPage:
                break
        print("end...")

    def run(self):
        """Entry point: scrape everything; report (not raise) any failure."""
        try:
            self.getData()
        except Exception as e:
            # Top-level boundary: log and swallow so a partial scrape does
            # not kill the process with a traceback.
            print(repr(e))

if __name__ == '__main__':
    # Brand key -> Chinese brand name used by the announcement site.
    brandDict = {
        "changan": "长安"
    }
    brand = "changan"
    ChinaCarWap(brandDict[brand]).run()

# NOTE(review): module-level leftover -- this dict is never referenced in
# this file; presumably a sample of a response payload shape kept for
# reference. Confirm nothing imports it before deleting.
r = {
    "formatdata": "",
    "status": 1,
    "imgname": ""
}