#encoding:utf8
import re
import json
import time
import requests
import codecs
import requests
try:
    import urlparse
except:
    from urllib.parse import urlparse
import logging
from selenium import webdriver
from lxml import html

# Module-wide logging: INFO-level records via the root handler (stderr).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class AutohomePriceSpider(object):
    """Scrape dealer purchase-price records for one car series from
    jiage.autohome.com.cn and append them to ``autohome.csv``.

    The site obfuscates prices: each price span carries a generated CSS class
    whose real text lives in the ``::before`` computed style, so a real
    browser (PhantomJS) is driven to read ``getComputedStyle(...)`` — a plain
    HTTP fetch cannot recover the numbers.
    """

    name = "price"

    # Headers for rendered detail pages (Cookie deliberately left disabled).
    detail_headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        #"Cookie": "sessionid=C1DE3E16-0AFF-4E67-95ED-17644FFE1B05%7C%7C2017-11-14+20%3A45%3A02.249%7C%7C0; fvlid=1510663596931m2XEqGgk0_; historybbsName4=c-314%7C%E6%9C%AC%E7%94%B0CR-V; Hm_lvt_9924a05a5a75caf05dbbfb51af638b07=1510663592; ahpau=1; ahpvno=13; sessionip=183.53.66.87; ref=blog.csdn.net%7C0%7C0%7C0%7C2017-12-25+22%3A15%3A58.295%7C2017-12-24+23%3A18%3A22.103; sessionvid=2EDD7B7C-7F25-405D-8748-188BE2C889A8; area=441999; ahrlid=15142114617715h6l6zb3Ck-1514211466512",
        "Host": "jiage.autohome.com.cn",
        "Referer": "https://jiage.autohome.com.cn/price/carlist/s-2313",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36"
    }

    def __init__(self, url):
        """url: a series list page, e.g.
        ``https://jiage.autohome.com.cn/price/carlist/s-2313#pvareaid=103596``.

        The CSV handle and the browser are created per instance here; they
        used to be class attributes, which opened the file and launched
        PhantomJS as a side effect of merely importing this module.
        """
        self.url = url
        self.excel = codecs.open("autohome.csv", "a+", 'utf_8_sig')
        self.excel.write(u"型号,指导价,购车总价,裸车价,付款方式,购车时间,购车地点,url\r\n")
        self.driver = webdriver.PhantomJS(executable_path='phantomjs.exe')

    def close(self):
        """Release the CSV handle and the browser.

        Uses getattr so it is safe to call even when __init__ failed
        part-way (e.g. PhantomJS binary missing).
        """
        excel = getattr(self, "excel", None)
        if excel:
            excel.close()

        driver = getattr(self, "driver", None)
        if driver:
            driver.quit()

    def start_request(self):
        """Resolve the series id from ``self.url``, fetch its spec list
        (JSON endpoint) and crawl the price page of every car in the series.

        Raises:
            ValueError: when ``self.url`` does not contain ``s-<digits>#``.
        """
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            #"Cookie": "sessionid=C1DE3E16-0AFF-4E67-95ED-17644FFE1B05%7C%7C2017-11-14+20%3A45%3A02.249%7C%7C0; fvlid=1510663596931m2XEqGgk0_; historybbsName4=c-314%7C%E6%9C%AC%E7%94%B0CR-V; Hm_lvt_9924a05a5a75caf05dbbfb51af638b07=1510663592; sessionip=121.14.14.35; ahpau=1; ref=blog.csdn.net%7C0%7C0%7C0%7C2017-12-24+23%3A20%3A31.458%7C2017-12-24+23%3A18%3A22.103; area=440199; ahpvno=12",
            "Host": "jiage.autohome.com.cn",
            "Referer": "https://jiage.autohome.com.cn/price/carlist/s-2313",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }

        match = re.search(r"s\-(\d+)#", self.url)
        if not match:
            # Was a bare `raise Exception`; give callers something to log.
            raise ValueError("no series id (s-<digits>#) found in url: %s" % self.url)

        sid = match.group(1)
        logger.info(sid)
        url = "https://jiage.autohome.com.cn/getSpec?seriesId=" + sid
        logger.info(url)
        resp = requests.get(url, headers=headers)

        # Response is a list of model-year groups, each with its spec items.
        for car_year in json.loads(resp.text):
            for car in car_year.get("specitems"):
                car_id = car.get("id")
                name = car.get("name")
                detial_url = 'https://jiage.autohome.com.cn/price/carlist/p-' + str(car_id)
                logger.info(detial_url)

                self.parse(detial_url, name)

    def parse(self, detial_url, name):
        """Render one car's price-list page in the browser and extract rows."""
        self.driver.get(detial_url)
        self.parse_data(self.driver.page_source, detial_url, name)

    def _decode_prices(self, page_source, field, data_list):
        """Decode one obfuscated price column into ``data_list``.

        ``field`` is both the suffix of the generated span class names
        (``hs_kw0_<field>...``) and the key stored per record. The visible
        text is hidden in the span's ``::before`` content, so it is read via
        the browser's computed style. ``data_list`` is grown as needed so a
        mismatch in span counts between columns cannot raise IndexError.
        """
        pattern = re.compile(r'<span class="(hs_kw0_%s.*?)">' % field, re.S)
        for index, span_class in enumerate(re.findall(pattern, page_source)):
            while len(data_list) <= index:
                data_list.append({})
            script = ("return window.getComputedStyle(document."
                      "getElementsByClassName('%s')[0],'before')"
                      ".getPropertyValue('content')" % span_class)
            span = self.driver.execute_script(script).replace("'", "")
            logger.info(data_list[index])
            data_list[index][field] = span

    def parse_data(self, page_source, url, name):
        """Extract every purchase record on the page and append CSV rows.

        One record per <ul>: decoded prices (guide price / total / naked),
        payment type, purchase date and location.
        """
        data_list = []
        # 指导价 (guide price), 购车总价 (total), 裸车价 (naked price)
        for field in ("fctPrice", "fullPrice", "nakedPrice"):
            self._decode_prices(page_source, field, data_list)

        logger.info(data_list)

        tree = html.fromstring(page_source)
        for index, ul in enumerate(tree.xpath('/html/body/div[3]/div[5]/div[1]/div[@class="price-boxs"]/div[3]/ul')):
            logger.info("index %s" % index)

            # Guard: without this, a page missing the payment-type row would
            # raise UnboundLocalError at the logger.info(paytype) below.
            paytype = ""
            for li in ul.xpath("./div/li"):
                span = li.xpath("./span[1]/text()")[0]
                if span == u'付款方式： ':
                    paytype = li.xpath('./span[2]/text()')[0]

            logger.info(paytype)
            for li in ul.xpath('./li'):
                span = li.xpath('./span[1]/text()')
                if span:
                    if span[0] == u"购车时间：":
                        logger.info(span[0])
                        paydate = clear_text(li.xpath('./span[2]/text()'))
                        paywhere = clear_text(li.xpath('./span[4]/text()'))
                        if index >= len(data_list):
                            # More <ul> rows than decoded price records;
                            # skip rather than crash on data_list[index].
                            continue
                        row = data_list[index]
                        self.excel.write("""%s,%s,%s,%s,%s,%s,%s,%s\r\n""" %
                                         (name, row.get('fctPrice', ''),
                                          row.get('fullPrice', ''),
                                          row.get('nakedPrice', ''),
                                          paytype, paydate, paywhere, url))

def clear_text(node):
    """Normalize an xpath result into a single CSV-safe string.

    Accepts either a text node (string) or a list of them (the raw return of
    ``element.xpath(...)``); only the first element of a list is used.
    Strips surrounding whitespace, removes embedded newlines and replaces
    ASCII commas with full-width ones (，) so the value cannot break the
    comma-separated output row.

    Returns "" for None, empty strings and — unlike the original, which
    raised IndexError — empty lists (xpath may match nothing).
    """
    if isinstance(node, list):
        if not node:
            return ""
        node = node[0]
    if not node:
        return ""
    return (node.strip()
                .replace("\r\n", "")
                .replace("\r", "")
                .replace("\n", "")
                .replace(",", u"，"))

def main():
    """Prompt for a series URL, run the spider, and always release resources.

    Any failure is logged with its traceback instead of aborting the
    interpreter with an unhandled exception.
    """
    logger.info("begin crawler")
    spider = None
    try:
        # e.g. 'https://jiage.autohome.com.cn/price/carlist/s-2313#pvareaid=103596'
        u = input("input your url:")
        spider = AutohomePriceSpider(u)
        spider.start_request()
        logger.info("end crawler")
    except Exception:
        # logger.exception keeps the traceback, unlike "error:%s" % e.
        logger.exception("crawler failed")
    finally:
        # spider is still None if construction itself raised; the old
        # unconditional spider.close() then died with AttributeError.
        if spider is not None:
            spider.close()

if __name__ == '__main__':
    main()
