# -*- coding: UTF-8 -*-
# Jaguar maintenance package price scraper
# Python Version: 2.7.1
import requests
import re
from lxml import etree
import xlwt
import time

# xls output: one workbook/worksheet collects every scraped price row;
# jaguar() saves it to disk at the end of the crawl.
workbook = xlwt.Workbook(encoding = 'utf-8')
worksheet = workbook.add_sheet('Worksheet')

# Default request headers: a desktop-Chrome User-Agent plus a Referer on
# the jaguar.com.cn domain so the site serves the regular pages.
headers = {
        "Referer":"https://www.jaguar.com.cn/ownership/service-warranties/service-package/xe-service-maintenance/xe-2t-service-before-2018.html",
        "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
    }

# Regexes that pull the package-A / package-B prices out of the raw HTML
# ("基础尊护A/B套餐保养价格" = "basic care package A/B maintenance price").
re_a_item = '基础尊护A套餐保养价格: ¥(.*?)</td>'
re_b_item = '基础尊护B套餐保养价格.*?¥(.*?)</td>'
# URL of the most recently fetched page, query string stripped.
# parse_url() rewrites this so relative links can be resolved against it.
last_url = "https://www.jaguar.com.cn/ownership/service-warranties/service-package/xe-service-maintenance/xe-2t-service-before-2018.html"

def parse_url(url):
    print url
    response = requests.get(url=url, headers=headers)

    global last_url
    last_url = url.split("?")[0]
    return response.content

def jaguar():
    base_url = "https://www.jaguar.com.cn"
    start_url = "https://www.jaguar.com.cn/ownership/service-warranties/service-package/index.html"
    headers = {
        "Referer":"https://www.jaguar.com.cn/ownership/service-warranties/service-package/xe-service-maintenance/xe-2t-service-before-2018.html",
        "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"

    }

    response = requests.get(url=start_url, headers=headers)
    html = etree.HTML(response.content)
    url_list = html.xpath("//div[@class='section']/div[@class='el width-1of3 ']/a")
    # cars_first_title = html.xpath("//div[@class='section']/div[@class='el width-1of3 ']//h2/text()")
    # print url_list  # 获取所有车型的url入口
    num = 0
    rows = 0
    worksheet.write(rows, 0, label="车型")
    worksheet.write(rows, 1, label="套餐")
    worksheet.write(rows, 2, label="价格")
    rows += 1
    for url in url_list:
        cars_first_title = url.xpath('./h2/text()')[0]
        url = url.xpath('./@href')
        url = base_url+url[0]
        print url
        content2 = parse_url(url)
        html2 = etree.HTML(content2)

        # 获取当前车型所有款式url入口
        url_list2 = html2.xpath("//aside[@class='el width-1of3']//ul/li/a")
        url_list2.pop()  # 去除最后一个返回上级菜单的url
        # print "a"
        # print url_list2
        for url in url_list2:  # 记得最后一个url不是目标url  已去除
            time.sleep(1)
            car_last_title = url.xpath('./text()')[0]
            print car_last_title
            url = url.xpath('./@href')[0]
            url = base_url+url
            print url
            content3 = parse_url(url)
            html3 = etree.HTML(content3)
            url_list3 = html3.xpath("//ul[@class='list--arrowed']/li/a/@href")  # 最终抓取内容url入口
            last_url_A = url_list3[0]
            last_url_B = url_list3[1]
            last_url_C = url_list3[2]
            # print last_url_A, last_url_B, last_url_C

            # 分别针对这三个url做不同处理
            contentA = parse_url(last_url+last_url_A)
            contentB = parse_url(last_url+last_url_B)
            # with open('./B_item.html', 'w') as f:
            #     f.write(contentB)
            contentC = parse_url(last_url+last_url_C)
            # with open("c_html{}.html".format(num), 'w') as f:
            #     num += 1
            #     f.write(contentC)

            a_item_price = re.findall(re_a_item, contentA)
            if a_item_price:
                try:
                    print a_item_price[0], "a套餐价格"
                except Exception as e:
                    print "没有拿到a"
            else:
                print "wu a"
            b_item_price = re.findall(re_b_item, contentB)
            if b_item_price:
                try:
                    print b_item_price[0], "b套餐价格"
                except Exception as e:
                    print "没有拿到b"
            else:
                print "wu b"

            html_c = etree.HTML(contentC)
            # c_html = etree.HTML(response.content)
            tr_list = html_c.xpath('//tbody/tr')

            tr_list.pop(0)  # 去除第一个非数据标签

            item_c_list = []
            for tr in tr_list:
                label1 = tr.xpath("./th[1]/text()")[0]
                label2 = tr.xpath("./th[2]/text()")[0]
                # label = "".join(label)
                # label = label.replace("\n", "")
                # label = label.split(u"¥")
                print label1, label2
                item_c_list.append({"part":label1, "price":label2})

            dict = {"first_title":cars_first_title, "last_title":car_last_title, "item_a":a_item_price[0], "item_b": b_item_price[0],
                    "item_c":item_c_list
                    }

            # 写入xls
            worksheet.write(rows, 0, label=dict["last_title"])
            worksheet.write(rows, 1, label="A")
            worksheet.write(rows, 2, label=dict["item_a"])
            rows += 1
            worksheet.write(rows, 0, label=dict["last_title"])
            worksheet.write(rows, 1, label="B")
            worksheet.write(rows, 2, label=dict["item_b"])
            rows += 1
            for part in dict["item_c"]:
                worksheet.write(rows, 0, label=dict["last_title"])
                worksheet.write(rows, 1, label=part["part"])
                worksheet.write(rows, 2, label=part["price"])
                rows += 1
            print dict
    workbook.save("jaguar_.xls")

    print "DONE"








# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    jaguar()