import requests

from lxml import etree

class Gzi:
    """Scraper for used-car listings on guazi.com.

    Fetches paginated listing pages, follows every car's detail page,
    extracts a few fields, and appends them to a local text file.
    """

    # Site root used to turn relative detail-page hrefs into absolute URLs.
    # (Hoisted: the original re-created this constant on every loop pass,
    # in a local confusingly named `headers`.)
    BASE_URL = 'https://www.guazi.com'

    def __init__(self, headers):
        """
        :param headers: dict of HTTP headers (User-Agent, Cookie) sent with
                        every request; guazi.com rejects cookie-less clients.
        """
        self.headers = headers

    def get_detail_urls(self, url):
        """Fetch one listing page and return absolute URLs of all detail pages.

        :param url: absolute URL of a listing page.
        :return: list of absolute detail-page URLs.
        """
        # timeout added so a stalled server cannot hang the crawl forever.
        response = requests.get(url, headers=self.headers, timeout=10)
        text = response.content.decode('utf-8')
        html = etree.HTML(text)
        # The "hot sale" car list is the first matching <ul>.
        ul = html.xpath("//ul[@class='carlist clearfix js-top']")[0]
        # [:1] skips <li> tiles that carry no <a> (e.g. ads); the original
        # indexed [0] unconditionally and crashed on those.
        return [self.BASE_URL + href
                for li in ul.xpath('./li')
                for href in li.xpath('./a/@href')[:1]]

    @staticmethod
    def _first_text(html, path):
        """Return the stripped text of the first node matched by `path`.

        Raises IndexError when nothing matches — same as the original
        inline `xpath(...)[0].strip()` expressions.
        """
        return html.xpath(path)[0].strip()

    def get_detail_data(self, url):
        """Scrape every detail page linked from the listing page `url`.

        :return: list of dicts. Keys are kept in Chinese because `save`
                 writes the values keyed by these exact strings.
        """
        records = []
        for detail_url in self.get_detail_urls(url):
            response = requests.get(detail_url, headers=self.headers, timeout=10)
            html = etree.HTML(response.content.decode('utf-8'))
            records.append({
                'title': self._first_text(html, '//div[@class="product-textbox"]/h2/text()'),
                # mileage
                '公里数': self._first_text(html, '//div[@class="product-textbox"]/ul/li[@class="two"]/span/text()'),
                # registration city
                '上牌地': self._first_text(html, '//div[@class="product-textbox"]/ul/li[@class="three"][1]/span/text()'),
                # engine displacement
                '排量': self._first_text(html, '//div[@class="product-textbox"]/ul/li[last()-1]/span/text()'),
                # transmission type
                '变速箱': self._first_text(html, '//div[@class="product-textbox"]/ul/li[@class="last"]/span/text()'),
            })
        return records

    def save(self, data, f):
        """Append one scraped record as a single concatenated line to `f`.

        :param data: dict produced by `get_detail_data`.
        :param f: an open, writable text file object.
        """
        f.write(f'{data["title"]}{data["公里数"]}{data["上牌地"]}{data["排量"]}{data["变速箱"]}\n')

    def main(self):
        """Crawl listing pages 1-4 and append all records to guazi.txt."""
        base_url = "https://www.guazi.com/cs/buy/o{}/#bread"
        with open('guazi.txt', 'a', encoding='utf-8') as f:
            for page in range(1, 5):
                records = self.get_detail_data(base_url.format(page))
                print(records)
                for record in records:
                    self.save(record, f)


if __name__ == '__main__':
    # Desktop Chrome UA plus a session cookie captured from a logged-in
    # browser; guazi.com refuses requests that arrive without them.
    request_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
            'Cookie': 'antipas=40G2u395hs340734lQ60J841; uuid=4610e2f5-4aba-442a-afb9-609d1b96c132; clueSourceCode=%2A%2300; ganji_uuid=2908693382579419401843; sessionid=d59d4849-eed8-4b3c-a68f-42f5b8e6e370; lg=1; cainfo=%7B%22ca_a%22%3A%22-%22%2C%22ca_b%22%3A%22-%22%2C%22ca_s%22%3A%22seo_google%22%2C%22ca_n%22%3A%22default%22%2C%22ca_medium%22%3A%22-%22%2C%22ca_term%22%3A%22-%22%2C%22ca_content%22%3A%22-%22%2C%22ca_campaign%22%3A%22-%22%2C%22ca_kw%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22scode%22%3A%22-%22%2C%22keyword%22%3A%22-%22%2C%22ca_keywordid%22%3A%22-%22%2C%22display_finance_flag%22%3A%22-%22%2C%22platform%22%3A%221%22%2C%22version%22%3A1%2C%22client_ab%22%3A%22-%22%2C%22guid%22%3A%224610e2f5-4aba-442a-afb9-609d1b96c132%22%2C%22ca_city%22%3A%22sh%22%2C%22sessionid%22%3A%22d59d4849-eed8-4b3c-a68f-42f5b8e6e370%22%7D; Hm_lvt_936a6d5df3f3d309bda39e92da3dd52f=1593244198; close_finance_popup=2020-06-27; gps_type=1; _gl_tracker=%7B%22ca_source%22%3A%22-%22%2C%22ca_name%22%3A%22-%22%2C%22ca_kw%22%3A%22-%22%2C%22ca_id%22%3A%22-%22%2C%22ca_s%22%3A%22self%22%2C%22ca_n%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22sid%22%3A57677555769%7D; lng_lat=121.310625_31.106025; cityDomain=cs; user_city_id=204; preTime=%7B%22last%22%3A1593245288%2C%22this%22%3A1593244197%2C%22pre%22%3A1593244197%7D; Hm_lpvt_936a6d5df3f3d309bda39e92da3dd52f=1593245289'
        }

    # Build the scraper and kick off the crawl.
    scraper = Gzi(request_headers)
    scraper.main()
