import csv
import os
import time

import requests
from lxml import etree


class GuaZiSpider:
    """Scrape used-car listings from guazi.com (Beijing) and save each
    result page as a CSV file under ./guazi/.

    Workflow: send_request() fetches pages 1-10, processing_data() extracts
    the fields from each parsed page, save_csv() writes one CSV per page.
    """

    def __init__(self):
        # NOTE(review): the Cookie is session-bound and will expire; refresh
        # it from a live browser session when the site starts returning
        # anti-bot pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
            'Cookie': 'uuid=2476a195-74ab-4064-fce0-b426f6fc0230; cityDomain=bj; clueSourceCode=%2A%2300; user_city_id=12; ganji_uuid=7319117991478059499719; sessionid=4d0f6329-8fdf-45e5-a0ad-c57c4622b493; lg=1; Hm_lvt_bf3ee5b290ce731c7a4ce7a617256354=1605140212; cainfo=%7B%22ca_a%22%3A%22-%22%2C%22ca_b%22%3A%22-%22%2C%22ca_s%22%3A%22self%22%2C%22ca_n%22%3A%22self%22%2C%22ca_medium%22%3A%22-%22%2C%22ca_term%22%3A%22-%22%2C%22ca_content%22%3A%22-%22%2C%22ca_campaign%22%3A%22-%22%2C%22ca_kw%22%3A%22-%22%2C%22ca_i%22%3A%22-%22%2C%22scode%22%3A%22-%22%2C%22keyword%22%3A%22-%22%2C%22ca_keywordid%22%3A%22-%22%2C%22display_finance_flag%22%3A%22-%22%2C%22platform%22%3A%221%22%2C%22version%22%3A1%2C%22client_ab%22%3A%22-%22%2C%22guid%22%3A%222476a195-74ab-4064-fce0-b426f6fc0230%22%2C%22ca_city%22%3A%22suqian%22%2C%22sessionid%22%3A%224d0f6329-8fdf-45e5-a0ad-c57c4622b493%22%7D; antipas=1R154H88K081vP8435732P160; preTime=%7B%22last%22%3A1605141004%2C%22this%22%3A1605140212%2C%22pre%22%3A1605140212%7D; Hm_lpvt_bf3ee5b290ce731c7a4ce7a617256354=1605141006'
        }
        # Fixed column order for every output CSV.
        self.csv_headers = ['title', 'year', 'km', 'price', 'original_price']

        # Race-free "create if missing" (replaces exists()+mkdir()).
        os.makedirs('./guazi', exist_ok=True)

    def send_request(self):
        """Fetch listing pages 1-10 and hand each parsed page to
        processing_data()."""
        for page in range(1, 11):
            url = 'https://www.guazi.com/bj/buy/o' + str(page)
            resp = requests.get(url=url, headers=self.headers).text

            html = etree.HTML(resp)
            self.processing_data(html, str(page))
            # Politeness delay between page requests (hoisted here from
            # save_csv, which should not sleep just to write a file).
            time.sleep(1)

    def processing_data(self, html, page_index):
        """Extract title/year/mileage/price fields from one parsed listing
        page and save them via save_csv().

        html: lxml element tree of the listing page (etree.HTML result).
        page_index: page number as a string, used in the output filename.
        """
        base = '//ul[@class="carlist clearfix js-top"]/li'
        title_list = html.xpath(base + '/a/h2/text()')
        car_info_list = html.xpath(base + '//div[@class="t-i"]/text()')
        price_list = html.xpath(base + '//div[@class="t-price"]/p/text()')
        original_price = html.xpath(base + '//div[@class="t-price"]/em/text()')

        # The t-i div mixes year ("...年") and mileage ("...公里") strings;
        # split them by keyword into parallel lists.
        year_list = [info for info in car_info_list if '年' in info]
        km_list = [info for info in car_info_list if '公里' in info]

        # BUG FIX: the original removed whitespace entries from price_list
        # while iterating over it, which silently skips elements. Build a
        # filtered copy instead, and drop ANY whitespace-only entry rather
        # than one exact 20-space string.
        price_list = [p for p in price_list if p.strip()]

        # zip() stops at the shortest list, so a partially-scraped listing
        # can no longer raise IndexError as the index-based loop could.
        csv_list = [
            {'title': t, 'year': y, 'km': k, 'price': p, 'original_price': o}
            for t, y, k, p, o in zip(title_list, year_list, km_list,
                                     price_list, original_price)
        ]
        self.save_csv(csv_list, page_index)

    def save_csv(self, csv_list, page_index):
        """Write one page of row dicts to ./guazi/car_info_page_<n>.csv.

        csv_list: list of dicts keyed by self.csv_headers.
        page_index: page number as a string, used in the filename.
        """
        path = './guazi/car_info_page_' + page_index + '.csv'
        # BUG FIX: newline='' is required by the csv module when writing;
        # without it Windows output contains blank interleaved rows.
        with open(path, 'w', encoding='utf-8', newline='') as f:
            # DictWriter maps each row dict onto the fixed header order.
            f_csv = csv.DictWriter(f, self.csv_headers)
            # First row is the header.
            f_csv.writeheader()
            # Remaining rows are the data.
            f_csv.writerows(csv_list)
        print('第%s页爬取完成!' % page_index)


if __name__ == '__main__':
    # Entry point: build the spider and crawl all ten listing pages.
    GuaZiSpider().send_request()
