import csv
import os
import time
import requests
from lxml import etree

# Scrape used-car listings for Suqian city from xin.com (result pages 1-5).
# For each listing: print its fields, append selected models ('Z4敞篷车', '7系')
# to a per-model CSV under 优信二手车/<brand>/, and download the listing's
# thumbnail image into 优信二手车/<brand>/images/.
#
# Request headers (including the session cookie) are constant across pages,
# so build them once instead of once per iteration.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
    'Cookie': 'pif=; RELEASE_KEY=; XIN_UID_CK=9e19a121-98ad-4a5b-5588-1e53e6bac0db; acw_tc=deba12a716075631762666122e59e210a6275878d073ab04c874e5d131; Hm_lvt_ae57612a280420ca44598b857c8a9712=1607563178; acw_sc__v2=5fd1781d72d3b50ec8e137c8b720ed1c4bc77a57; XIN_anti_uid=F801FD5D-8A2F-D33C-0132-B295D1789224; XIN_LOCATION_CITY=%7B%22cityid%22%3A%221520%22%2C%22cityname%22%3A%22%5Cu5bbf%5Cu8fc1%22%2C%22ename%22%3A%22suqian%22%2C%22service%22%3A%221%22%2C%22zhigou%22%3A%221%22%2C%22is_visit%22%3A%221%22%2C%22city_rank%22%3A%22100%22%2C%22is_gold_partner%22%3A%22-1%22%2C%22direct_rent_support%22%3A%221%22%2C%22is_wz_mortgage%22%3A%221%22%7D; session_xin=q63evu8l9mj5a4civduss1oaracu0t0l; uid=CvQxVl/ReB1SeAAWE7JIAg==; SEO_SOURCE=https://www.xin.com/suqian/i2/; SEO_REF=https://www.xin.com/suqian/i2/; Hm_lpvt_ae57612a280420ca44598b857c8a9712=1607563299; SERVERID=b13028cd19ef1711d5f40612dd61793a|1607563297|1607563293'
}

CSV_HEADERS = ['图片url', '车名', '年份', '里程', '仓库', '首付', '价格']

for page in range(1, 6):
    page_url = 'https://www.xin.com/suqian/i%s/' % page
    # timeout prevents the script from hanging forever on a stalled connection
    r = requests.get(url=page_url, headers=HEADERS, timeout=10)
    html = etree.HTML(r.content.decode('utf-8'))
    li_list = html.xpath('//*[@id="search_container"]/div[1]/ul/li')
    for li_temp in li_list:
        # normalize-space() collapses the surrounding whitespace the page
        # embeds in these text nodes.
        img = li_temp.xpath('normalize-space(./@data-img)')
        name = li_temp.xpath('normalize-space(./div[2]/a/div/h2/span/text())')
        year = li_temp.xpath('normalize-space(./div[2]/a/div/span/text()[1])')
        lc = li_temp.xpath('normalize-space(./div[2]/a/div/span/text()[2])')
        cang = li_temp.xpath('normalize-space(./div[2]/a/div/span/span/text())')
        price_first = li_temp.xpath('normalize-space(./div[2]/a/div/p[1]/span/text())')
        price = li_temp.xpath('normalize-space(./div[2]/a/div/p[1]/em/text())')
        print(img, name, year, lc, cang, price_first, price)

        # Expected name shape: "<brand> <series> <model> ..." — skip listings
        # whose name doesn't split into at least 3 tokens instead of crashing
        # with IndexError below.
        name_first = name.split()
        if len(name_first) < 3:
            continue

        brand_dir = os.path.join(os.getcwd(), '优信二手车', name_first[0])
        os.makedirs(brand_dir, exist_ok=True)

        if name_first[2] == 'Z4敞篷车' or name_first[2] == '7系':
            csv_path = os.path.join(brand_dir, '%s.csv' % name_first[2])
            # Original code appended the header row on every listing; write it
            # only when the file is first created.
            write_header = not os.path.exists(csv_path)
            # 'with' closes the file (original leaked the handle); newline=''
            # is required by the csv module to avoid blank rows on Windows.
            with open(csv_path, 'a', encoding='utf-8', newline='') as g:
                g_csv = csv.writer(g)
                if write_header:
                    g_csv.writerow(CSV_HEADERS)
                g_csv.writerow([img, name, year, lc, cang, price_first, price])

        images_dir = os.path.join(brand_dir, 'images')
        os.makedirs(images_dir, exist_ok=True)

        # data-img is protocol-relative ("//..."), so prepend the scheme.
        img_resp = requests.get(url='http:' + img, timeout=10)
        # BUG FIX: the original formatted name_first[1:3] (a list) into the
        # filename, producing names like "['X5', '2016款'].jpg"; join the two
        # tokens instead.
        img_name = '_'.join(name_first[1:3])
        with open(os.path.join(images_dir, '%s.jpg' % img_name), 'wb') as f:
            f.write(img_resp.content)
            print("保存图片完成....ok")