import asyncio
import csv
import datetime
import os
import re
import time

import requests
from lxml import etree
import aiohttp


async def main():
    """Fetch the first five Suqian listing pages concurrently.

    One task is created per page; ``save_data`` is attached as a
    done-callback so each page is parsed and stored as soon as its
    download completes.
    """
    base_url = 'https://www.xin.com/suqian/i'
    page_tasks = []
    for page_no in range(1, 6):
        task = asyncio.create_task(one_classify(f'{base_url}{page_no}/'))
        task.add_done_callback(save_data)
        page_tasks.append(task)

    await asyncio.wait(page_tasks)


async def one_classify(url):
    """Download one listing page and return its HTML as text."""
    # Session cookie captured from a browser; presumably needed to get past
    # the site's location/anti-bot gating — confirm before removing.
    headers = {
        'Cookie':
            'pif=; RELEASE_KEY=; XIN_UID_CK=4b0dde85-ef1a-9601-ba3b-9b35dfed160a; Hm_lvt_ae57612a280420ca44598b857c8a9712=1607568632; XIN_anti_uid=387F5A8F-823F-F65E-5C77-F73EC552279E; XIN_LOCATION_CITY=%7B%22cityid%22%3A%221520%22%2C%22cityname%22%3A%22%5Cu5bbf%5Cu8fc1%22%2C%22ename%22%3A%22suqian%22%2C%22service%22%3A%221%22%2C%22zhigou%22%3A%221%22%2C%22is_visit%22%3A%221%22%2C%22city_rank%22%3A%22100%22%2C%22is_gold_partner%22%3A%22-1%22%2C%22direct_rent_support%22%3A%221%22%2C%22is_wz_mortgage%22%3A%221%22%7D; uid=CvQuyV/RjtQMNQATE98PAg==; SEO_SOURCE=https://www.xin.com/suqian/i1/; SEO_REF=https://www.xin.com/suqian/i1/; acw_tc=dde7519d16075827077614016e941c27dadfdfddaaf911018dc1c9d868; acw_sc__v2=5fd1c3f39817d6d7b613bd5acee0be051f9b7796; session_xin=s2dcvg2vhac6m9rm76518sl1uurlb7ig; Hm_lpvt_ae57612a280420ca44598b857c8a9712=1607582710; SERVERID=0e053b057d2244a83e6d26f4478a028f|1607582712|1607578556'
    }
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            html = await resp.text()
            return html


def save_data(task_obj):
    """Done-callback for a page task: parse the HTML and persist the cars.

    For every listing on the page this writes a one-row CSV file
    (``优信二手车/<brand>/<model>.csv``) and downloads the listing's
    thumbnail via ``save_img``.

    Args:
        task_obj: the finished task; ``task_obj.result()`` is the page
            HTML string returned by ``one_classify``.
    """
    # Create the top-level download directory on first use.
    download_path = os.getcwd() + '/优信二手车/'
    if not os.path.exists(download_path):
        os.makedirs(download_path)

    page = etree.HTML(task_obj.result())
    # The first <li> carries its image in @src; the lazily-loaded rest use
    # @data-original — hence two lists and the i == 0 special case below.
    first_img_url = page.xpath('//div[@class="_list-con list-con clearfix ab_carlist"]//li//img/@src')
    img_url = page.xpath('//div[@class="_list-con list-con clearfix ab_carlist"]//li//img/@data-original')
    title = page.xpath('//div[@class="_list-con list-con clearfix ab_carlist"]//li/@data-title')
    price = page.xpath('//div[@class="_list-con list-con clearfix ab_carlist"]//li//em/text()')
    year = page.xpath('//div[@class="_list-con list-con clearfix ab_carlist"]//li//div[@class="pad"]/span/text()[1]')
    # Named `today` (was `time`) so the stdlib `time` module is not shadowed.
    today = datetime.datetime.now().strftime('%Y-%m-%d')

    for i in range(len(title)):
        row = {
            'title': title[i],
            # Strip whitespace/newlines; [:-1] drops the trailing unit char.
            'price': price[i].replace(' ', '').replace('\n', '')[:-1],
            'year': year[i].replace(' ', '').replace('\n', '')[:-1],
            'time': today,
        }
        title_data = title[i].split()
        # One directory per brand (first word of the data-title).
        brand = download_path + title_data[0]
        if not os.path.exists(brand):
            os.makedirs(brand)
        # newline='' keeps the csv module from inserting blank lines on
        # Windows; utf-8 is required for the Chinese titles.
        with open(brand + '/%s.csv' % title_data[1], 'w', newline='', encoding='utf-8') as f:
            print('正在存贮....' + brand + '%s.csv' % title_data[1])
            writer = csv.DictWriter(f, ['title', 'price', 'year', 'time'])
            writer.writeheader()
            writer.writerow(row)
        if i == 0:
            save_img(str(first_img_url[i]), title[i])
        else:
            save_img(str(img_url[i - 1]), title[i])


def save_img(img_url, title):
    """Download one listing thumbnail into ``优信二手车/<brand>/images/``.

    Args:
        img_url: protocol-relative image URL (e.g. ``//img...``); the
            ``http:`` scheme is prepended before downloading.
        title: the listing's data-title; its first word selects the brand
            directory and the full title becomes the file name.
    """
    # Keep the original file extension (.jpg/.png/...) from the URL.
    img_name = title + img_url[img_url.rfind('.'):]
    # Timeout so one stalled download cannot hang the whole run forever.
    r = requests.get('http:' + img_url, timeout=30)
    print(img_url)
    # Create the per-brand images directory on first use.
    download_path = os.getcwd() + '/优信二手车/' + title.split()[0] + '/images/'
    if not os.path.exists(download_path):
        os.makedirs(download_path)
    with open(download_path + '%s' % img_name, 'wb') as f:
        f.write(r.content)


# Script entry point: run the async scraper's event loop.
if __name__ == '__main__':
    asyncio.run(main())
