import csv

import aiocsv
import requests,re,asyncio,aiohttp,aiofiles,os
from lxml import etree

# Browser-captured request headers (copied from a DevTools "copy as cURL" of a
# JD search request). The caret sequences (^\\^...) are Windows cmd.exe escaping
# left over from the copy — sent verbatim; JD appears to tolerate them.
headers = {
    "authority": "search.jd.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "cache-control": "max-age=0",
    "dnt": "1",
    "sec-ch-ua": "^\\^Chromium^^;v=^\\^104^^, ^\\^",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "^\\^Windows^^",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.81 Safari/537.36 Edg/104.0.1293.47"
}
# Session cookies captured from the same browser session. These are
# account/session specific and will expire — NOTE(review): requests will fail
# or return a login page once they do; refresh from a live browser session.
cookies = {
    "__jdu": "16562541366491405850625",
    "shshshfpa": "98e0919b-9bab-f953-5a2d-03e62bd4663a-1656254137",
    "shshshfpb": "q9xNhgGY158EFd2eiGe575A",
    "qrsc": "3",
    "pinId": "XQNc3bPqk0uMCQak-PD8CbV9-x-f3wj7",
    "__jdv": "122270672^|direct^|-^|none^|-^|1659408132763",
    "PCSYCityID": "CN_410000_410100_0",
    "__jdc": "122270672",
    "shshshfp": "767c1d51e52832337e6343afb2110052",
    "rkv": "1.0",
    "areaId": "7",
    "ipLoc-djd": "7-412-416-47178",
    "jsavif": "0",
    "__jda": "122270672.16562541366491405850625.1656254137.1660014353.1660286836.15",
    "__jdb": "122270672.1.16562541366491405850625^|15.1660286836",
    "shshshsID": "663e4cff53af1cdd4bde9b35d78db679_1_1660286836678",
    "3AB9D23F7A4B3C9B": "DRPPYPU5OXATTJNZQK32BZ6ZCSHTMFAL4M5COXQL673UYR2J4D46Y6Y6LMUXWT3OL7GYCFPI42HQMPI7HOICOBY6QU"
}
# Default query-string parameters. The keyword is percent-encoding mangled by
# the same cmd.exe caret escaping (presumably "卫生纸" / toilet paper).
# NOTE(review): get_page() builds its own keyword/page into the URL, so these
# params are sent in addition to — and may be overridden by — the URL's own
# query string; verify against a live request.
params = {
    "keyword": "^%^E5^%^8D^%^AB^%^E7^%^94^%^9F^%^E7^%^BA^%^B8",
    "page": "4"
}


def down(url):
    """Synchronously GET *url* with the module-level headers, cookies and
    params, returning the raw ``requests.Response``.

    NOTE(review): not called anywhere in this file — looks like a leftover
    synchronous fallback for the async pipeline.
    """
    return requests.get(url, headers=headers, cookies=cookies, params=params)

async def aio_down(url, session):
    """Fetch one JD search-result page, parse its product listing, append each
    product row to ./jd.csv, and optionally save each product's main image.

    Args:
        url: search-result page URL to fetch.
        session: shared ``aiohttp.ClientSession``.

    Uses module globals:
        n: running counter of CSV rows written (incremented here).
        imgstat: 'Y' enables image download into ./img/ (folder must exist).

    Malformed listing entries (e.g. ad tiles missing an xpath hit) are skipped.
    """
    global n, imgstat
    async with session.get(url=url, headers=headers, params=params, cookies=cookies) as resp:
        res = await resp.text()
        tree = etree.HTML(res)
        li_list = tree.xpath('//div[@id="J_goodsList"]/ul/li')
        for li in li_list:
            try:
                # NOTE(review): this reads @data-price but is used as the item
                # id (and image filename); kept as-is to preserve existing
                # CSV/image naming. Renamed from `id` to avoid shadowing the
                # builtin.
                item_id = ''.join(li.xpath('./div/div[2]/strong/i/@data-price')).strip()
                brand = ''.join(li.xpath('./div/div[3]/a/em//text()[1]')[0]).strip()
                product = ''.join(li.xpath('./div/div[3]/a/em//text()[2]')[0]
                                  + li.xpath('./div/div[3]/a/em//text()[1]')[2]).strip()
                shop = ''.join(li.xpath('./div/div[5]/span/a/text()')).strip()
                price = ''.join(li.xpath('./div/div[2]/strong/i/text()')).strip()
                product_url = 'https:' + ''.join(li.xpath('./div/div[1]/a/@href')).strip()
                row = [item_id, brand, product, price, shop, product_url]
                print(item_id, brand, product, price, shop, product_url)

                # 结果追加到CSV文件 — append this result row to the CSV file.
                async with aiofiles.open('./jd.csv', mode='a+', newline='', encoding='utf-8') as fp:
                    writer = aiocsv.AsyncWriter(fp)
                    await writer.writerow(row)
                    n += 1

                # 保存主图 — save the main image; requires an ./img folder
                # (created by __main__ when image saving is enabled).
                if imgstat == 'Y':
                    print('Y')
                    print(session)
                    async with session.get(url=product_url, headers=headers, params=params, cookies=cookies) as response:
                        tree1 = etree.HTML(await response.text())
                        # Strip the .avif suffix so we fetch the plain JPEG.
                        img_url = 'https:' + ''.join(tree1.xpath('//*[@id="spec-img"]/@data-origin')).replace('.avif', '')
                        print(img_url)
                        async with session.get(url=img_url) as imgres:
                            async with aiofiles.open(f'./img/{item_id}.jpg', 'wb') as fp:
                                await fp.write(await imgres.content.read())
                                print('success')

            except Exception:
                # Was a bare `except:`, which also swallowed
                # asyncio.CancelledError and KeyboardInterrupt — now only
                # ordinary errors (IndexError on missing xpath hits, network
                # failures for one item) skip the row.
                continue
# async aio_func()

async def get_page(kw, pagenum):
    """Spawn one aio_down task per search-result page and wait for all of them.

    Args:
        kw: search keyword (interpolated into the JD search URL).
        pagenum: number of pages to fetch, as a decimal string (from input()).

    Raises:
        ValueError: if *pagenum* is not a valid integer string.
    """
    tasks = []
    async with aiohttp.ClientSession() as session:
        # int() instead of eval(): pagenum comes straight from user input and
        # eval() would execute arbitrary Python expressions.
        for page in range(int(pagenum)):
            url = 'https://search.jd.com/Search?keyword={}&page={}'.format(kw, page)
            # One concurrent task per result page.
            tasks.append(asyncio.create_task(aio_down(url, session)))
        # asyncio.wait() raises ValueError on an empty set — guard pagenum == 0.
        if tasks:
            await asyncio.wait(tasks)


if __name__ == '__main__':
    # Module-level counter read/updated by aio_down via `global`.
    n = 1

    path = './img'
    kw = input("搜索产品：")
    pagenum = input("请输入要查询多少页内容:")
    imgstat = input("是否要保存图片(Y/N):")
    if imgstat == "Y":
        # Create the image folder if missing; exist_ok avoids the
        # check-then-create race of the previous exists()/mkdir() pair.
        os.makedirs(path, exist_ok=True)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(get_page(kw, pagenum))
    finally:
        # The loop was never closed before; release its resources even if
        # the scrape raises.
        loop.close()

    # Delete saved images smaller than 10 KB — these are placeholder or
    # failed downloads. os.walk on a missing ./img simply yields nothing.
    for dirpath, dirnames, filenames in os.walk(path):
        for file in filenames:
            file_tmp = os.path.join(dirpath, file)
            if os.path.getsize(file_tmp) < 10240:
                os.remove(file_tmp)


