import requests
from spider.proxy import IpPool, USEPROXY
from lxml import etree
# import csv
import pandas as pd
import os
from time import sleep,strftime
import re
import random
from PriceCompare.settings import HOST
# HOST = "127.0.0.1"
from urllib.parse import parse_qsl,urlparse
from urllib.parse import quote
import asyncio
import aiohttp


UserAgentLib = []

# Hard-coded fallback UA, used when no UA.txt pool is available (or it is empty).
_DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.46'

# Load the user-agent pool from spider/UA.txt when present, one UA per line.
# BUGFIX: the original appended raw lines with only '\n' removed, so blank
# lines became empty user-agent strings that random.choice() could return.
_ua_file = os.path.join('spider', 'UA.txt')
if os.path.exists(_ua_file):
    with open(_ua_file, 'rt', encoding='utf-8') as f:
        for u in f:
            ua = u.strip()
            if ua:  # skip blank lines
                UserAgentLib.append(ua)
    print('使用UA库')
else:
    print('未找到UA库')

# Guarantee at least one UA even if the file existed but held nothing usable.
if not UserAgentLib:
    UserAgentLib.append(_DEFAULT_UA)






def show_log(*args, **kwargs):
    """Print *args* prefixed with a '[yy-mm-dd HH:MM:SS] ' timestamp."""
    stamp = strftime('[%y-%m-%d %H:%M:%S] ')
    print(stamp, *args, **kwargs)


class GoodsSpider:
    """Scrape product listings from s.manmanbuy.com for a search keyword.

    Pages are fetched asynchronously with aiohttp (optionally through the
    rotating proxy pool from ``spider.proxy``), parsed with lxml, and one
    result page is returned as a pandas DataFrame through the synchronous
    :meth:`run` entry point.
    """

    # Shared proxy pool; consulted only when USEPROXY is enabled.
    ip_pool = IpPool()
    dataFile = None
    # Dedicated event loop so the blocking run() wrapper can drive the
    # async pipeline.
    loop = asyncio.new_event_loop()

    def __init__(self) -> None:
        # btnSearch=%CB%D1%CB%F7 is GB2312 URL-encoding, which this site
        # expects; the key placeholder is quoted with gb2312 in searchKey().
        self.search_url = 'http://s.manmanbuy.com/Default.aspx?btnSearch=%CB%D1%CB%F7&key={}'
        self.ua = UserAgentLib
        # Filled in by run_async(); read back by run().
        self.result = None

    async def get_proxies(self):
        """Return a proxy URL for aiohttp, or None to use the local IP."""
        if USEPROXY:
            proxy = await self.ip_pool.getIp()
            show_log(f"use proxy {proxy} count {proxy.count}")
        else:
            proxy = None
            show_log("使用本机ip")
        # Calling the proxy object yields the actual proxy URL string.
        return proxy() if proxy else None

    async def fetch(self, url, headers=None, proxies=None, timeout=7):
        """GET *url* and return the body decoded as GBK (the site encoding)."""
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(url, proxy=proxies, timeout=timeout) as response:
                html = await response.text(encoding='gbk')
        return html

    def get_header(self):
        """Build request headers with a randomly chosen user-agent."""
        return {"user-agent": random.choice(self.ua)}

    async def parse_info(self, tree):
        """Extract product rows from a parsed search-result page.

        Returns ``(labels, rows)`` where *labels* are the column names and
        *rows* is a list of per-product field lists.  Products that fail to
        parse are logged and skipped.
        """
        goods = tree.xpath('//div[@class="div1100"]/div[contains(@class,"bjlineSmall singlebj")]')
        print(len(goods))
        products_list = []
        for each in goods:
            try:
                # Title text can be split across several child nodes.
                title = " ".join(each.xpath('./div[@class="title"]/div[@class="t"]/a//text()')).strip()
                price = float(each.xpath('./div[@class="cost"]/div/span/text()')[0])

                # Resolve the raw redirect href to the merchant's own URL.
                detail_link = each.xpath('./div[@class="pic"]/a/@href')
                print('detail_link', detail_link)
                detail_link = await self.link_filter(detail_link)
                if not detail_link:
                    continue
                detail_link = f'http://{HOST}/detail/?redict=' + "".join(detail_link).strip()
                # '&' would split the wrapping query string; the /detail/
                # view restores it.
                detail_link = detail_link.replace('&', '@')

                # One <p> holds mall name, store name and creation time.
                p = each.xpath('./div[@class="mall"]/p//text()')
                mall = p[1]
                store = p[2]
                create_time = p[3]

                # BUGFIX: xpath() returns a list, so the old
                # `comment == '\xa0'` check could never be True and products
                # without comments were dropped via IndexError; treat a
                # missing/placeholder comment cell as 0 instead.
                comment = each.xpath('./div[@class="comment"]//text()')
                comment = int(comment[1]) if len(comment) > 1 else 0

                price_trend = each.xpath('./div[@class="cost"]//span[@class="poptrend"]/a/@thref')[0]
                print("price_trend=", price_trend)
                pic = each.xpath('./div[@class="pic"]//img/@src')[0]
            except Exception as e:
                print(e)
                continue

            products_list.append([
                title,
                price,
                detail_link,
                mall,
                store,
                comment,
                pic,
                create_time,
                price_trend,
            ])

        labels = ["title", "price", "detail_link", "mall", "store",
                  "comment", "pic", "create_time", "price_trend"]
        return labels, products_list

    async def link_filter(self, url):
        """Resolve a result href to the merchant's original URL.

        Handles three cases: a 'redirectUrl' page that must be fetched and
        scraped for the real link, an 'originalUrl' query parameter read
        directly, and plain links (rejected when they carry no query
        string).  Returns None for links that should be skipped.
        """
        proxies = await self.get_proxies()

        if isinstance(url, list) and len(url) > 0:
            url = url[0]

        if 'redirectUrl' in url:
            headers = {'user-agent': random.choice(UserAgentLib)}
            html = await self.fetch(url, headers=headers, proxies=proxies)
            tree = etree.HTML(html)
            origin_url = tree.xpath("//div[@class='tan-b']/a/@href")
            return origin_url.pop()
        elif 'originalUrl' in url:
            parsed = urlparse(url)
            query_args = dict(parse_qsl(parsed.query, keep_blank_values=True))
            return query_args.get('originalUrl')
        else:
            # Links without any query arguments are navigation, not products.
            parsed = urlparse(url)
            query_args = dict(parse_qsl(parsed.query))
            if len(query_args) == 0:
                return None
        return url

    async def searchKey(self, key, orderby=''):
        """Fetch page 1 for *key* and return ``(page_info, mall_list)``.

        *page_info* holds one dict per result page (URL, headers and parser
        to use); *mall_list* lists the mall filter values found on the page.
        """
        # The site expects the keyword in gb2312 URL-encoding.
        url = self.search_url.format(quote(key, encoding="gb2312"))
        if orderby:
            url += f'&orderby={orderby}'

        resp = await self.fetch(url, headers=self.get_header())

        tree = etree.HTML(resp)
        page_temp = tree.xpath("//div[@id='dispage']//text()")
        mall_list = tree.xpath("//li[@class='divSXblock']//@value")
        print('mall_list', mall_list)
        mall_list = mall_list[1:]  # drop the leading "all malls" pseudo-entry

        # BUGFIX: guard against a missing pager instead of crashing with
        # AttributeError when re.search() returns None; assume one page.
        match = re.search(r"(\d+)页", ''.join(page_temp))
        total_page = int(match.group(1)) if match else 1

        page_info = [
            {"url": f"{url}&PageID={i}",
             "headers": self.get_header(),
             "parse_func": self.parse_info}
            for i in range(1, total_page + 1)
        ]
        return page_info, mall_list

    async def handlePage(self, url, headers, parse, proxy=None):
        """Fetch one result page and parse it, retrying up to 5 times.

        BUGFIX: the original loop had no try/except, so the first network
        failure propagated immediately and the 5-attempt retry loop never
        actually retried (it also dead-assigned the *proxy* parameter).
        Raises the last error when every attempt fails.
        """
        last_error = None
        for attempt in range(5):
            try:
                proxies = await self.get_proxies()
                html = await self.fetch(url, headers=headers, proxies=proxies, timeout=7)
                tree = etree.HTML(html)
                labels, data = await parse(tree)
                return labels, data
            except Exception as e:
                last_error = e
                show_log(f"attempt {attempt + 1}/5 for {url} failed: {e}")
        raise last_error

    async def run_async(self, key, page=1, orderby=''):
        """Search *key*, scrape result page *page* and store the outcome.

        The result dict (current-page DataFrame, page count, mall filters)
        is stored on ``self.result`` for the synchronous run() wrapper.
        """
        pages, mall_list = await self.searchKey(key, orderby)
        page_task = pages[page - 1]
        labels, data = await self.handlePage(
            page_task["url"], page_task["headers"], page_task["parse_func"])
        pt = pd.DataFrame(data, columns=labels)
        print('=' * 30)
        print(pt)
        self.result = {'curPage': pt, 'pageNum': len(pages), 'malls': mall_list}

    def run(self, key, page=1, orderby=''):
        """Blocking entry point: scrape *key* and return the result dict."""
        self.loop.run_until_complete(self.run_async(key, page, orderby))
        return self.result




if __name__ == "__main__":
    # Manual smoke test: search for "k40" and dump the first result page.
    spider = GoodsSpider()
    result = spider.run("k40")
    print(result)


