from lxml import etree
from urllib import parse
from App.models import Block, Hot
import aiohttp
import asyncio
from concurrent.futures import ThreadPoolExecutor

# Worker pool for the blocking ORM writes (Hot.addHot) so they run off the
# asyncio event loop instead of stalling the crawlers.
threadpool = ThreadPoolExecutor(100)


class CrawlData:
    """Async crawlers for the hot/trending lists of several sites.

    Each ``Get<Name>`` coroutine downloads one site's hot list, parses it
    with lxml, and submits every entry to the module-level ``threadpool``
    so the blocking ``Hot.addHot`` database write does not block the
    event loop.
    """

    def __init__(self):
        # Site name -> hot-list URL. Keys must match the "Get<Name>" method
        # suffixes so ExecGetData() can dispatch by name.
        self.urls = {
            'V2EX': 'https://www.v2ex.com/?tab=hot',
            'Github': 'https://github.com/trending',
            'WeiBo': 'https://s.weibo.com/top/summary',
            'ZhiHu': 'https://www.zhihu.com/hot',
            'BaiDu': 'http://top.baidu.com/?vit=1',
        }
        # Desktop Chrome UA so the sites serve their normal HTML layout.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
        }

    async def getConn(self, name):
        """Fetch the page registered for *name* in ``self.urls``.

        Returns the parsed lxml element tree on HTTP 200, otherwise logs
        and returns ``None`` — callers must check for ``None``.
        """
        url = self.urls.get(name)
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with session.get(url) as resp:
                if resp.status == 200:
                    return etree.HTML(await resp.text())
                print('获取{}失败'.format(name))
                return None

    async def GetZhiHu(self):
        """Crawl the Zhihu hot list (requires a logged-in cookie) and store it."""
        url = 'https://www.zhihu.com/hot'
        # NOTE(review): the hard-coded session cookie below will expire and
        # must be refreshed; Zhihu serves the hot page only to logged-in users.
        headers = {
            "cookie": '_zap=1d6be8a4-e9d3-42e6-a6ec-c4cc54c0134a; _xsrf=616f1f2f-4f29-4afb-914c-743e87cf1172; d_c0="AACdNBT6jBKPThcsAf5VQWdFjeCUfE3J9l8=|1611478442"; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1611478448; captcha_session_v2="2|1:0|10:1611480060|18:captcha_session_v2|28:YzA2am52MXRnc25mZXQwZHBuYmc=|cf6a4d5c26946e116327b1aa501c998c553013cb67fe4cf3828ff408901e4e00"; SESSIONID=w3pjaVQmitSd1MVhMbJrVa7Hl1ICsK2fbiwjySqpsIZ; JOID=VV4XCkhkDvBxUAvIKGobbp0RBUA_EjmLETVcnU4Je6cea0KfRJw6_B9VDscplUy9g4VXNnayIo0-iBOY8ZbvbOc=; osd=UVAdAk9gAPp5Vw_GImIcapMbDUc7HDODFjFSl0YOf6kUY0WbSpYy-xtbBM8ukUK3i4JTOHy6JYkwghuf9ZjlZOA=; __snaker__id=mM1DNHHzmaFe3iGX; gdxidpyhxdE=x%2BvSyvdWyRPp%2BsNgltyJ9HTgrGPgkMk2aeAmRru89zwQjxblH1E6UvrN84OuzoKU6QapE1Q1jqNqUcDWWSk9SAVtyDBJeeYrLCUofK8Jhp3w3l%5CqQrmtmQ5PQvEjUgARC%5CXtsgQrdh4VagrQPHNR3I%5CZhQa9p%2FUXImXngTgVqD4etHW7%3A1611480964335; _9755xjdesxxd_=32; YD00517437729195%3AWM_TID=vwtcoJMTvPdFRAFQRFZvPw5emxh23M5v; YD00517437729195%3AWM_NI=tcNgZVqQgfydgE6A14peWJQ%2FeGwFGH8xRLuJM8zOPxE6s6BZj021eQD%2FJIP4VFn%2FE3%2FcT1jtkskAp7PFJpEAkRA743dKmvaAibK4SrKSM5%2BZ2PRCrRRHOvPJ%2F0QribcIMUU%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6ee82c874b4a8b990f159bceb8ea7d54e929b8faff56bacac81d3ea68f1ee9794b62af0fea7c3b92ab8bd8288d95eb38fa987fc70a59bfaa2b440b4bb99abd425a799b9a7c644fca89eadf8428fe8b6b0d37487eabdd7c152aaf0fed2e85b88f186aad340969aa2d0c839edbd9cd2e980a9ada5a2f33d949a8691c84ba98e8fd8b739a895a397e44babbe85d5d27a898d83d6aa52f2909ea4f15a92988197d342edf09f93ec6aa19783b6d837e2a3; l_n_c=1; r_cap_id="MTEyNWMyN2Y0ZTFiNGNhNjg1ZGNjMzFhNTMzZGEzYzQ=|1611480072|0092b4dad9b7f49f9939a1219cf9620d91767243"; cap_id="M2YxYzJhYmQ4OTNkNGE1YWE3NjY2YWYzMWE2NmIwOGE=|1611480072|8e3e8ef4c0ab4fb20293f1e8339b1f68e5a4c6f3"; l_cap_id="ODY1NmExZDFlMGY3NDgzZmE3YjFkY2FkZmVlOGY2MGY=|1611480072|93dad257b28eb1461848b3553ac4b4de0914793c"; n_c=1; z_c0=Mi4xYjZ3OENRQUFBQUFBQUowMEZQcU1FaGNBQUFCaEFsVk5ESXI2WUFBS1RtdWI0WmxrSXpQWlZYU1puZ1NCaGhCSHVn|1611480076|257207ea2fe24ec1b73d109dee3d9dcbfd6e3ed4; tshl=; tst=h; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1611480081; KLBRSID=fb3eda1aa35a9ed9f88f346a7a3ebe83|1611480079|1611478448',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
        }
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(url) as resp:
                if resp.status == 200:
                    soup = etree.HTML(await resp.text())
                else:
                    print('获取{}失败'.format(url))
                    return None

        items = soup.xpath('//div[@class="HotList-list"]/section')
        for item in items:
            title = item.xpath('div[2]/a/h2/text()')[0]
            content = item.xpath('div[2]/a/p/text()')
            url = item.xpath('div[2]/a/@href')[0]
            # Some entries have no summary paragraph; store an empty string then.
            if content:
                content = content[0].strip()
            else:
                content = ''
            print(title, url, content)
            threadpool.submit(Hot.addHot, title=str(title), url=str(url), block='ZhiHu', content=content)

    async def GetBaiDu(self):
        """Crawl the Baidu top list (page is GBK-encoded) and store it."""
        url = "http://top.baidu.com/?vit=1"
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with session.get(url) as resp:
                if resp.status == 200:
                    soup = etree.HTML(await resp.text(encoding='gbk'))
                else:
                    print('获取{}失败'.format(url))
                    # Bug fix: previously fell through and crashed below on an
                    # undefined `soup` when the request failed.
                    return None

        items = soup.xpath('//ul[@id="hot-list"]/li')

        for data in items:
            title = data.xpath('a[1]/@title')[0]
            url = data.xpath('a[1]/@href')[0]
            print(title, url)
            threadpool.submit(Hot.addHot, title=str(title), url=str(url), block='BaiDu', content='')

    async def GetWeiBo(self):
        """Crawl the Weibo hot-search board and store it."""
        soup = await self.getConn('WeiBo')
        if soup is None:
            # Bug fix: getConn may return None; previously this crashed with
            # AttributeError on soup.xpath. getConn already logged the failure.
            return
        items = soup.xpath('//div[@class="data"]/table/tbody/tr')
        for item in items:
            title = item.xpath('td[2]/a/text()')[0]
            # Hrefs on the board are relative; resolve against the site root.
            url = parse.urljoin('https://s.weibo.com', item.xpath('td[2]/a/@href')[0])
            print(title, url)
            threadpool.submit(Hot.addHot, title=str(title), url=str(url), block='WeiBo', content='')

    async def GetGithub(self):
        """Crawl GitHub trending repositories and store them."""
        soup = await self.getConn('Github')
        if soup is None:
            # Bug fix: guard against a failed request (getConn returns None).
            return
        items = soup.xpath('//article[@class="Box-row"]')
        for item in items:
            # "owner / repo" text is spread over child nodes with heavy
            # whitespace; collapse it into a single readable title.
            title = "".join(item.xpath('h1/a//text()')).replace("\n", "").strip().replace("      ", "")
            url = item.xpath('h1/a/@href')[0].strip()
            try:
                content = item.xpath('p[contains(@class,"col-9")]/text()')[0].strip()
            except IndexError:
                # Repository has no description paragraph. (Was a bare except.)
                content = ''
            if title and url:
                url = parse.urljoin('https://github.com/', url)
                print(title, url, content)
                threadpool.submit(Hot.addHot, title=str(title), url=str(url), block='Github', content=content)

    async def GetV2EX(self):
        """Crawl the V2EX hot tab and store it."""
        soup = await self.getConn('V2EX')
        if soup:
            # The first cell is the page header row; skip it.
            items = soup.xpath('//div[@id="Main"]/div[@class="box"]/div[contains(@class,"cell")]')[1:]
            for item in items:
                title = item.xpath('table//td[3]/span/a/text()')
                url = item.xpath('table//td[3]/span/a/@href')
                if title and url:
                    # NOTE(review): indices look inconsistent — url/print use
                    # element [1] while the stored title uses [0]. Behavior
                    # kept as-is; confirm against the live page markup.
                    url = parse.urljoin('https://www.v2ex.com/', url[1])
                    print(title[1], url)
                    threadpool.submit(Hot.addHot, title=str(title[0]), url=str(url), block='V2EX', content='')
        else:
            print('GetV2EX Error Done')


def ExecGetData(spider, value):
    """Return the ``Get<value>`` crawler coroutine function bound to *spider*."""
    method_name = "Get" + value
    return getattr(spider, method_name)


def main():
    """Crawl every registered site concurrently on a fresh event loop.

    Creates the Block rows first, builds one coroutine per site via
    ExecGetData, then runs them all with asyncio.gather.
    """
    allData = [
        'Github',
        'WeiBo',
        'V2EX',
        'ZhiHu',
        "BaiDu"
    ]
    # Bug fix: the original called get_event_loop() right after
    # set_event_loop() (redundant) and never closed the loop (resource leak).
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    print("开始抓取{}种数据类型".format(len(allData)))
    spider = CrawlData()
    tasks = []
    for value in allData:
        print("开始抓取" + value)
        # Ensure the block row exists before hot entries reference it.
        Block.addBolck(value)
        func = ExecGetData(spider, value)
        tasks.append(func())
    try:
        loop.run_until_complete(asyncio.gather(*tasks))
    finally:
        loop.close()


# Script entry point: run one full crawl pass when executed directly.
if __name__ == '__main__':
    main()
