import time
import requests
from lxml import etree
from worker.yuqing.crawler.spider.utils import *
from bs4 import BeautifulSoup as bs
# Suppress the InsecureRequestWarning emitted by requests for unverified HTTPS
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

def getComments(page_url):
    """Scrape every page of comments for one weibo.cn post.

    Walks all pages of the mobile comment view, collecting each comment's
    id, text, timestamp and author user id.

    Args:
        page_url: URL of the post's comment page; any '#fragment' is dropped
            and '&page=1' is appended so crawling always starts at page 1.

    Returns:
        A list of dicts with keys 'comment_id', 'comment_content',
        'comment_datetime' (unix timestamp, int) and 'comment_userid'.
        Empty list when the post has no comments.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Cookie': 'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWjQZI50Arb0eqJ-dsNqj7z5JpX5K-hUgL.Fo-0eh.7So5R1KM2dJLoIXnLxKqL1-eL1h.LxK.LB-BL1KBLxKqLBoeL1K-LxKML1-2L1hBLxKnLB.qLBoMLxK.L1-zLB.-LxKqLBKeLB--LxKqL1--L1KMt; _T_WM=20d15ea5d78554ce77aa1520812a6328; SCF=AsgKsGlIYOASs6D1liNbIvr-IJmdywutRR0TeMd3FONP5evI_ac7tH5VS_E3wvDn0voU223C9LDQTviFGTunUrI.; SUB=_2A25yxxCtDeRhGeNN61sR9i7EwjuIHXVuS7DlrDV6PUJbktCOLRejkW1NSU_KkAfUAvp5v5t2QXWNbJIujshJ8A-7; SSOLoginState=1606639870; ALF=1609231870'
    }
    # NOTE(review): the session cookie above is hard-coded and will expire.
    # Drop the '#fragment' and always start crawling from page 1.
    page_url = page_url.split("#")[0] + '&page=1'
    curr_page = 1
    comment_response = requests.get(page_url, headers=headers)
    comment_response.encoding = 'UTF-8'  # force UTF-8 to avoid mojibake
    soup = bs(comment_response.text, 'lxml')
    if len(soup.select('span.cc > a')) == 0:
        # No comment anchor at all -> the post has no comments.
        return []

    # Total page count, parsed from the pager text "x/N页" when present.
    all_page = 0
    pager = soup.select('#pagelist > form > div')
    if len(pager) != 0:
        page_count_re = re.compile(pattern='(?<=/).*?(?=页)', flags=re.IGNORECASE)  # non-greedy
        all_page = int(re.findall(page_count_re, str(pager[0].text))[0])

    # Hoisted out of the loops: compiled once instead of once per <div>.
    comment_id_re = re.compile(pattern='(?<=id="C_).*?(?=">)', flags=re.IGNORECASE)
    # NOTE(review): pages only show month/day, so the current year is
    # prepended — comments from a previous year get the wrong year.
    year_prefix = str(datetime.datetime.now().year) + "年"

    def _collect(page_soup, ids, contents, datetimes, userids):
        """Append the ids/texts/timestamps/userids found on one page."""
        # Comment containers look like <div class="c" id="C_<id>">...
        for node in page_soup.select('div'):
            raw = str(node)
            if raw.startswith('<div class="c" id="C_'):
                ids.append(re.findall(comment_id_re, raw)[0])
        contents.extend(item.text for item in page_soup.select('div.c > span.ctt'))
        # Timestamp text looks like "11月30日 12:30 来自<client>"; strip the
        # client suffix and whitespace so it matches "%Y年%m月%d日%H:%M".
        datetimes.extend(
            year_prefix + str(date_time.text).split("来自")[0].replace("\xa0", "").replace(" ", "")
            for date_time in page_soup.select('div.c > span.ct')
        )
        # Author links have the form /u/<user_id>.
        for anchor in page_soup.select('div.c > a'):
            link = str(anchor.get('href'))
            if link.startswith('/u/'):
                userids.append(link.replace("/u/", ""))

    item_id_list = []
    item_content_list = []
    item_datetime_list = []
    item_userid_list = []
    _collect(soup, item_id_list, item_content_list, item_datetime_list, item_userid_list)

    # Fetch the remaining pages, bumping the page= query one step at a time.
    if all_page > 1:
        for _ in range(all_page - 1):
            page_url = page_url.replace('page=' + str(curr_page), 'page=' + str(curr_page + 1))
            curr_page += 1
            comment_response = requests.get(page_url, headers=headers)
            comment_response.encoding = 'UTF-8'  # same encoding fix as above
            _collect(bs(comment_response.text, 'lxml'), item_id_list,
                     item_content_list, item_datetime_list, item_userid_list)

    # zip() truncates to the shortest list, so a page missing a content/date/
    # author node no longer raises IndexError as the old index loop could.
    comment_items = []
    for cid, content, dt, uid in zip(item_id_list, item_content_list,
                                     item_datetime_list, item_userid_list):
        comment_items.append({
            'comment_id': cid,
            'comment_content': content,
            'comment_datetime': int(time.mktime(time.strptime(str(dt), "%Y年%m月%d日%H:%M"))),
            'comment_userid': uid,
        })
    return comment_items

def weibostarter(keywords):
    """Crawl recent weibo.cn search results for the given keywords.

    Builds advanced-search URLs covering roughly the last 5 days (in 2-day
    windows), expands them to every result page, then parses each tweet node
    and yields one item dict per tweet.

    Args:
        keywords: iterable of search keyword strings.

    Yields:
        dict per tweet with keys such as 'news_link', 'news_author',
        'news_timeStamp', 'news_comments_link', 'news_comments',
        'news_site', optionally 'image_url'/'video_url'/'origin_weibo'/
        'news_content', and 'exceptiondesc'. Tweet nodes that fail to
        parse are skipped silently (best-effort crawl).
    """
    base_url = "https://weibo.cn"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Cookie': 'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWjQZI50Arb0eqJ-dsNqj7z5JpX5K-hUgL.Fo-0eh.7So5R1KM2dJLoIXnLxKqL1-eL1h.LxK.LB-BL1KBLxKqLBoeL1K-LxKML1-2L1hBLxKnLB.qLBoMLxK.L1-zLB.-LxKqLBKeLB--LxKqL1--L1KMt; _T_WM=20d15ea5d78554ce77aa1520812a6328; SCF=AsgKsGlIYOASs6D1liNbIvr-IJmdywutRR0TeMd3FONP5evI_ac7tH5VS_E3wvDn0voU223C9LDQTviFGTunUrI.; SUB=_2A25yxxCtDeRhGeNN61sR9i7EwjuIHXVuS7DlrDV6PUJbktCOLRejkW1NSU_KkAfUAvp5v5t2QXWNbJIujshJ8A-7; SSOLoginState=1606639870; ALF=1609231870'
    }
    # NOTE(review): the session cookie above is hard-coded and will expire.
    # Search window: today minus 5 days, advanced 2 days per slice below.
    date_end = datetime.datetime.strptime(str(datetime.date.today()), '%Y-%m-%d')
    date_start = date_end - datetime.timedelta(days=5)
    time_spread = datetime.timedelta(days=1)
    start_urls = []
    url_format = "https://weibo.cn/search/mblog?hideSearchFrame=&keyword={}&advancedfilter=1&starttime={}&endtime={}&sort=time&page=1"
    while date_start < date_end:
        next_time = date_start + time_spread * 2  # one 2-day slice per iteration
        start_urls.extend(
            [url_format.format(keyword, date_start.strftime("%Y%m%d"), next_time.strftime("%Y%m%d"))
             for keyword in keywords]
        )
        date_start = next_time

    print(start_urls)
    # Expand each first search page into the full list of result-page URLs.
    task_url = []
    for url in start_urls:
        response = requests.get(url=url, headers=headers, verify=False)
        response.encoding = "utf-8"
        task_url.append(response.url)
        if response.url.endswith('page=1'):
            # Pager text looks like "...&nbsp;1/N页" -> N is the page count.
            all_page = re.search(r'/>&nbsp;1/(\d+)页</div>', response.text)
            if all_page:
                for page_num in range(2, int(all_page.group(1)) + 1):
                    task_url.append(response.url.replace('page=1', 'page={}'.format(page_num)))

    for url in task_url:
        time.sleep(1)  # throttle: one result page per second
        # Retry up to 100 times on non-200 responses (rate limiting / bans).
        response = None
        attempts = 0
        while attempts < 100:
            response = requests.get(url=url, headers=headers)
            if response.status_code != 200:
                print(response.status_code)
                attempts += 1
                time.sleep(1)
            else:
                break
        response.encoding = "utf-8"
        if response.status_code != 200:
            # Gave up after the retry budget; skip this page.
            continue
        tree_node = etree.HTML(response.content)
        # Each tweet lives in a <div class="c" id="...">.
        tweet_nodes = tree_node.xpath('//div[@class="c" and @id]')
        for tweet_node in tweet_nodes:
            try:
                tweet_item = {}
                # The repost link encodes both ids: /repost/<tweet_id>?uid=<user_id>
                tweet_repost_url = tweet_node.xpath('.//a[contains(text(),"转发[")]/@href')[0]
                user_tweet_id = re.search(r'/repost/(.*?)\?uid=(\d+)', tweet_repost_url)
                tweet_item['news_link'] = 'https://weibo.com/{}/{}'.format(user_tweet_id.group(2),
                                                                           user_tweet_id.group(1))
                tweet_item['news_author'] = user_tweet_id.group(2)
                # Timestamp text, e.g. "11月30日 12:30 来自iPhone客户端";
                # drop the "来自<client>" suffix when present.
                create_time_info_node = tweet_node.xpath('.//span[@class="ct"]')[-1]
                create_time_info = create_time_info_node.xpath('string(.)')
                if "来自" in create_time_info:
                    create_time_info = create_time_info.split('来自')[0]
                tweet_item['news_timeStamp'] = int(
                    time.mktime(time.strptime(str(time_fix(create_time_info.strip())), "%Y-%m-%d %H:%M")))

                # The last <a class="cc"> is the comment-page link.
                comment_link = tweet_node.xpath('.//a[@class="cc"]')[-1].get('href')
                tweet_item['news_comments_link'] = comment_link
                tweet_item['news_comments'] = getComments(comment_link)
                tweet_item['news_site'] = 'www.weibo.com'

                images = tweet_node.xpath('.//img[@alt="图片"]/@src')
                if images:
                    tweet_item['image_url'] = images

                videos = tweet_node.xpath('.//a[contains(@href,"https://m.weibo.cn/s/video/show?object_id=")]/@href')
                if videos:
                    tweet_item['video_url'] = videos

                repost_node = tweet_node.xpath('.//a[contains(text(),"原文评论[")]/@href')
                if repost_node:
                    tweet_item['origin_weibo'] = repost_node[0]

                all_content_link = tweet_node.xpath('.//a[text()="全文" and contains(@href,"ckAll=1")]')
                if all_content_link:
                    # NOTE(review): truncated tweets ("全文") only print the
                    # full-text url; 'news_content' stays unset — TODO fetch it.
                    all_content_url = base_url + all_content_link[0].xpath('./@href')[0]
                    print(all_content_url)
                else:
                    tweet_html = etree.tostring(tweet_node, encoding='unicode')
                    tweet_item['news_content'] = extract_weibo_content(tweet_html)
                tweet_item['exceptiondesc'] = 'init'
                yield tweet_item
            except Exception:
                # Best-effort crawl: skip tweet nodes missing an expected
                # element (ads, deleted posts, layout changes).
                continue

# for i in weibostarter(keywords = ['化工火灾', '化工中毒']):
#     print(i)
