#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import codecs
import copy
import csv
import json
import os
import random
import re
import sys
import time
import datetime 
import traceback
from collections import OrderedDict

import requests
from lxml import etree
from tqdm import tqdm
from requests.adapters import HTTPAdapter


class Weibo(object):
    """Crawler for weibo.cn keyword search.

    Builds a list of advanced-search URLs for a set of keywords over a date
    window, crawls each result page, collects every tweet together with its
    author's profile, and periodically dumps the records to CSV files.
    """

    def __init__(self, config):
        """Initialize crawler state and pre-build the search URLs.

        config: dict that must contain a 'cookie' entry holding a
        logged-in weibo.cn cookie string.
        """
        self.base_url = "https://weibo.cn"
        self.cookie = {'Cookie': config['cookie']}
        self.user = {}       # profile of the user currently being crawled
        self.contents = []   # accumulated tweet records across all pages
        self.urls = []       # pre-built search-result URLs to crawl

        time_spread = datetime.timedelta(days=1)
        # Example of a full search URL:
        # https://weibo.cn/search/mblog?hideSearchFrame=&keyword=肺炎疫情&advancedfilter=1 &hasori=1 &starttime=20200206&endtime=20200207&sort=hot &hasv=1 &page=2
        url_format = "https://weibo.cn/search/mblog?hideSearchFrame=&keyword={}&advancedfilter=1&starttime={}&endtime={}{}&page=1"
        # Search keywords, edit as needed.
        # keywords = ["病毒", "新冠病毒", "冠状病毒", "新型冠状病毒", "2019-nCov", "医生", "疫情", "肺炎疫情", "新冠肺炎", "感染", "抗疫"]
        keywords = ["SARS", "不明肺炎", "华南海鲜市场"]

        for query_type in ["", "&hasv=1"]:
            for keyword in keywords:
                # Search window start/end, one URL per day.  Weibo launched
                # on 2009-08-16, so never use a date earlier than that.
                date_start = datetime.datetime.strptime("2019-12-29", '%Y-%m-%d')
                date_end = datetime.datetime.strptime("2019-12-30", '%Y-%m-%d')
                while date_start < date_end:
                    next_time = date_start + time_spread
                    # "&hasv=1" (verified authors only) is applied to
                    # two-character keywords only.  BUGFIX: the original
                    # re-appended a stale `url` from the previous iteration
                    # whenever this condition was false.
                    if query_type == "" or len(keyword) == 2:
                        url = url_format.format(keyword,
                                                date_start.strftime("%Y%m%d"),
                                                next_time.strftime("%Y%m%d"),
                                                query_type)
                        self.urls.append(url)
                    date_start = next_time
        print("contain {0}urls".format(len(self.urls)))


    def handle_html(self, url):
        """Fetch *url* and return its parsed lxml tree, or None on failure."""
        try:
            kv = {'user-agent': "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"}
            # Timeout so a dead connection cannot hang the whole crawl.
            response = requests.get(url, headers=kv, cookies=self.cookie, timeout=10)
            # Parse the raw bytes: forcing response.encoding = 'gbk' caused
            # decoding errors, so let lxml sniff the charset itself.
            html = response.content
            return etree.HTML(html)
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


    def get_other_info(self, user_id):
        """Scrape https://weibo.cn/<user_id>/info and return a profile dict.

        Returns a dict with nickname/gender/province/city/brief_intro/
        birthday/vip_level/authentication/labels (missing fields are ""),
        an empty dict when the page could not be fetched, or None when an
        unexpected exception occurred.
        """
        try:
            keys = ["nickname", "gender", "province", "city", "brief_intro",
                    'birthday', "vip_level", "authentication", "labels"]
            other_info = {key: "" for key in keys}

            url = 'https://weibo.cn/%s/info' % (user_id)
            selector = self.handle_html(url)
            if selector:
                # Join every text node of the info blocks so the regexes
                # below can pick out the individual fields.
                text1 = ";".join(selector.xpath('body/div[@class="c"]//text()'))
                nickname = re.findall('昵称;?[：:]?(.*?);', text1)
                gender = re.findall('性别;?[：:]?(.*?);', text1)
                place = re.findall('地区;?[：:]?(.*?);', text1)
                briefIntro = re.findall('简介;?[：:]?(.*?);', text1)
                birthday = re.findall('生日;?[：:]?(.*?);', text1)
                vip_level = re.findall('会员等级;?[：:]?(.*?);', text1)
                authentication = re.findall('认证;?[：:]?(.*?);', text1)
                labels = re.findall('标签;?[：:]?(.*?)更多>>', text1)

                # BUGFIX: re.findall returns a list, so the original compared
                # the list itself to a string and the expired-cookie check
                # could never fire.  Check the first match instead.
                if nickname and nickname[0] in (u'登录 - 新', u'新浪'):
                    self.write_log()
                    sys.exit(u'cookie错误或已过期,请按照README中方法重新获取')

                if nickname and nickname[0]:
                    other_info["nickname"] = nickname[0].replace(u"\xa0", "")
                if gender and gender[0]:
                    other_info["gender"] = gender[0].replace(u"\xa0", "")
                if place and place[0]:
                    place = place[0].replace(u"\xa0", "").split(" ")
                    other_info["province"] = place[0]
                    # BUGFIX: this assignment was mis-indented outside the
                    # branch; the city only exists once a place was found.
                    if len(place) > 1:
                        other_info["city"] = place[1]
                if briefIntro and briefIntro[0]:
                    other_info["brief_intro"] = briefIntro[0].replace(u"\xa0", "")
                if birthday and birthday[0]:
                    other_info['birthday'] = birthday[0]
                if vip_level and vip_level[0]:
                    other_info["vip_level"] = vip_level[0].replace(u"\xa0", "")
                if authentication and authentication[0]:
                    other_info["authentication"] = authentication[0].replace(u"\xa0", "")
                if labels and labels[0]:
                    other_info["labels"] = labels[0].replace(u"\xa0", ",").replace(';', '').strip(',')

                return other_info
            else:
                print("Nonetype! get_other_info fail!")
                return {}
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


    def get_user_info(self, selector, user_id):
        """Populate self.user with the profile plus tweet/follow counts.

        selector: parsed tree of the user's home page (https://weibo.cn/u/<id>).
        Returns self.user, or None when the home page could not be parsed.
        """
        try:
            keys = ["nickname", "gender", "province", "city", "brief_intro",
                    'birthday', "vip_level", "authentication", "labels"]
            other_info = self.get_other_info(user_id)  # may be {} or None
            if not other_info:
                # BUGFIX: get_other_info returns None after an exception;
                # calling .get on it would raise AttributeError.
                other_info = {}
            for key in keys:
                self.user[key] = other_info.get(key, "")  # default to ""

            if not selector:
                print("get user info fail at weibo info page!")
                return None
            user_info = selector.xpath("//div[@class='tip2']/*/text()")
            # Entries look like "微博[123]": strip the 3-char label prefix
            # and the closing bracket, leaving the number.
            self.user['weibo_num'] = int(user_info[0][3:-1])
            self.user['following'] = int(user_info[1][3:-1])
            self.user['followers'] = int(user_info[2][3:-1])
            self.user['user_id'] = user_id
            return self.user

        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


    def get_weibo_info(self, user_id):
        """Fetch a user's home page and return the combined profile dict."""
        try:
            url = 'https://weibo.cn/u/{0}'.format(user_id)
            self.user = {}  # reset before crawling each user
            selector = self.handle_html(url)
            print("\n爬取用户{0} info".format(user_id))
            return self.get_user_info(selector, user_id)
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


    def extract_weibo_content(self, weibo_html):
        """Strip markup and boilerplate from a tweet's HTML, return plain text."""
        s = weibo_html
        # NOTE: the original pattern had a trailing '|' (an always-matching
        # empty alternative); dropping it does not change the output.
        keyword_re = re.compile('<span class="kt">|</span>|原图|<!-- 是否进行翻译 -->')
        emoji_re = re.compile(r'<img alt="|" src="//h5\.sinaimg(.*?)/>')
        white_space_re = re.compile('<br />')
        div_re = re.compile('</div>|<div>')
        image_re = re.compile('<img(.*?)/>')
        url_re = re.compile('<a href=(.*?)>|</a>')

        # For reposts keep only the commenter's text after '转发理由:'.
        if '转发理由' in s:
            s = s.split('转发理由:', maxsplit=1)[1]
        if 'class="ctt">' in s:
            s = s.split('class="ctt">', maxsplit=1)[1]
        s = s.split('赞', maxsplit=1)[0]  # drop the like/repost footer
        s = keyword_re.sub('', s)
        s = emoji_re.sub('', s)
        s = url_re.sub('', s)
        s = div_re.sub('', s)
        s = image_re.sub('', s)
        if '<span class="ct">' in s:  # trailing timestamp/source span
            s = s.split('<span class="ct">')[0]
        s = white_space_re.sub(' ', s)
        s = s.replace('\xa0', '')
        s = s.strip(':')
        s = s.strip()
        return s


    def parse(self, url, index):
        """Crawl one search-result page; for page 1, also fan out to the
        following pages.  Extracted tweets (plus their authors' profiles)
        are appended to self.contents.

        Search-page URL shape:
        https://weibo.cn/search/mblog?...&starttime={}&endtime={}&sort=time&page=1
        """
        if url.endswith('page=1'):
            # From page 1, recursively visit the remaining pages.  The true
            # page count could be read from the pager, e.g.
            # re.search(r'/>&nbsp;1/(\d+)页</div>', ...); 100 is a fixed cap.
            print("index ", index)
            all_page = 100
            for page_num in range(2, all_page + 1):
                page_url = url.replace('page=1', 'page={}'.format(page_num))
                time.sleep(0.1)
                self.parse(page_url, index)
        print("parse page ", url[-7:])
        tree_node = self.handle_html(url)
        if not tree_node:
            print("no tree_node\n")
            return None
        # Each div.c carrying an id attribute is one tweet (10 per page).
        tweet_nodes = tree_node.xpath('//div[@class="c" and @id]')

        content = []
        for tweet_node in tweet_nodes:
            try:
                keys = ["_id", "weibo_url", "user_id", "content"]
                tweet_item = {key: "" for key in keys}

                # The repost link encodes both the tweet id and the author id.
                tweet_repost_url = tweet_node.xpath('.//a[contains(text(),"转发[")]/@href')[0]
                user_tweet_id = re.search(r'/repost/(.*?)\?uid=(\d+)', tweet_repost_url)
                tweet_item['weibo_url'] = 'https://weibo.com/{}/{}'.format(
                    user_tweet_id.group(2), user_tweet_id.group(1))
                tweet_item['user_id'] = user_tweet_id.group(2)
                tweet_item['_id'] = '{}_{}'.format(user_tweet_id.group(2), user_tweet_id.group(1))

                # If the tweet is truncated, follow the "全文" (full text) link.
                all_content_link = tweet_node.xpath('.//a[text()="全文" and contains(@href,"ckAll=1")]')
                if all_content_link:
                    all_content_url = self.base_url + all_content_link[0].xpath('./@href')[0]
                    full_tree = self.handle_html(all_content_url)
                    content_node = full_tree.xpath('//*[@id="M_"]/div[1]')[0]
                    tweet_html = etree.tostring(content_node, encoding='unicode')
                else:
                    tweet_html = etree.tostring(tweet_node, encoding='unicode')
                tweet_item['content'] = self.extract_weibo_content(tweet_html)

                # Attach the author's profile (_id, weibo_url, user_id,
                # content are already filled in above).
                user_info = self.get_weibo_info(tweet_item['user_id'])
                if not user_info:
                    continue
                tweet_item.update(user_info)
                content.append(tweet_item)
            except Exception as e:
                # BUGFIX: self.logger was never defined anywhere in the
                # class; report errors the same way the other methods do.
                print('Error: ', e)
                traceback.print_exc()

        self.contents.extend(content)  # keep everything on the instance
        page_num = url[url.find("page=") + 5:]
        item_num = len(self.contents)
        # Periodic checkpoint dump.  BUGFIX: page_num is a string, so the
        # original `page_num in [50, 80]` (ints) could never be true.
        if page_num in ("50", "80") and item_num >= (300 * index):
            self.write_info(self.seive(), "tweet_item" + str(index) + "_" + str(item_num) + ".csv")
        print("Contain {0} tweets".format(item_num))


    def write_log(self):
        """Append a note to weibo/log.txt when the cookie has expired."""
        file_dir = os.path.split(
            os.path.realpath(__file__))[0] + os.sep + 'weibo' + os.sep
        if not os.path.isdir(file_dir):
            os.makedirs(file_dir)
        file_path = file_dir + 'log.txt'
        # NOTE(review): the '%s' placeholder is never interpolated here —
        # looks like a leftover from a version that inserted a date; confirm.
        content = u'cookie已过期，从%s到今天的微博获取失败，请重新设置cookie\n'
        with open(file_path, 'ab') as f:
            f.write(content.encode(sys.stdout.encoding))


    def write_info(self, users_content, file_name):
        """Write the crawled records to a CSV file next to this script.

        users_content: list of dicts sharing the same keys; the keys of the
        first record become the CSV header.  Empty/None input is a no-op
        (BUGFIX: previously raised inside the try and only printed a trace).
        """
        try:
            if not users_content:
                return
            result_headers = list(users_content[0].keys())  # CSV header row
            file_path = os.path.split(os.path.realpath(__file__))[0] + os.sep + file_name

            with open(file_path, 'w', newline='', encoding="utf-8") as f:
                writer = csv.writer(f)  # one writer is enough for all rows
                writer.writerow(result_headers)
                for record in users_content:
                    writer.writerow([record[key] for key in result_headers])
                # no explicit close: the with-block handles it
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


    def seive(self):
        """Deduplicate self.contents in place and return it.

        Tweet records are plain dicts (unhashable), hence the linear
        membership scan; a database unique key would avoid this entirely.
        Always returns a list (BUGFIX: previously returned None when empty).
        """
        if self.contents:
            unique = []
            for item in self.contents:
                if item not in unique:
                    unique.append(item)
            self.contents = unique
        return self.contents


    def start(self):
        """Run the crawler over every pre-built search URL."""
        try:
            cnt = 0
            index = 1
            # Tweet-count thresholds triggering intermediate CSV dumps.
            threshold = [800, 1600, 2000, 2600, 3200, 4000, 4500]
            num = len(threshold)
            base_name = "users_contents"

            for url in self.urls:
                # Collect every tweet + author profile for this search URL
                # (a real database would make checkpointing unnecessary).
                self.parse(url, index)
                time.sleep(0.3)
                print("\n")
                index += 1
                if cnt < num - 1 and len(self.contents) > threshold[cnt]:
                    self.write_info(self.seive(), base_name + str(cnt) + ".csv")
                    cnt += 1  # advance to the next threshold
            self.write_info(self.seive(), "users_contents_plus.csv")
            print("Download all infos!")
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()


def main():
    """Load config.json from the script's directory and run the crawler."""
    try:
        base_dir = os.path.split(os.path.realpath(__file__))[0] + os.sep
        config_path = base_dir + 'config.json'
        if not os.path.isfile(config_path):
            sys.exit(u'当前路径：%s 不存在配置文件config.json' % (base_dir))
        with open(config_path) as f:
            try:
                config = json.load(f)
            except ValueError:
                sys.exit(u'config.json 格式不正确，请参考 ')
        crawler = Weibo(config)
        crawler.start()  # crawl the weibo data
    except Exception as e:
        print('Error: ', e)
        traceback.print_exc()


# Script entry point: only run the crawler when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
