import requests
import time
import random
import csv
import pymongo
import os


# Leftover multiprocessing experiment (unused — see note in CrawlDetail:
# paging depends on the previous page's since_id, so it stays sequential):
# pool = Pool(5)  # create a pool of 5 worker processes
# pool.map(get_detail, ids)
# pool.close()  # stop accepting new work
# pool.join()   # block the main process until the workers exit
# db.weibo_.find().count();  # mongo-shell snippet to check the row count

class Weibo():
    """Crawl a user's posts from the m.weibo.cn mobile JSON API and save
    them to CSV (a MongoDB sink, ``InsertMondb``, is also available).

    State is kept on the class/instance so the driver script can read
    ``weibo.name`` and ``weibo.count`` after crawling.
    """

    # Index of the first card to read on the current page. The first
    # page's card 0 is a non-post card, so crawling starts at 1 there;
    # after the first page this is reset to 0.
    number = 1
    # How many posts have been collected so far.
    count = 0
    # Screen name of the target user (None until set by InputUser).
    name = None
    # Numeric uid of the target user (None until resolved by QueryUser).
    id = None
    # How many posts the operator asked to crawl.
    crawl_num = 0

    def InputUser(self):
        """Prompt for the target screen name and post count.

        Returns:
            str: the search-API URL used to resolve the name to a uid.
        """
        self.name = input("请输入您要爬取的微博的用户名\n")
        self.crawl_num = int(input("请输入您要爬取的微博数量\n"))
        base_url = 'https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D1%26q%3D{}'.format(self.name) \
                   + '&page_type=searchall'
        return base_url

    def QueryUser(self, base_url):
        """Resolve the entered screen name to a uid via the search API.

        On success sets ``self.name`` and ``self.id``; on failure prints a
        message and leaves ``self.id`` as None.

        Args:
            base_url (str): URL returned by :meth:`InputUser`.
        """
        data = None
        try:
            response = requests.get(base_url, timeout=5)
            # Retry once on a non-200 status before giving up.
            if response.status_code != 200:
                response = requests.get(base_url, timeout=5)
            data = response.json()
        except Exception:
            print("网络异常！\n")
        # Without a response body there is nothing to parse; the original
        # fell through and raised NameError on the unbound `data`.
        if data is not None:
            try:
                # Verified ("大V"), personal and organisation accounts sit
                # at different positions inside the search-result JSON.
                cards = data['data']['cards']
                if 'card_group' in cards[0]:
                    user = cards[0]['card_group'][0]['users'][0]
                    self.name = user['screen_name']
                    self.id = user['id']
                    print("查找到个人号" + self.name)

                elif 'user' in cards[1]['card_group'][0]:
                    user = cards[1]['card_group'][0]['user']
                    self.name = user['screen_name']
                    self.id = user['id']
                    print("查找到大v号" + self.name)

                elif 'users' in cards[1]['card_group'][0]:
                    user = cards[1]['card_group'][0]['users'][0]
                    self.name = user['name']
                    self.id = user['id']
                    print("查找到个人号" + self.name)
            except Exception:
                print("获取用户id失败\n")
        else:
            print("获取用户id失败\n")
        print("即将爬取{}条数据\n".format(self.crawl_num))

    def CrawlDetail(self):
        """Page through the user's timeline, buffering posts and flushing
        them to CSV periodically and at the end.

        Stops when ``crawl_num`` posts were collected or when the API
        stops returning a ``since_id`` (last page reached).
        """
        # The containerid of a user's post list is "107603" + uid.
        url = "https://m.weibo.cn/api/container/getIndex?uid={}" \
              "&luicode=10000011&lfid=100103type%3D1%26q%3D%E4%B8%8A%E5%B8%9D" \
              "&containerid={}&".format(self.id, "107603" + str(self.id))
        params = {
            'since_id': None
        }
        # Buffer of posts not yet written to disk.
        weibo_ = []
        # The API is fast, and the next page cannot be fetched without the
        # since_id from the previous one, so the loop stays sequential.
        while True:
            # Polite random delay of 0.5–5.0 s between requests.
            time.sleep(random.randint(5, 50) * 0.1)
            try:
                response = requests.get(url, params=params, timeout=5).json()
                # Transient server hiccup: retry the page once.
                if 'cardlistInfo' not in response['data']:
                    response = requests.get(url, params=params, timeout=5).json()
                # An absent since_id means the last page was reached.
                if 'since_id' not in response['data']['cardlistInfo']:
                    self.CreateCsv(weibo_)
                    break
                params['since_id'] = response['data']['cardlistInfo']['since_id']
                cards = response['data']['cards']
            except Exception as e:
                print(repr(e))
                # Skip this page: without a fresh response the card list
                # below would be unbound or stale.
                continue
            try:
                for card in cards[self.number:]:
                    # Video/image URLs are not uniform across posts, so
                    # only the basic fields are extracted here.
                    mblog = card['mblog']
                    items = {
                        # uid of the crawled user
                        'id': self.id,
                        # screen name
                        'name': self.name,
                        # publication time
                        'time': mblog['created_at'],
                        # post text, with newlines stripped for the CSV
                        'text': mblog['text'].replace("\n", ""),
                        # client the post was published from
                        'source': mblog['source'],
                        # like count
                        'attitudes_count': mblog['attitudes_count'],
                        # comment count
                        'comments_count': mblog['comments_count'],
                        # repost count
                        'reposts_count': mblog['reposts_count'],
                        # post detail-page URL
                        'detail': card['scheme'],
                    }
                    weibo_.append(items)
                    self.count = self.count + 1
                    print("已经爬取了{}条数据\n".format(str(self.count)))
                    if self.count > self.crawl_num - 1:
                        self.CreateCsv(weibo_)
                        return
            except Exception as e:
                print(repr(e))
            # After the first page every card is a post.
            self.number = 0
            # Flush periodically so the in-memory buffer stays bounded.
            if len(weibo_) > 200:
                self.CreateCsv(weibo_)
                weibo_ = []

    def InsertMondb(self, weibo_):
        """Insert the buffered posts into the local MongoDB (db ``weibo``,
        collection ``temp``).

        Args:
            weibo_ (list[dict]): posts produced by :meth:`CrawlDetail`.
        """
        try:
            connection = pymongo.MongoClient('mongodb://localhost:27017/')
            db = connection.weibo
        except Exception:
            print("连接数据库失败！\n")
            # Without a connection `db` is unbound; bail out instead of
            # raising NameError below.
            return
        try:
            db.temp.insert_many(weibo_)
            print("保存爬取" + str(self.count) + "条数据\n")
        except Exception as e:
            print(e)
            print("插入数据库失败\n")

    def CreateCsv(self, weibo_):
        """Append the buffered posts to ``D:/CrawWeiboData/<name>.csv``,
        writing the header row only when the file is first created.

        Args:
            weibo_ (list[dict]): posts to write; keys must match the
                column list below.
        """
        folder = "D:/CrawWeiboData/"
        # Create the output folder on first use (race-safe).
        os.makedirs(folder, exist_ok=True)
        # Column order of the CSV.
        keyword_list = ['id', 'name', 'time', 'text', 'source', 'attitudes_count', 'comments_count', 'reposts_count',
                        'detail']
        path = folder + self.name + ".csv"
        print("已经保存" + str(self.count) + "条数据\n")
        if not os.path.exists(path):
            # newline='' keeps csv from emitting blank rows on Windows.
            with open(path, "w", newline='', encoding='utf-8') as csvfile:
                # fieldnames gives the header and maps dict keys to columns.
                writer = csv.DictWriter(csvfile, fieldnames=keyword_list)
                writer.writeheader()
                writer.writerows(weibo_)
        else:
            # The file already exists: append without rewriting the header.
            with open(path, "a", newline='', encoding='utf-8') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=keyword_list)
                writer.writerows(weibo_)
            print("正在写入数据！！！\n")


if __name__ == '__main__':
    weibo = Weibo()

    # Resolve the operator's input to a uid (QueryUser sets weibo.id as a
    # side effect and returns None — the original uselessly bound that
    # None to a variable shadowing the `id` builtin).
    weibo.QueryUser(weibo.InputUser())
    if weibo.name is not None:
        weibo.CrawlDetail()
        print("爬取完毕，总共爬取了" + str(weibo.count) + "条数据")
        print("已经保存到D盘CrawWeiboData文件夹中,如果csv乱码，请用记事本打开\n")
        print("即将自动关闭窗口")
        # Leave the console window up briefly so the summary can be read.
        time.sleep(10)
