# -*- coding: utf-8 -*-

import urllib.request
import json
import time
import random
import pymysql.cursors
import re

import sys
import io

# Force stdout to UTF-8 so Chinese log output prints correctly even when the
# console's default encoding is not UTF-8 (e.g. GBK on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="UTF-8")

# Weibo user ID of the target big-V account to crawl.
# NOTE(review): `id` shadows the builtin id(); it is only referenced in
# commented-out code below, so left unchanged here.
id = '1259110474'

# HTTP proxy used for all outgoing requests, as "host:port".
proxy_addr = "122.241.72.191:808"

# MySQL connection settings.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file.
mysql_config = {
    'user': 'aiiread',
    'password': 'nLP4IgCu',
    'host': '127.0.0.1',
    'db': 'aiiread',
}

# Module-level DB connection shared by get_news(); opened at import time.
conn = pymysql.connect(**mysql_config)


# 定义页面打开函数
def use_proxy(url, proxy_addr):
    """Fetch *url* through the given HTTP proxy and return the body as text.

    :param url: URL to fetch.
    :param proxy_addr: HTTP proxy as "host:port".
    :return: response body decoded as UTF-8 (undecodable bytes dropped).
    :raises urllib.error.URLError: on network/HTTP failure.
    """
    req = urllib.request.Request(url)
    # m.weibo.cn rejects the default urllib User-Agent, so present a
    # browser UA instead.
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
    # BUG FIX: the original passed urllib.request.HTTPHandler to
    # build_opener, so the ProxyHandler it created was never used and every
    # request went out directly. Build the opener from the proxy handler.
    proxy = urllib.request.ProxyHandler({'http': proxy_addr})
    opener = urllib.request.build_opener(proxy)
    # Use the opener directly instead of install_opener(): a fetch helper
    # should not mutate process-global urllib state as a side effect.
    # The with-block also guarantees the response is closed.
    with opener.open(req) as resp:
        return resp.read().decode('utf-8', 'ignore')


def get_news():
    """Crawl the m.weibo.cn hot feed and store new posts in the database.

    Fetches up to 2 pages of the hot feed (containerid 102803), skips
    video posts, expands truncated posts via the /statuses/extend
    endpoint, and inserts posts not yet present (deduplicated on `wb_id`)
    into the `zzz_article` table. Network/parse errors are logged and the
    page is skipped; DB errors are logged and crawling continues.
    """
    containerid = '102803'
    url = 'https://m.weibo.cn/api/container/getIndex'
    page = 0

    while page < 2:
        weibo_url = url + '?containerid=' + containerid + \
                    '&openApp=0&since_id=' + str(page)
        print(weibo_url)
        try:
            data = use_proxy(weibo_url, proxy_addr)
            content = json.loads(data).get('data')
            cards = content.get('cards')
            if cards:
                print('微博数' + str(len(cards)))
                for card in cards:
                    # card_type 9 is a regular weibo post; other card types
                    # (banners, ads, ...) are skipped.
                    if card.get('card_type') != 9:
                        continue
                    mblog = card.get('mblog')
                    page_info = mblog.get('page_info')
                    # Skip posts that embed a video.
                    if page_info is not None and page_info.get('type') == 'video':
                        continue

                    insert = {}
                    insert['url'] = card.get('scheme')
                    insert['content'] = mblog.get('text')
                    pics = mblog.get('pics')
                    if pics is not None:
                        images = []
                        for pic in pics:
                            # Keep both the full-size variant and the
                            # original (orj360 thumbnail) URL.
                            images.append(pic['url'].replace('orj360', 'large'))
                            images.append(pic['url'])
                        insert['main_image'] = images[0]
                        insert['content_images'] = ','.join(images)
                    else:
                        insert['main_image'] = ''
                        insert['content_images'] = ''

                    if 'href="/status' in insert['content']:
                        # Truncated post: fetch the full text.
                        status_data = use_proxy(
                            "https://m.weibo.cn/statuses/extend?id=" + mblog.get('id'),
                            proxy_addr)
                        status_data2 = json.loads(status_data).get('data')
                        # BUG FIX: only overwrite when the full text is
                        # actually present. The original assigned the raw
                        # .get() result, so a missing 'longTextContent'
                        # stored None and crashed on .replace() below,
                        # aborting the entire page via the outer except.
                        if status_data2 is not None and status_data2.get('longTextContent'):
                            insert['content'] = status_data2.get('longTextContent')

                    # Image links starting with '//' -> 'http://'.
                    insert['content'] = insert['content'].replace(
                        'src="//', 'src="http://')
                    insert['user'] = mblog.get('user').get('screen_name')
                    print(filter_tags(insert['content']))

                    # Skip posts with no image and very short text.
                    if len(insert['main_image']) <= 0 and len(insert['content']) < 20:
                        continue

                    with conn.cursor() as cursor:
                        sql = "SELECT `id`  FROM `zzz_article` WHERE `wb_id`=%s"
                        # BUG FIX: parameters must be a 1-tuple;
                        # `(mblog.get('id'))` was just a parenthesised
                        # string (also removed a stray `pass`).
                        cursor.execute(sql, (mblog.get('id'),))
                        already_exist = cursor.fetchone()

                    if already_exist is None:  # not crawled yet -> insert
                        try:
                            with conn.cursor() as cursor:
                                now_time_unix = int(time.time())
                                sql = "INSERT INTO `zzz_article` (`author`, `category_id`,`type`,`user_id`, `title`," \
                                      " `content`, `content_images`,`main_image`, `createtime`, `updatetime`, `sendtime`, `status`, `wb_id`) values ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )"
                                # Title = stripped text, truncated to 20 chars.
                                title = filter_tags(insert['content'])
                                if len(title) > 20:
                                    title = title[:20] + '...'
                                cursor.execute(sql,
                                               (insert['user'], 5, 3, 1, title,
                                                insert['content'], insert['content_images'],
                                                insert['main_image'],
                                                now_time_unix,
                                                now_time_unix, now_time_unix,
                                                1,
                                                mblog.get('id')
                                                ))
                            conn.commit()
                        except Exception as mysql_error:
                            # Best-effort: log the DB error, keep crawling.
                            print(mysql_error)

            page = page + 1
            print(page, weibo_url)
            # Throttle requests to look less bot-like.
            time.sleep(2 + random.random())
        except Exception as e:
            # Network/JSON failure for this page: log it and move on.
            page = page + 1
            print(e)


def filter_tags(htmlstr):
    """Strip HTML markup from *htmlstr* and return plain text.

    CDATA blocks, <script>/<style> sections, remaining tags and HTML
    comments are removed, <br> tags become newlines, runs of newlines are
    collapsed to one, and character entities are decoded via
    replaceCharEntity().
    """
    # FIX: use raw strings throughout. The original patterns contained
    # invalid escape sequences such as '\[' and '\s' in plain string
    # literals, which raise SyntaxWarning on Python 3.12+ (and are slated
    # to become errors). The compiled regexes are unchanged.
    re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)  # CDATA blocks
    re_script = re.compile(
        r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # <script>…</script>
    re_style = re.compile(
        r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)  # <style>…</style>
    re_br = re.compile(r'<br\s*?/?>')  # line breaks -> '\n'
    re_h = re.compile(r'</?\w+[^>]*>')  # any remaining HTML tag
    re_comment = re.compile(r'<!--[^>]*-->')  # HTML comments
    s = re_cdata.sub('', htmlstr)  # drop CDATA
    s = re_script.sub('', s)  # drop scripts
    s = re_style.sub('', s)  # drop styles
    s = re_br.sub('\n', s)  # <br> -> newline
    s = re_h.sub('', s)  # drop tags
    s = re_comment.sub('', s)  # drop comments
    blank_line = re.compile(r'\n+')
    s = blank_line.sub('\n', s)  # collapse blank lines
    s = replaceCharEntity(s)  # decode &lt; &amp; &#160; etc.
    return s


def replaceCharEntity(htmlstr):
    """Decode a small set of HTML character entities in *htmlstr*.

    Known entities (nbsp/lt/gt/amp/quot and their numeric forms) are
    replaced with the corresponding character; any other entity is
    dropped entirely. Entities are rewritten one at a time, left to
    right, until none remain, so entities produced by an earlier
    replacement (e.g. '&amp;lt;') are decoded too.
    """
    entity_map = {
        'nbsp': ' ', '160': ' ',
        'lt': '<', '60': '<',
        'gt': '>', '62': '>',
        'amp': '&', '38': '&',
        'quot': '"', '34': '"',
    }
    entity_re = re.compile(r'&#?(?P<name>\w+);')

    match = entity_re.search(htmlstr)
    while match is not None:
        # Entity name without '&'/'#'/';' — e.g. 'gt' for '&gt;'.
        # Unknown names map to '' so the whole entity disappears.
        replacement = entity_map.get(match.group('name'), '')
        htmlstr = entity_re.sub(replacement, htmlstr, 1)
        match = entity_re.search(htmlstr)
    return htmlstr


if __name__ == "__main__":
    # print(2 + random.random())
    get_news()

    # file = id + ".txt"
    # get_userInfo(id)
    # get_weibo(id, file)
