# -*- coding: UTF-8 -*-
import datetime
import json
import logging
import requests
import scrapy
from scrapy.spiders import Spider
import time
from taoguba_comment_spider.tfidf import TFIDF
import urllib
import urllib.request
import lxml.html as HTML
from taoguba_comment_spider.DB_connetion_pool import PTConnectionHelper


class GubaZhutiCommentSpider(Spider):
    """Crawls stock-topic discussion posts ("zhuti" comments) from taoguba.com.cn.

    Workflow: claim one pending stock row from the `stocks_zhuti` MySQL table,
    page through its discussion JSON feed, fetch the full text/HTML of each
    post, score it by content length plus author popularity (fans + likes),
    extract TF-IDF keywords, and yield a CheckItem that the configured
    Elasticsearch pipeline indexes. When a stock is exhausted the spider
    immediately claims the next one, looping forever.
    """

    name = "GubaZhutiCommentSpider"
    # Per-spider overrides: items go straight to Elasticsearch, one at a time
    # (buffer length 1), deduplicated on the 'did' field.
    custom_settings = {
        "ITEM_PIPELINES": {
            'scrapyelasticsearch.scrapyelasticsearch.ElasticSearchPipeline': 500
        },
        "ELASTICSEARCH_SERVERS": ['http://211.159.152.115:9200'],
        "ELASTICSEARCH_INDEX": 'diggold_guba_zhuti_index_v6',
        "ELASTICSEARCH_TYPE": 'item',
        "ELASTICSEARCH_BUFFER_LENGTH": 1,
        "ELASTICSEARCH_UNIQ_KEY": 'did',
        "DOWNLOAD_DELAY": 15,
        "CONCURRENT_REQUESTS": 100,
        "LOG_LEVEL": 'DEBUG'
    }

    # Global configuration shared by every method. NOTE(review): `global` at
    # class-body level promotes `cfg` to a module-level global, so all spider
    # instances in this process share one mutable dict.
    global cfg
    cfg = {'userId': '', 'userPwd': '', 'cookiejar': '', 'cookies': '', 'UA': '', 'ip': '',
           'index': 0}

    def start_requests(self):
        """Claim the first pending stock and issue page 1 of its discussion feed."""
        self.log("--------GubaZhutiCommentSpider 开启程序--------", logging.INFO)
        cfg['UA'] = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0"
        # Blocks (polling the DB) until a stock row can be locked.
        stock = self.getStock()
        request = scrapy.http.Request(stock['url'],
                                      meta=stock,
                                      callback=self.parse)
        request.headers.setdefault('User-Agent', cfg['UA'])
        yield request

    def parse(self, response):
        """Parse one JSON page of discussion records for the stock in `response.meta`.

        Yields CheckItem objects for records that pass the popularity/length
        filter, then either requests the next page (while records are still
        newer than `history_run`) or claims the next stock from the DB.
        """
        page = int(response.meta['page'])
        self.log("----------------------parse----------------------" + time.strftime('%Y-%m-%d %X', time.localtime()), logging.INFO)
        if response.status == 200:
            self.log("----------------------parse----------------------" + time.strftime('%Y-%m-%d %X', time.localtime()), logging.INFO)
            current_url = str(response.meta['url'])
            self.log("------------------------url:" + current_url + "------symbol:" + str(response.meta['symbol']), logging.INFO)
            # Timestamp of the previous crawl; records older than this stop pagination.
            history_run = str(response.meta['history_run'])
            if page == 1:
                # Record the start of this crawl as the stock's new history_run.
                self.updateStockHistoryRun(str(response.meta['stock_id']), self.time_now().strftime("%Y-%m-%d %H:%M:%S"))
            next_page = False
            bodystr = str(response.body, 'utf-8')
            if bodystr != "":
                hjson = json.loads(bodystr)
                # Records live at hjson['dto']['dto']; a truthy status plus that
                # nested key signals a usable page.
                if (str(hjson["status"]).lower() == "true" or str(hjson["status"]) == "success") and 'dto' in hjson and 'dto' in hjson['dto']:
                    next_page = True
                    for record in hjson['dto']['dto']:
                        # postDate is epoch milliseconds.
                        create_time_long = str(record['postDate'])
                        x = time.localtime(int(create_time_long)/1000)
                        create_time_str = time.strftime('%Y-%m-%d %H:%M:%S', x)
                        # Records arrive newest-first; the first one at or before
                        # history_run means everything after is already crawled.
                        if not self.time_compare(create_time_str, history_run):
                            self.log("next_page:false", logging.INFO)
                            next_page = False
                            break
                        title = str(record['subject'])
                        # Unique item id: symbol_topicID_replyID (ES dedup key).
                        did = str(response.meta['symbol']) + "_" + str(record['topicID']) + "_" + str(record['replyID'])
                        attitude = 0  # like count (Weibo-style field, always 0 here)
                        repost = 0  # repost count (Weibo-style field, always 0 here)
                        comment = 0  # unified reply_count, always 0 here

                        userid = str(record['userID'])  # newly added field
                        username = str(record['userName'])  # newly added field
                        if 'imgUrl' in record:
                            headimg = "https://image.taoguba.com.cn/img/" + str(record['imgUrl'])  # avatar URL
                        else:
                            headimg = ''
                        img = str(record['images'])  # attached post images
                        weibo_from = '来自论坛'
                        # topicType dispatch: 'R' = reply to a thread, 'T' = thread
                        # (article), anything else = "shuoshuo" micro-post ('W').
                        if str(record['topicType']) == 'R':
                            is_repost = '1'
                            total_weibo_url = 'https://www.taoguba.com.cn/Reply/' + str(record['topicID']) + '/' + str(record['replyID']) + '#' + str(record['replyID'])
                            result = self.getAllText(total_weibo_url, str(record['replyID']), 'R')
                            content = result['content']
                            div_html = result['div_html']
                        elif str(record['topicType']) == 'T':  # T: thread/article
                            is_repost = '0'
                            total_weibo_url = 'https://www.taoguba.com.cn/Article/' + str(record['topicID']) + '/1'
                            result = self.getAllText(total_weibo_url, str(record['topicID']), 'T')
                            content = result['content']
                            div_html = result['div_html']
                        else:  # W: "shuoshuo" micro-post
                            weibo_from = '来自淘说说'
                            total_weibo_url = 'https://shuo.taoguba.com.cn/weiboSingle?userID=' + userid + '&feedID=' + str(record['topicID'])
                            result = self.getAllText(total_weibo_url, str(record['topicID']), 'W')
                            content = result['content']
                            div_html = self.buildDivHtml(str(record['topicType']), str(record['replyID']), str(record['topicID']), headimg, userid, username, title, content, create_time_str)
                            is_repost = '0'
                        if content == '':
                            # Full-text fetch failed: fall back to the (truncated)
                            # content embedded in the listing record.
                            content = str(record['content'])
                            div_html = self.buildDivHtml(str(record['topicType']), str(record['replyID']), str(record['topicID']), headimg, userid, username, title, content, create_time_str)
                        insert_time = str(time.time()).split('.')[0]
                        try:
                            dic = self.getFans(userid)
                            total_count = dic['fans_count']+dic['zan_count']
                            self.log("total_count: "+str(total_count))
                        except:
                            # NOTE(review): bare except; any failure silently zeroes
                            # the popularity count.
                            total_count = 0
                        if total_count < 20000 and len(content) < 100:  # fans+likes under 20k AND content under 100 chars: skip record
                            continue
                        score = self.score(len(content), total_count)
                        if score == 0:  # zero score (author under 1k fans+likes): skip record
                            continue
                        # Top-5 TF-IDF keywords serialized as "term,weight||" pairs.
                        keyword = ''
                        tfidf = TFIDF()
                        for x, w in tfidf.extract_tags(content, 5, withWeight=True):
                            keyword = keyword + str(x) + "," + str(w) + "||"

                        item = CheckItem(did=did, content=content, img=img, attitude=attitude,
                                         create_time_str=create_time_str, repost=repost
                                         , comment=comment, total_weibo_url=total_weibo_url,
                                         weibo_from=weibo_from, is_repost=is_repost
                                         , original_author_name='',
                                         original_author_url='',
                                         repost_reason='',
                                         original_weibo_attitude='0',
                                         original_weibo_repost='0',
                                         original_weibo_comment='0', userid=userid
                                         , username=username
                                         , headimg=headimg, comment_content=''
                                         , title=title, create_time_long=create_time_long, data_source='taoguba',
                                         insert_time=insert_time, score=score, is_article=False, current_url=current_url, keyword=keyword, div_html=div_html)
                        yield item
            if next_page:
                # Still inside the fresh window: request the next page of this stock.
                page += 1
                url = self.buildUrl(response.meta["symbol"], page)
                request = scrapy.http.Request(url,
                                              meta={'url': url,
                                                    'stock_id': str(response.meta["stock_id"]), 'symbol': str(response.meta["symbol"]), 'page': str(page),
                                                    'history_run': history_run},
                                              callback=self.parse)
                request.headers.setdefault('User-Agent', cfg['UA'])
                yield request
            else:
                # Stock exhausted (or empty body): claim the next pending stock.
                stock = self.getStock()
                request = scrapy.http.Request(stock['url'],
                                              meta=stock,
                                              callback=self.parse)
                request.headers.setdefault('User-Agent', cfg['UA'])
                yield request

    def getStock(self):
        """Poll the DB until one pending stock row can be claimed.

        Selects the lowest-id row with status=0, then attempts to flip it to
        status=1; the `status = 0 and id = ...` guard in the UPDATE makes the
        claim atomic across competing spider processes. Polls every 5 s when
        no pending rows exist.

        Returns a dict shaped exactly like the request meta used by parse():
        {'url', 'stock_id', 'symbol', 'page', 'history_run'}.
        """
        while True:
            results = PTConnectionHelper.executeSql('select id,symbol,history_run from stocks_zhuti where status = 0 order by id ASC limit 1')
            if len(results) < 1:
                time.sleep(5)
                continue
            else:
                cfg['index'] += 1
            # NOTE(review): SQL built by concatenation; the id comes from our own
            # SELECT so injection risk is low, but parameterized queries are safer.
            sql = "update stocks_zhuti set status = 1  where status = 0 and id = " + str(results[0][0])
            ib = PTConnectionHelper.commitSql(sql)
            if ib < 1:
                # Another worker claimed this row first; try again.
                self.log("--------锁定第" + str(cfg['index']) + "个URL失败--------", logging.INFO)
                continue
            else:
                stock_id = str(results[0][0])
                symbol = str(results[0][1])
                url = self.buildUrl(symbol, 1)
                history_run = str(results[0][2])
                self.log(
                    "------锁定第" + str(cfg['index']) + "个URL成功------id:" + stock_id + "-----url:" + url,
                    logging.INFO)
                break
        return {'url': url, 'stock_id': stock_id, 'symbol': symbol, 'page': '1', 'history_run': history_run}

    @staticmethod
    def updateStockHistoryRun(a_id, now_run):
        """Persist `now_run` as the stock's last-crawl timestamp (history_run)."""
        sql = "update stocks_zhuti set history_run = '{0}'  where id = {1}".format(now_run, a_id)
        PTConnectionHelper.commitSql(sql)

    @staticmethod
    def time_now():
        """Return now + 8 h — presumably converting a UTC server clock to
        Beijing time (UTC+8). TODO(review): confirm the host clock is UTC,
        otherwise this double-shifts."""
        return datetime.datetime.now() + datetime.timedelta(hours=8)

    def buildUrl(self, symbol, page):
        """Build the discussion-feed URL for `symbol` / `page` (with cache-buster).

        Returns None (implicitly) when symbol is falsy — callers do not guard
        against this.
        """
        if symbol:
            url = "https://www.taoguba.com.cn/getDiscussion?keywordID=" + str(symbol).lower() + "&pageNo=" + str(page)
            return self.fileterUrl(url)
        else:
            self.log("symbol 为空.pageNo 为" + str(page), logging.ERROR)

    @staticmethod
    def fileterUrl(url):
        """Append/refresh a `_=<timestamp>` cache-busting query parameter.

        NOTE(review): the name is a typo for "filterUrl"; kept because other
        code may call it. Any unexpected error returns the URL unchanged.
        """
        try:
            if "_=" in url:
                # Drop the old cache-buster (and anything after it) before re-adding.
                url = url[:url.index('_=')]
            else:
                if '?' in url:
                    url += "&"
                else:
                    url += "?"
            url = (url + "_=" + str(time.time()))
            return url
        except Exception as e:
            return url

    @staticmethod
    def buildDivHtml(rType, rId, topicID, headimg, userid, username, title, content, create_time_str):
        """Render the display HTML snippet for a record by topic type.

        rType 'R' = reply (links both thread and reply anchor), 'T' = thread,
        'W' = micro-post (no links); any other type yields "". `headimg`,
        `userid` and `username` are accepted but unused in the templates.
        """
        if rType == "R":
            return '<div class="stockNews"><div class="right" style="text-align: left;width:90%"><div id="forumTitle" class="left">跟帖了<a href="https://www.taoguba.com.cn/Article/' + topicID + '/1" target="_blank">' + title + '</a></div><div class="subInfo left">' + content + '&nbsp;&nbsp;&nbsp;&nbsp;<a href="https://www.taoguba.com.cn/Reply/' + topicID + '/' + rId + '#' + rId + '" target="_blank" style="color:#5193C7">(全文)</a><br><span class="newsDate">' + create_time_str + '  来自论坛</span></div></div><div class="clear"></div></div>'
        elif rType == "W":
            return '<div class="stockNews"><div class="right" style="text-align: left;width:90%"><div id="forumTitle" class="left"></div><div class="subInfo left">' + content + '&nbsp;&nbsp;&nbsp;&nbsp;</div></div><div class="clear"></div></div>'
        elif rType == "T":
            return '<div class="stockNews"><div class="right" style="text-align: left;width:90%"><div id="forumTitle" class="left">发布了<a href="https://www.taoguba.com.cn/Article/' + topicID + '/1" target="_blank">' + title + '</a></div><div class="subInfo left" >' + content + '&nbsp;&nbsp;&nbsp;&nbsp;<a href="https://www.taoguba.com.cn/Article/' + topicID + '/1" target="_blank" style="color:#5193C7">(全文)</a><br><span class="newsDate">' + create_time_str + '   来自论坛</span></div></div><div class="clear"></div></div>'
        else:
            return ""

    def getFans(self, userid):
        """Fetch a user's fan count and like count from the blogger-info API.

        Returns {'fans_count': int, 'zan_count': int}; both zero on any fetch
        or parse failure, or when the response lacks the expected fields.
        NOTE(review): a response that parses but has no "status" key would
        raise KeyError here (only fetch/decode errors are caught).
        """
        url = "https://www.taoguba.com.cn/getBlogerInfo?userID=" + userid

        url = self.fileterUrl(url)
        self.log("getFans ----" * 10)
        try:
            data = urllib.request.urlopen(url).read()
            z_data = data.decode('UTF-8')
            hjson = json.loads(z_data)
            fans_count = 0
            zan_count = 0
        except Exception as e:
            return {'fans_count': 0, 'zan_count': 0}
        if (str(hjson["status"]).lower() == "true" or str(hjson["status"]).lower() == "success") and 'dto' in hjson and 'fansNum' in hjson['dto'] and 'us' in hjson['dto']:
            if self.is_num_by_except(str(hjson['dto']['fansNum'])):
                fans_count = int(str(hjson['dto']['fansNum']))
            if self.is_num_by_except(str(hjson['dto']['us'])):
                zan_count = int(str(hjson['dto']['us']))
        self.log("url ----"*20)
        self.log("url:"+url)
        self.log('fans_count: '+str(fans_count))
        self.log('zan_count: ' + str(zan_count))
        return {'fans_count': fans_count, 'zan_count': zan_count}

    def getAllText(self, url, rId, t):
        """Fetch a post's full text, dispatching on type `t` ('R'/'T'/else 'W').

        Bootstraps a fresh JSESSIONID first so the per-type fetchers can pass
        the login wall. Returns the fetcher's dict ({'content', ...}).
        """
        cookie_value = self.getJSESSIONIDValue("https://shuo.taoguba.com.cn/getMsgCount?_=1495077360965")
        if t == 'R':
            text = self.getReplyText(url, rId, cookie_value)
        elif t == 'T':
            text = self.getArticleText(url, rId, cookie_value)
        else:
            text = self.getShuoShuoText(url, rId, cookie_value)
        self.log("getAllText end----" * 10)
        return text

    @staticmethod
    def getJSESSIONIDValue(url):
        """Hit `url` with the hard-coded account cookies and return the
        JSESSIONID the server sets.

        NOTE(review): credentials (tgbpwd/tgbuser) are hard-coded; raises
        KeyError if the server does not set JSESSIONID. TLS verification is
        disabled for this request.
        """
        cookies = {'UM_distinctid': '15bfa5c4ea2bd-0ee2d6c1ac234b-46524130-100200-15bfa5c4ea3290',
                   'bdshare_firstime': '1494554019920',
                   'tgbpwd': '9CC8D031D0Aanelab5ft18pwsh',
                   'tgbuser': '997270'}
        # Cookie Version 0 forbids spaces, brackets, parentheses, equals signs,
        # commas, double quotes, slashes, question marks, @, colons and
        # semicolons in cookie values.
        r = requests.get(url, cookies=cookies, verify=False, timeout=3)
        return r.cookies['JSESSIONID']

    def getReplyText(self, url, rId, JSESSIONID_Value):
        """Fetch a reply page and extract the reply's text and HTML fragment.

        Detects the login/error page via the <title> text. Returns
        {'content': str, 'div_html': str}; both empty strings on any failure.
        """
        cookies = {'UM_distinctid': '15bfa5c4ea2bd-0ee2d6c1ac234b-46524130-100200-15bfa5c4ea3290',
                   'bdshare_firstime': '1494554019920',
                   'tgbpwd': '9CC8D031D0Aanelab5ft18pwsh',
                   'tgbuser': '997270',
                   'JSESSIONID': JSESSIONID_Value}
        # Cookie Version 0 forbids spaces, brackets, parentheses, equals signs,
        # commas, double quotes, slashes, question marks, @, colons and
        # semicolons in cookie values.
        try:
            r = requests.get(url, cookies=cookies, timeout=3)
            htree = HTML.fromstring(r.text)
            emts = htree.xpath("//title/text()")
            islogin = True
            result = ''
            div_html = ''
            for emt in emts:
                # Page titles that mean "login required" or "error page".
                if emt == '淘股吧_登录' or emt == '错误页面_淘股吧':
                    islogin = False
                    break
            if islogin:
                emts2 = htree.xpath("//p[@id='reply" + rId + "']")
                if len(emts2) > 0:
                    result = str(emts2[0].text_content()).replace('\xa0', '').strip()
                else:
                    self.log("emts2 的长度为0")
                    self.log("getReplyText url:" + url)
                    self.log("getReplyText rId:" + rId)
                try:
                    # Raw-HTML slice of the reply, bounded by its <p id> marker
                    # and the next closing </div>; brittle string splitting.
                    div_html = r.text.split("<p id='reply" + rId + "'>")[1]
                    div_html = div_html.split('</div>')[0]
                    div_html = "<p>" + div_html
                except:
                    div_html = ''
            return {'content': result, 'div_html': div_html}
        except Exception as e:
            self.log("getReplyText 未处理异常", logging.ERROR)
            self.log("getReplyText url:" + url, logging.ERROR)
            self.log(e, logging.ERROR)
            return {'content': '', 'div_html': ''}

    def getArticleText(self, url, topicID, JSESSIONID_Value):
        """Fetch an article page and extract the first post's text and HTML.

        Same login-wall detection as getReplyText. Returns
        {'content': str, 'div_html': str}; both empty strings on any failure.
        """
        cookies = {'UM_distinctid': '15bfa5c4ea2bd-0ee2d6c1ac234b-46524130-100200-15bfa5c4ea3290',
                   'bdshare_firstime': '1494554019920',
                   'tgbpwd': '9CC8D031D0Aanelab5ft18pwsh',
                   'tgbuser': '997270',
                   'JSESSIONID': JSESSIONID_Value}
        # Cookie Version 0 forbids spaces, brackets, parentheses, equals signs,
        # commas, double quotes, slashes, question marks, @, colons and
        # semicolons in cookie values.
        try:
            r = requests.get(url, cookies=cookies, timeout=3)
            htree = HTML.fromstring(r.text)
            emts = htree.xpath("//title/text()")
            islogin = True
            result = ''
            div_html = ''
            for emt in emts:
                # Page titles that mean "login required" or "error page".
                if emt == '淘股吧_登录' or emt == '错误页面_淘股吧':
                    islogin = False
                    break
            if islogin:
                emts2 = htree.xpath("//*[@id='first']")
                if len(emts2) > 0:
                    result = str(emts2[0].text_content()).replace('\xa0', '').strip()
                else:
                    self.log("emts2 的长度为0")
                    self.log("getArticleText url:" + url)
                    self.log("getArticleText topicID:" + topicID)
                try:
                    # Slice between the page's main-post HTML comment markers.
                    div_html = r.text.split('<!-- 主贴内容开始 -->')[1]
                    div_html = div_html.split('<!-- 主贴内容结束 -->')[0]
                except:
                    div_html = ''
            return {'content': result, 'div_html': div_html}
        except Exception as e:
            self.log("getArticleText 未处理异常", logging.ERROR)
            self.log("getArticleText url:" + url, logging.ERROR)
            self.log(e, logging.ERROR)
            return {'content': '', 'div_html': ''}

    def getShuoShuoText(self, url, rId, JSESSIONID_Value):
        """Fetch a "shuoshuo" micro-post page and extract its text.

        The text lives in the `value` attribute of the #hid_cont element.
        Returns {'content': str}; empty string on any failure. Note: no
        'div_html' key — the caller builds the HTML itself for this type.
        """
        cookies = {'UM_distinctid': '15bfa5c4ea2bd-0ee2d6c1ac234b-46524130-100200-15bfa5c4ea3290',
                   'bdshare_firstime': '1494554019920',
                   'tgbpwd': '9CC8D031D0Aanelab5ft18pwsh',
                   'tgbuser': '997270',
                   'JSESSIONID': JSESSIONID_Value}
        # Cookie Version 0 forbids spaces, brackets, parentheses, equals signs,
        # commas, double quotes, slashes, question marks, @, colons and
        # semicolons in cookie values.
        try:
            r = requests.get(url, cookies=cookies, timeout=3)
            htree = HTML.fromstring(r.text)
            emts = htree.xpath("//title/text()")
            islogin = True
            result = ''
            for emt in emts:
                # Page titles that mean "login required" or "error page".
                if emt == '淘股吧_登录' or emt == '错误页面_淘股吧':
                    islogin = False
                    break
            if islogin:
                emts2 = htree.xpath("//*[@id='hid_cont']")
                if len(emts2) > 0:
                    result = str(emts2[0].attrib['value']).replace('\xa0', '').strip()
                else:
                    self.log("emts2 的长度为0")
                    self.log("getShuoShuoText url:" + url)
                    self.log("getShuoShuoText rId:" + rId)
            return {'content': result}
        except Exception as e:
            self.log("getShuoShuoText 未处理异常", logging.ERROR)
            self.log("getShuoShuoText url:" + url, logging.ERROR)
            self.log(e, logging.ERROR)
            return {'content': ''}

    @staticmethod
    def time_compare(time_str, history_str):
        """Return True if `time_str` is strictly later than `history_str`.

        Both must be '%Y-%m-%d %H:%M:%S' strings; raises ValueError otherwise.
        """
        localtime1 = time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S'))
        localtime2 = time.mktime(time.strptime(history_str, '%Y-%m-%d %H:%M:%S'))
        return (localtime1 - localtime2) > 0

    def score(self, content_len, fans_count):
        """Compute a heat score: 0.3 * content-length tier + 0.7 * fans tier.

        `fans_count` is actually fans + likes (see caller). Returns 0 when
        the author has <= 1000 fans+likes, or on any unexpected error;
        otherwise a float rounded to 2 decimal places in (0, 100].
        """
        try:
            w1 = 0.3
            w2 = 0.7
            # Content-length tiers (chars -> score).
            if content_len > 3000:
                content_score = 100
            elif content_len > 2000:
                content_score = 90
            elif content_len > 1500:
                content_score = 70
            elif content_len > 1000:
                content_score = 50
            elif content_len > 500:
                content_score = 30
            else:
                content_score = 10

            # Popularity tiers; the bottom tier short-circuits to 0.
            if fans_count > 40000:
                fans_score = 100
            elif fans_count > 20000:
                fans_score = 80
            elif fans_count > 10000:
                fans_score = 60
            elif fans_count > 5000:
                fans_score = 40
            elif fans_count > 1000:
                fans_score = 20
            else:
                return 0
            a = w1 * content_score + w2 * fans_score
            b = float('%0.2f' % a)
            return b
        except Exception as e:
            self.log(str(e), logging.ERROR)
            print(e)
            return 0

    @staticmethod
    def is_num_by_except(str_num):
        """Return True if `str_num` parses as an int (EAFP check)."""
        try:
            int(str_num)
            return True
        except ValueError:
            return False


class CheckItem(scrapy.Item):
    """One taoguba discussion post/reply bound for Elasticsearch.

    Field names follow the Weibo-style schema shared with sibling spiders,
    so several counters are always zero-filled for this source.
    """

    # --- identity & provenance ---
    did = scrapy.Field()              # unique id: symbol_topicID_replyID (ES dedup key)
    data_source = scrapy.Field()      # origin tag, e.g. 'taoguba'
    current_url = scrapy.Field()      # listing URL the record was scraped from
    total_weibo_url = scrapy.Field()  # permalink to the full post
    weibo_from = scrapy.Field()       # human-readable source label

    # --- author ---
    userid = scrapy.Field()           # poster's user id
    username = scrapy.Field()         # poster's display name
    headimg = scrapy.Field()          # poster's avatar URL

    # --- content ---
    title = scrapy.Field()            # post subject (may be '')
    content = scrapy.Field()          # full post text (description)
    comment_content = scrapy.Field()  # unused here; always ''
    img = scrapy.Field()              # attached image URLs
    div_html = scrapy.Field()         # rendered HTML snippet for display
    keyword = scrapy.Field()          # TF-IDF "term,weight||" pairs
    is_article = scrapy.Field()       # whether the record is an article
    is_repost = scrapy.Field()        # '1' for replies, '0' otherwise

    # --- engagement counters (Weibo schema; zero-filled for taoguba) ---
    attitude = scrapy.Field()                 # like count
    repost = scrapy.Field()                   # repost count
    comment = scrapy.Field()                  # unified reply_count
    original_author_name = scrapy.Field()     # original post's author name
    original_author_url = scrapy.Field()      # original author's profile URL
    repost_reason = scrapy.Field()            # repost commentary
    original_weibo_attitude = scrapy.Field()  # original post's like count
    original_weibo_repost = scrapy.Field()    # original post's repost count
    original_weibo_comment = scrapy.Field()   # original post's reply count

    # --- timing & ranking ---
    create_time_str = scrapy.Field()   # post time, 'YYYY-mm-dd HH:MM:SS'
    create_time_long = scrapy.Field()  # post time, epoch millis as string
    insert_time = scrapy.Field()       # ingestion time, epoch seconds as string
    score = scrapy.Field()             # computed heat score
