import datetime
import re
import time

import requests
from lxml import etree
from retrying import retry

from utils.save_to_mysql import Tool
from utils.sp import split


class Spider(object):
    """Crawler for weibo.cn mobile search results.

    For every keyword configured in MySQL it walks the search result pages,
    extracts each post (plain, picture, retweet layouts), enriches it with
    sentiment / gender / avatar information and persists post, plan relation
    and a per-run crawl log through the ``Tool`` MySQL helper.
    """

    def __init__(self):
        # NOTE(review): hard-coded session cookie — it expires (see the ALF
        # field) and must be refreshed manually for the crawl to keep working.
        self.headers = {
            'cookie': 'SUB=_2A25wgNEvDeRhGeFM4lcR-CnKwjWIHXVTiv9nrDV6PUJbkdAKLXTckW1NQIzuTxQc5p6eLJJ2RyPNMEdQF2qXOgM7; SUHB=0OM04wZRrZDkdn; SCF=ArTT8BnCEe0kyWO-kBGWD9IgiixwW5Csp0juzjweccPVCRykqR5SRZhGAbO53aunHxVvyj3ZHvr5e50s8WIL9vA.; _T_WM=73872721495; SSOLoginState=1570519941; ALF=1573111941',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
        }
        self.num = 0                          # posts saved during the current run
        self.mysql = Tool()                   # MySQL persistence helper
        self.sen_list = self.mysql.get_sen()  # sensitive-word list used by save_item

    @retry(stop_max_attempt_number=3)
    def get_sexy_avatar(self, url):
        """Fetch a user's profile page and return ``(gender, avatar_url)``.

        Gender is '男' when the profile info block ('ut' div) mentions '男',
        otherwise '女'. Avatar falls back to '' when the page carries no
        avatar image. Retried up to 3 times on any exception.
        """
        resp = requests.get(url=url, headers=self.headers)
        page = etree.HTML(resp.content.decode().replace('<?xml version="1.0" encoding="UTF-8"?>', ''))
        info_text = page.xpath('//div[@class="ut"]//text()')
        # Fix: the original compared against ['男'] and therefore failed when
        # '男' appeared in more than one text node; any() is the intent.
        gender = '男' if any('男' in fragment for fragment in info_text) else '女'
        try:
            avatar = page.xpath('//img[@alt="头像"]/@src')[0]
        except IndexError:  # no avatar image on the page
            avatar = ''
        return gender, avatar

    def parse_time(self, t):
        """Normalize weibo.cn's relative timestamps to ``YYYY-mm-dd HH:MM[:SS]``.

        Handles the four formats the mobile site emits: '今天 HH:MM',
        'N分钟前', 'MM月DD日 HH:MM' (current year assumed) and an absolute
        'YYYY-mm-dd HH:MM:SS'. Raises IndexError on anything else.
        """
        now = datetime.datetime.now()
        if '今天' in t:
            # "今天 HH:MM" -> today's date plus the clock time (no seconds).
            return now.strftime('%Y-%m-%d') + ' ' + re.findall(r'(\d+:\d+)', t)[0]
        if '分钟前' in t:
            # "N分钟前" -> subtract N minutes from now.
            minutes = int(re.findall(r'(\d+)分钟前', t)[0])
            return (now - datetime.timedelta(minutes=minutes)).strftime('%Y-%m-%d %H:%M:%S')
        if '月' in t and '日' in t:
            # "MM月DD日 HH:MM" -> assume the current year.
            month = re.findall(r'(\d+)月\d', t)[0]
            day = re.findall(r'(\d+)日', t)[0]
            clock = re.findall(r'(\d{2}:\d{2})', t)[0]
            return '{}-{}-{} {}'.format(now.strftime('%Y'), month, day, clock)
        # Already absolute; extract it verbatim.
        return re.findall(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', t)[0]

    def save_item(self, item, planid):
        """Enrich *item* with sentiment/gender/avatar data and persist it."""
        self.num += 1
        words, item['sentiment'], item['sentiment_score'] = split(item['content'])
        # Flag the post when ANY tokenized word hits the sensitive-word list.
        # Fix: the original loop tested l[0] on every iteration instead of
        # the loop variable, so only the first token was ever checked.
        item['sen'] = 1 if any(w in self.sen_list for w in words) else 0
        item['category'] = '微博'
        item['gender'], item['avatar'] = self.get_sexy_avatar(item['userid'])
        item['create_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.mysql.save_weibo(item)
        self.mysql.save_relation(planid, item['post_id'])

    @staticmethod
    def _parse_counts(item, link_texts):
        """Fill like/share/comment counters from action-link texts like '赞[12]'."""
        for text in link_texts:
            if '赞' in text:
                item['like_num'] = text.replace('赞[', '').replace(']', '')
            if '转发' in text:
                item['share_num'] = text.replace('转发[', '').replace(']', '')
            if '评论' in text:
                item['comment_num'] = text.replace('评论[', '').replace(']', '')

    def _save_post(self, obj, wd, planid, content_div, action_div, retweet):
        """Extract one post from result node *obj* and persist it.

        *content_div*/*action_div* are 1-based indices of the <div> holding
        the (retweet) comment text and the action links / timestamp;
        *retweet* selects the "comment + ' 转发 ' + original" content form.
        """
        item = {
            'keyword': wd,
            'post_id': obj.xpath('./@id')[0],
            'source': obj.xpath('./div[1]/a[1]/text()')[0],
            'userid': obj.xpath('./div[1]/a[1]/@href')[0],
        }
        original_text = ''.join(obj.xpath('./div[1]/span[@class="ctt"]//text()'))
        if retweet:
            item['content'] = obj.xpath('./div[%d]/text()' % content_div)[0] + ' 转发 ' + original_text
        else:
            item['content'] = original_text
        self._parse_counts(item, obj.xpath('./div[%d]//a/text()' % action_div))
        t = obj.xpath('./div[%d]/span[@class="ct"]/text()' % action_div)[0]
        item['post_date'] = self.parse_time(t)
        self.save_item(item, planid)

    def parse_item(self, obj, wd, planid):
        """Dispatch one search-result node to the right layout parser.

        weibo.cn renders a post as 1-3 child <div>s:
          1 div  -> plain text post
          2 divs -> retweet (div[1] has two spans) or post with picture
          3 divs -> retweet with picture
        """
        div_count = len(obj.xpath('./div'))
        if div_count == 1:
            # Plain post: content, counters and timestamp all live in div[1].
            self._save_post(obj, wd, planid, content_div=1, action_div=1, retweet=False)
        elif div_count == 2:
            if len(obj.xpath('./div[1]/span')) == 2:
                # Text retweet: comment text and counters in div[2].
                self._save_post(obj, wd, planid, content_div=2, action_div=2, retweet=True)
            else:
                # Post with picture: content in div[1], counters in div[2].
                self._save_post(obj, wd, planid, content_div=1, action_div=2, retweet=False)
        elif div_count == 3:
            # Retweet with picture: comment text and counters in div[3].
            self._save_post(obj, wd, planid, content_div=3, action_div=3, retweet=True)

    def search(self, wd, max_pn, planid):
        """Crawl search result pages 1..max_pn-1 for keyword *wd*.

        Raises RuntimeError on a non-200 response (the original raised a
        bare BaseException, which carried no message and also swallowed the
        KeyboardInterrupt/SystemExit distinction for catch-all handlers).
        """
        for pn in range(1, max_pn):
            url = 'https://weibo.cn/search/mblog?hideSearchFrame=&keyword={}&page={}'.format(wd, pn)
            resp = requests.get(url=url, headers=self.headers)
            if resp.status_code != 200:
                raise RuntimeError('unexpected status {} for {}'.format(resp.status_code, url))
            page = etree.HTML(resp.content.decode().replace('<?xml version="1.0" encoding="UTF-8"?>', ''))
            # Only nodes with an @id are real posts; the rest are chrome.
            for node in page.xpath('//div[@class="c"]'):
                if node.xpath('./@id'):
                    self.parse_item(node, wd, planid)

    def run(self):
        """Crawl every configured keyword once and record a crawl log row."""
        word_list = self.mysql.get_word_list()
        max_pn = 10  # pages 1..9 per keyword
        for w in word_list:
            log_item = {
                'start_time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'planid': w['planid'],
            }
            self.num = 0  # reset the per-keyword saved-post counter
            self.search(w['keyword'], max_pn, w['planid'])
            log_item['finish_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            log_item['crawl_count'] = self.num
            log_item['crawl_state'] = 'OK'
            self.mysql.save_log(log_item)


if __name__ == '__main__':
    # Re-crawl all configured keywords forever, pausing a minute between runs.
    spider = Spider()
    while True:
        spider.run()
        print(spider.num)
        time.sleep(60)
