# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from evny.items import EvnyItem
from evny.langconv import *
from evny.settings import *
import hashlib
import time
import datetime
import re
from elasticsearch import Elasticsearch
from lxml import etree
def get_today():
    """Return today's local date as 'Y-M-D' with zero-padding stripped.

    The forum renders dates without leading zeros (e.g. '2019-2-3'), so the
    zero-padded strftime output is converted field by field.
    """
    year, month, day = time.strftime('%Y-%m-%d', time.localtime()).split('-')
    return '{}-{}-{}'.format(int(year), int(month), int(day))
es = Elasticsearch(ES_URL)


class ForumSpider(CrawlSpider):
    """Crawl eyny.com forum boards and index posts into Elasticsearch.

    The main post of a thread (first floor of page 1) is stored in
    INDEX_MAIN under the hashed first-page URL; every other floor and every
    inline comment is stored in INDEX_SLAVE, linked back via the thread id.
    """
    name = 'nforum'
    allowed_domains = ['www.eyny.com']

    # Board-id -> board-name map; the keys drive the generated start URLs.
    dic_map = {
    # '1018':'政治',
    # "1019":'社会',
    # "5626":'社论',
    "7305":'讨论'
    }
    start_urls = [
    'https://www.eyny.com/forum.php?mod=forumdisplay&fid=1724&filter=author&orderby=dateline&dateline=86400&typeid={}'.format(typeid) for typeid in dic_map.keys()
    ]
    # start_urls = ['https://www.eyny.com/thread-12091449-1-1.html']

    rules = (
        # Follow pagination of the board listing pages.
        Rule(LinkExtractor(allow=r'&page=.{8}',restrict_xpaths='//div[@id="pgt"]'), follow=True),
        # From each listing page, follow only threads whose date cell shows today.
        Rule(LinkExtractor(allow=r'forum\.php\?mod=viewthread&tid',restrict_xpaths='//*[text()="{}"]/../../..//*'.format(get_today())), follow=True),
        # Rule(LinkExtractor(allow=r'forum\.php\?mod=viewthread&tid',restrict_xpaths='//*[text()="2019-2-20"]/../../..//*'), follow=True),
        # Extract thread-*.html links (incl. thread pagination) and parse them.
        Rule(LinkExtractor(allow=r'thread-(.*)-(.*)-(.*)\.html',restrict_xpaths='//*[@id="thread_subject"] | //*[@id="pgt"]'), callback='parse_item', follow=True),
    )

    def start_requests(self):
        """Issue the start requests with login cookies.

        Direct thread URLs (used for debugging, see the commented-out
        start_urls above) skip the crawl rules and go straight to parse_item.
        """
        for url in self.start_urls:
            if 'thread' in url:
                yield scrapy.Request(url=url,cookies=COOKIES,callback=self.parse_item)
            else:
                yield scrapy.Request(url=url,cookies=COOKIES)

    def parse_item(self, response):
        """Parse one page of a thread and index every floor on it.

        Yields follow-up requests for paginated inline comments.
        """
        print(response.url)
        # Category tag shown next to the title, e.g. "[讨论]" -> "讨论".
        category = Traditional2Simplified( response.xpath('//a[@id="thread_subject"]/../a[1]/text()').extract_first().lstrip('[').rstrip(']'))
        # Page number within the thread; the first floor on page 1 is the main post.
        first_page = int(re.findall(r'thread-.*-(.*)-.*\.html',response.url)[0])

        # Thread id: normalize the URL to its first page, then hash it.
        first_page_url = re.sub('thread-(.*)-(.*)-(.*).html',r'thread-\1-1-\3.html',response.url)
        first_page_url_id = get_md5_id(first_page_url)
        title = response.xpath('//a[@id="thread_subject"]/text()').extract_first()
        post_list = response.xpath('//div[@id="postlist"]/div[starts-with(@id,"post_")]')
        if first_page == 1:
            # View count (the stats row only appears on the first page).
            buddha_operation = int(response.xpath('//div[@id="postlist"]/table[1]/tr//span[2]/text()').extract_first())
            # Reply count.
            comments = int(response.xpath('//div[@id="postlist"]/table[1]/tr//span[last()]/text()').extract_first())
        count = len(post_list)
        for i in range(count):
            post = post_list[i]
            # Floor id from the "post_<id>" container. (renamed from `id`,
            # which shadowed the builtin)
            post_id = post.xpath('./@id').extract_first().replace('post_','')
            # Left column: user info.
            user = post.xpath('./table[@id]/tr[1]/td[1]')[0]
            # Right column: post body.
            content = post.xpath('./table[@id]/tr[1]/td[2]')[0]
            # Author nickname.
            author = user.xpath('.//div[@class="authi"]/a[starts-with(@href,"space-uid-")]/text()').extract_first()
            # Author forum rank.
            author_grade = user.xpath('.//a[starts-with(@href,"home.php?mod=spacecp&ac=usergroup&gid")]/font/text()').extract_first()
            # Publish time; recent posts hide the absolute time in a span @title.
            datetime_str = content.xpath('.//div[@class="authi"]/em[@id]/text()').extract_first()
            if datetime_str == '發表於 ':
                datetime_str = content.xpath('.//div[@class="authi"]/em[@id]/span/@title').extract_first()
            # Normalized date string + unix timestamp. (local renamed from
            # `datetime`, which shadowed the imported module)
            post_time,timestamp = trans_date_ts(datetime_str)
            # Post body: strip useless markup before extracting the text.
            # BUG FIX: re.sub's 4th positional argument is `count`, not
            # `flags` — the original passed re.S (==16) as a replacement
            # cap and never enabled DOTALL.  Patterns are non-greedy so
            # DOTALL cannot swallow unrelated trailing markup.
            context_html = content.xpath('.//td[starts-with(@id,"postmessage_")]').extract_first()
            context_html = re.sub(r'<div class="locked">.*?</div>','',context_html,flags=re.S)
            context_html = re.sub(r'<(.*?) class="pstatus">.*?</\1>','',context_html,flags=re.S)
            context_html = re.sub(r'<script .*?</script>','',context_html,flags=re.S)
            quoted = content.xpath(r'.//*[contains(text()," 發表於 ")]')
            if quoted:
                # Literal removal — the extracted HTML is not a regex pattern
                # (the original fed it to re.sub, where metacharacters in the
                # markup could corrupt or break the substitution).
                context_html = context_html.replace(quoted.extract_first(),'')
            tree = etree.HTML(context_html)
            context = ''.join(tree.xpath('.//text()')).strip()
            item = EvnyItem(author=author,author_grade=author_grade,context=context,time=post_time,timestamps=timestamp,source='eyny',category=category)
            for k,v in item.items():
                if isinstance(v,str):
                    item[k] = Traditional2Simplified(v)
            if first_page == 1 and i == 0:
                # Main post: store under the thread id in the main index.
                index = INDEX_MAIN
                item['comments'] = comments
                item['buddha_operation'] = buddha_operation
                item['category'] = category
                try:
                    # Drop a leading "[tag]" prefix from the title if present.
                    item['title'] = re.findall(r'[\[【]?(.*?)[\]】]?[\[【]',title)[0]
                except IndexError:
                    item['title'] = title
                index_id = first_page_url_id
            else:
                # Reply floor: store under its own floor id in the slave index,
                # with a back-reference to the thread.
                index = INDEX_SLAVE
                index_id = post_id
                item['id'] = first_page_url_id
            es.index(index=index,doc_type=DOC_TYPE,id=index_id,body=dict(item))
            print(item)

            # Inline comments attached to this floor.
            comment_div = post.xpath('.//div[starts-with(@id,"comment_")]')
            comment_list = comment_div.xpath('./div[@class="pstl xs1"]')
            for comment in comment_list:
                comment_author = comment.xpath('.//a[starts-with(@href,"space-uid-")]/text()').extract_first()
                comment_context = ''.join(comment.xpath('./div[@class="psti"]//text()').extract())
                # re.escape: author names may contain regex metacharacters.
                comment_context = re.findall('{}(.*)發表於'.format(re.escape(comment_author)),comment_context,re.S)[0].strip()
                comment_datetime_str = comment.xpath('.//div[@class="psti"]/span[@class="xg1"]/span/@title')
                if not comment_datetime_str:
                    comment_datetime_str = comment.xpath('.//div[@class="psti"]/span[@class="xg1"]/text()')
                comment_datetime,comment_timestamp = trans_date_ts(comment_datetime_str.extract_first().strip())
                comment_index_id = get_md5_id(comment_author+comment_context)
                comment_item = EvnyItem(id=first_page_url_id,author=comment_author,context=comment_context,time=comment_datetime,timestamps=comment_timestamp,message_id=index_id,source='eyny')
                for k,v in comment_item.items():
                    if isinstance(v,str):
                        comment_item[k] = Traditional2Simplified(v)
                es.index(index=INDEX_SLAVE,doc_type=DOC_TYPE,id=comment_index_id,body=dict(comment_item))
            # Comment pagination must be rebuilt by hand from the onclick handler.
            comment_next_page = comment_div.xpath('.//*[@class="nxt" and @onclick]')
            if comment_next_page:
                onclick = comment_next_page[0].xpath('./@onclick').extract_first()
                parts = re.findall(r"ajaxget\('(.*)', '(.*)'\)",onclick)[0]
                comment_next_page_url = 'https://www.eyny.com/{}&inajax=1&ajaxtarget={}'.format(parts[0],parts[1])
                # BUG FIX: the original passed `comment_message_id`, which was
                # only bound inside the loop above and raised NameError when
                # comment_list was empty; it always equals index_id.
                yield scrapy.Request(url=comment_next_page_url,callback=self.parse_comment,meta={'first_page_url_id':first_page_url_id,'comment_message_id':index_id})


    def parse_comment(self,response):
        """Parse one ajax page of inline comments; index each into INDEX_SLAVE.

        Yields a follow-up request when a further comment page exists.
        """
        first_page_url_id= response.meta['first_page_url_id']
        comment_message_id= response.meta['comment_message_id']
        # The ajax response is an XML envelope whose text payload is the HTML fragment.
        tree = etree.HTML(etree.fromstring(response.text.encode('utf-8')).text)
        comment_list = tree.xpath('.//div[@class="pstl"]')
        for comment in comment_list:
            comment_author = comment.xpath('.//a[starts-with(@href,"space-uid-")]/text()')[0]
            comment_context = ''.join(comment.xpath('./div[@class="psti"]//text()'))
            # re.escape: author names may contain regex metacharacters.
            comment_context = re.findall('{}(.*)發表於'.format(re.escape(comment_author)),comment_context,re.S)[0].strip()
            comment_datetime_str = comment.xpath('.//div[@class="psti"]/span[@class="xg1"]/span/@title')
            if not comment_datetime_str:
                comment_datetime_str = comment.xpath('.//div[@class="psti"]/span[@class="xg1"]/text()')
            comment_datetime,comment_timestamp = trans_date_ts(comment_datetime_str[0].strip())
            comment_index_id = get_md5_id(comment_author+comment_context)
            comment_item = EvnyItem(id=first_page_url_id,author=comment_author,context=comment_context,time=comment_datetime,timestamps=comment_timestamp,message_id=comment_message_id,source='eyny')
            for k,v in comment_item.items():
                if isinstance(v,str):
                    comment_item[k] = Traditional2Simplified(v)
            es.index(index=INDEX_SLAVE,doc_type=DOC_TYPE,id=comment_index_id,body=dict(comment_item))
        # Follow the next comment page, if any.
        comment_next_page = tree.xpath('.//a[@class="nxt" and @ajaxtarget]')
        if comment_next_page:
            ajaxtarget = comment_next_page[0].xpath('./@ajaxtarget')[0]
            href = comment_next_page[0].xpath('./@href')[0]
            comment_next_page_url = 'https://www.eyny.com/{}&inajax=1&ajaxtarget={}'.format(href,ajaxtarget)
            yield scrapy.Request(url=comment_next_page_url,callback=self.parse_comment,meta={'first_page_url_id':first_page_url_id,'comment_message_id':comment_message_id})

def date2ts(date):
    """Convert a date string to a local unix timestamp (int).

    Accepts either 'Y-m-d' or 'Y-m-d H:M:S'.  Raises ValueError for any
    other shape (the original bare `except:` also swallowed KeyboardInterrupt
    and masked unrelated errors).
    """
    try:
        return int(time.mktime(time.strptime(date, '%Y-%m-%d')))
    except ValueError:
        return int(time.mktime(time.strptime(date, '%Y-%m-%d %H:%M:%S')))
def ts2date(ts):
    """Format a unix timestamp as a local-time 'Y-m-d H:M:S' string."""
    local = time.localtime(ts)
    return time.strftime('%Y-%m-%d %H:%M:%S', local)
def _clock_to_24h(clock):
    """Normalize a clock string to 24-hour 'HH:MM:SS'.

    Accepts 24-hour 'HH:MM' or 12-hour 'II:MM AM/PM'; unknown shapes fall
    back to the original behavior of just appending seconds.
    """
    clock = clock.strip()
    for fmt in ('%H:%M', '%I:%M %p'):
        try:
            return time.strftime('%H:%M:%S', time.strptime(clock, fmt))
        except ValueError:
            continue
    return clock + ':00'

def trans_date_ts(date_str):
    """Normalize a forum timestamp string to ('Y-m-d H:M:S', unix_ts).

    Handles the relative forms 'N 秒前/分鐘前/小時前/天前' and '半小時前',
    'yesterday/day-before' forms '昨天/前天 HH:MM' (now also 12-hour
    'II:MM AM/PM' — the original appended ':00' blindly, producing e.g.
    '2019-2-19 11:19 PM:00', which date2ts could not parse, so the module's
    own __main__ demo crashed), and absolute 'Y-m-d II:MM AM/PM' strings.
    """
    date = date_str.replace('發表於 ','').replace('半小時前','30 分鐘前')
    now_ts = time.time()
    if '秒前' in date:
        # "N seconds ago"
        seconds = int(date.replace('秒前','').strip())
        date = ts2date(now_ts - seconds)
    elif '分鐘前' in date:
        # "N minutes ago"
        minutes = int(date.replace('分鐘前','').strip())
        date = ts2date(now_ts - minutes * 60)
    elif '小時前' in date:
        # "N hours ago"
        hours = int(date.replace('小時前','').strip())
        date = ts2date(now_ts - hours * 3600)
    elif '天前' in date:
        # "N days ago" (original misnamed this variable `hour`)
        days = int(date.replace('天前','').strip())
        date = ts2date(now_ts - days * 86400)
    elif '昨天' in date:
        # "yesterday HH:MM [AM/PM]"
        date = '{} {}'.format(get_before_date(1), _clock_to_24h(date.replace('昨天','',1)))
    elif '前天' in date:
        # "day before yesterday HH:MM [AM/PM]"
        date = '{} {}'.format(get_before_date(2), _clock_to_24h(date.replace('前天','',1)))
    else:
        # Absolute 12-hour form, e.g. '2019-2-20 11:19 PM'.
        format_time = time.strptime(date, '%Y-%m-%d %I:%M %p')
        date = time.strftime('%Y-%m-%d %H:%M:%S', format_time)
    return date, date2ts(date)

def get_before_date(date_cha):
    """Return the ISO date string `date_cha` days before today (local time).

    E.g. with today == 2018-07-07 and date_cha == 1, returns '2018-07-06'.
    """
    target = datetime.date.today() - datetime.timedelta(days=date_cha)
    return str(target)

def get_md5_id(s):
    """Return the MD5 hex digest of `s` (UTF-8), used as a stable document id."""
    return hashlib.md5(s.encode()).hexdigest()
def Traditional2Simplified(sentence):
    """Convert Traditional Chinese characters in `sentence` to Simplified.

    :param sentence: the text to convert
    :return: the converted text
    """
    return Converter('zh-hans').convert(sentence)

if __name__ == '__main__':
    # Quick manual check of the date-normalization helper.
    demo_date, demo_ts = trans_date_ts('昨天 11:19 PM')
    print(demo_date, demo_ts)