# -*- coding: utf-8 -*-
import scrapy

from EastMoney.items import EastmoneyItem, CommentItem


class MoneySpider(scrapy.Spider):
    """Crawl post listings and comments from guba.eastmoney.com.

    Without a stock ``code`` the spider scrapes the default board; with a
    code it scrapes that stock's own board.  The URL of the next listing
    page is appended to a log file after each page, so an interrupted
    crawl resumes from the last recorded page on restart.
    """

    name = 'money'
    allowed_domains = ['guba.eastmoney.com']
    # URL pieces: listing URL = start_url + page number + end_url
    start_url = 'http://guba.eastmoney.com/default,0'
    host = 'http://guba.eastmoney.com'
    end_url = '.html'
    # Next listing page to fetch; defaults to page 1 of the default board.
    next_page_url = 'http://guba.eastmoney.com/default,0_1.html'
    start_urls = []
    # Optional data-file handle (set externally, e.g. by a pipeline);
    # flushed after each listing page when present.
    data_file = None

    def __init__(self, name=None, code=None, author=None, updateTime=None, readCount=None,
                 commentCount=None, **kwargs):
        """Store crawl filters and restore the resume point from the log file.

        :param code: stock code whose board to crawl; ``None`` means the
            default board.
        :param author: when given, only posts by this author are scraped.
        :param updateTime: stored for external use (not read by the spider).
        :param readCount: stored for external use (not read by the spider).
        :param commentCount: stored for external use (not read by the spider).
        """
        self.code = code
        self.author = author
        self.updateTime = updateTime
        self.readCount = readCount
        self.commentCount = commentCount
        super().__init__(name, **kwargs)
        # Resume log: one URL per line; the last line is where to continue.
        self.log_file_name = "money_data_log.log" if code is None else "money_data_log_" + code + ".log"
        self.log_file = open(self.log_file_name, 'a+', encoding='utf-8')
        # 'a+' positions the cursor at EOF; rewind before reading the log.
        self.log_file.seek(0)
        self.log_str = self.log_file.read()
        if code is not None:
            self.next_page_url = 'http://guba.eastmoney.com/list,' + code + '_1.html'
        if len(self.log_str) > 0:
            # Previous run recorded progress: resume from the last entry.
            self.next_page_url = self.log_str.strip().split("\n")[-1]
        else:
            # First run: record the starting page.
            self.log_file.write(self.next_page_url + "\n")
            self.log_file.flush()

        self.start_urls = [self.next_page_url]

    def closed(self, reason):
        """Scrapy shutdown hook: release the resume-log file handle."""
        self.log_file.close()

    def parse(self, response):
        """Parse one listing page: yield a request per article, then follow
        pagination by re-scheduling :meth:`parse` on the next page."""
        print("已拉取：" + self.next_page_url)
        next_url = None  # set only when a genuinely new next page exists
        if self.code is None:
            # Default-board markup.
            for article in response.css('.newlist li'):
                item = EastmoneyItem()
                counts = article.css('cite::text').extract()
                item['readCount'] = counts[0]
                item['commentCount'] = counts[1]
                item['title'] = article.css('span.sub .note::attr(title)').extract_first()
                item['author'] = article.css('cite.aut a::text').extract_first()
                item['updateTime'] = article.css('cite.last::text').extract_first()
                item['url'] = article.css('span.sub .note::attr(href)').extract_first()
                # Guard against a missing href (None would break the concat).
                if item['url']:
                    yield scrapy.Request(self.host + item['url'], callback=self.content_parse,
                                         meta={'item': item})
            pager = response.css('#pageArea .pagernums a')[-1]
            if pager.css('::text').extract_first() == '下一页':
                next_url = response.urljoin(pager.css('::attr(href)').extract_first())
        else:
            # Stock-board markup.
            for article in response.css('.articleh'):
                item = EastmoneyItem()
                item['readCount'] = article.css('.l1.a1::text').extract_first()
                item['commentCount'] = article.css('.l2.a2::text').extract_first()
                item['title'] = article.css('.l3.a3 a::attr(title)').extract_first()
                item['author'] = article.css('.l4.a4 a::text').extract_first()
                item['updateTime'] = article.css('.l5.a5::text').extract_first()
                item['url'] = article.css('.l3.a3 a::attr(href)').extract_first()
                if item['url']:
                    yield scrapy.Request(self.host + item['url'], callback=self.content_parse,
                                         meta={'item': item})
            # data-pager fields: prefix | total count | page size | current page
            # (format inferred from the original index usage — TODO confirm).
            data = response.css('.pagernums::attr(data-pager)').extract_first().split('|')
            allcount, per_page, page = int(data[1]), int(data[2]), int(data[3])
            if per_page * page < allcount:
                next_url = 'http://guba.eastmoney.com/list,' + self.code + '_' + str(
                    page + 1) + '.html'
        # BUG FIX: this tail used to sit inside the stock-board branch only,
        # so the default board never followed its "下一页" link.  It now runs
        # for both branches, and only when a new page was actually found
        # (avoids re-queuing the current page on the last page).
        if next_url is not None:
            self.next_page_url = next_url
            print("下一页：" + self.next_page_url)
            # Persist the resume point before requesting the next page.
            self.log_file.write(self.next_page_url + "\n")
            self.log_file.flush()
            # Manually flush the data file so progress survives a crash.
            if self.data_file is not None:
                self.data_file.flush()
            # Schedule the next listing page.
            yield scrapy.Request(url=self.next_page_url, callback=self.parse)

    def content_parse(self, response):
        """Parse an article page: yield its comments (plus comment pagination)
        and the article item itself, honoring the optional author filter."""
        item = response.meta['item']
        # Author filter: drop posts written by anyone else.
        if self.author is not None and item['author'] != self.author:
            return
        item['content'] = response.css('#zwconbody .stockcodec::text').extract_first()

        # Comments on the first page plus a Request for the next comment page.
        for produced in self._comments_and_next_page(item, response):
            yield produced

        # Strip surrounding whitespace from every populated field.
        for field in item.fields:
            if item[field] is not None:
                item[field] = item[field].strip()
        yield item

    def comment_parse(self, response):
        """Parse a follow-up comment page for the article carried in meta."""
        item = response.meta['item']
        for produced in self._comments_and_next_page(item, response):
            yield produced

    def _comments_and_next_page(self, item, response):
        """Yield whitespace-stripped CommentItems found on *response*, then a
        Request for the next comment page when one exists.

        Shared by :meth:`content_parse` and :meth:`comment_parse`, which
        previously duplicated this logic verbatim.
        """
        for comment in self.create_comment(item['url'], response):
            for field in comment.fields:
                if comment[field] is not None:
                    comment[field] = comment[field].strip()
            yield comment

        # data-page fields: path prefix | total count | page size | current
        # page (format inferred from the original index usage — TODO confirm).
        comment_page = response.css('#newspage::attr(data-page)').extract_first()
        if comment_page is not None:
            data = comment_page.split('|')
            count, per_page, page = int(data[1]), int(data[2]), int(data[3])
            if per_page * page < count:
                next_url = self.host + '/' + str(data[0]) + str(page + 1) + self.end_url
                yield scrapy.Request(url=next_url, callback=self.comment_parse,
                                     meta={'item': item})

    @staticmethod
    def create_comment(url, response):
        """Build a CommentItem for every comment block on *response*.

        :param url: article URL recorded on each comment for traceability.
        :param response: a comment-bearing page response.
        :returns: list of CommentItem.
        """
        comments = []
        for dom in response.css('#zwlist .zwli'):
            comment = CommentItem()
            comment['url'] = url
            comment['author'] = dom.css('.zwnick a::text').extract_first()
            comment['date'] = dom.css('.zwlitime::text').extract_first()
            if comment['date'] is not None:
                # Raw text reads "发表于 <timestamp>"; keep the timestamp only.
                comment['date'] = comment['date'].replace('发表于', '')
            comment['content'] = dom.css('.short_text::text').extract_first()
            # BUG FIX: select the like-count *text*.  The original '.z_num'
            # selector returned the element's HTML markup, so the '点赞'
            # placeholder comparison below could never match.
            comment['points'] = dom.css('.z_num::text').extract_first()
            if comment['points'] is None or comment['points'] == '点赞':
                comment['points'] = '0'
            comments.append(comment)
        return comments

