import datetime

import requests
from scrapy import Selector


# Crawl and clean answer-page data; single-page logic, returns an iterator
from csdn_spider.models import Answer


def answer_spider(url):
    """Crawl one answers page and yield one dict per answer post.

    Fetches *url*, locates every ``div`` whose id starts with ``post-``
    and yields a dict with keys: ``topic_id``, ``id``, ``content``,
    ``author_name``, ``author_id``, ``create_time`` (a ``datetime``)
    and ``parised_nums``.

    Bug fixed: the original created a single dict before the loop and
    yielded it every iteration, so all yielded items aliased the same
    object — consumers that collected the generator saw only the last
    answer repeated.
    """
    res = requests.get(url).text
    sel = Selector(text=res)
    all_answers = sel.xpath("//div[contains(@id,'post-')]")
    # The topic id is the trailing path segment of the page URL;
    # it is invariant for the whole page, so compute it once.
    topic_id = url.split('/')[-1]
    for item in all_answers:
        # Fresh dict each iteration so yielded items are independent.
        answer = {}
        answer['topic_id'] = topic_id
        answer['id'] = item.xpath(".//@data-post-id").extract()[0]
        answer['content'] = item.xpath(".//div[contains(@class,'post_body')]").extract()[0]
        answer['author_name'] = item.xpath(".//div[@class='nick_name']/a/text()").extract()[0]
        # Author id is the last segment of the profile link href.
        answer['author_id'] = item.xpath(".//div[@class='nick_name']/a/@href").extract()[0].split('/')[-1]
        create_time = item.xpath(".//label[@class='date_time']/text()").extract()[0]
        answer['create_time'] = datetime.datetime.strptime(create_time, "%Y-%m-%d %H:%M:%S")
        # The praise label text looks like "<word> <count>"; keep the count part.
        answer['parised_nums'] = item.xpath(".//label[@class='red_praise digg d_hide']/em/text()").extract()[0].split(' ')[-1]
        yield answer


def save_answer_mysql(base_answer):
    """Upsert one answer dict (as produced by ``answer_spider``) into MySQL.

    Copies the dict's fields onto an ``Answer`` model instance, then
    UPDATEs the existing row when a row with the same id already exists,
    otherwise INSERTs a new row (``force_insert`` because the primary
    key is pre-set).
    """
    answer = Answer()
    answer.topic_id = base_answer['topic_id']
    answer.id = base_answer['id']
    answer.author_name = base_answer['author_name']
    answer.author_id = base_answer['author_id']
    answer.content = base_answer['content']
    answer.create_time = base_answer['create_time']
    answer.parised_nums = base_answer['parised_nums']
    # .exists() issues an EXISTS query instead of materializing full rows
    # just to test presence (also fixes the 'exited_answer' typo).
    if Answer.select().where(Answer.id == int(base_answer['id'])).exists():
        answer.save()  # peewee: UPDATE keyed on the primary key
    else:
        answer.save(force_insert=True)  # INSERT since the pk is pre-set

