import requests
from scrapy import Selector

from csdn_spider.models import Author


def author_spider(url):
    """Scrape a CSDN author profile page and return basic author stats.

    :param url: profile URL; its last path segment is used as the author id.
    :return: dict with keys ``name``, ``id``, ``blog_nums``,
        ``follower_nums`` and ``following_nums``.  On success the counts are
        the raw strings scraped from the page; when the profile cannot be
        parsed (e.g. the user does not exist) the counts fall back to ``0``
        and ``name`` is the placeholder string.
    """
    # Timeout so a dead/slow server cannot hang the spider indefinitely.
    res = requests.get(url, timeout=10).text
    sel = Selector(text=res)
    author_id = url.split('/')[-1]
    try:
        author = {
            'name': sel.xpath("//div[@class='lt_title']/text()").extract()[2].strip(),
            'id': author_id,
            'blog_nums': sel.xpath("//div[@class='me_chanel_det_item access']/a/span/text()").extract()[0].strip(),
            'follower_nums': sel.xpath("//div[@class='fans']/a/span/text()").extract()[0].strip(),
            'following_nums': sel.xpath("//div[@class='att']/a/span/text()").extract()[0].strip()
        }
    except IndexError as e:
        # The XPath selectors matched nothing, so indexing the empty extract()
        # list raised IndexError -> treat the page as a missing/deleted user
        # and return placeholder values instead of crashing the crawl.
        print(e)
        author = {
            'name': '用户不存在',
            'id': author_id,
            'blog_nums': 0,
            'follower_nums': 0,
            'following_nums': 0
        }

    return author


def save_author_mysql(base_author):
    """Insert or update an ``Author`` row from a scraped author dict.

    :param base_author: dict as produced by ``author_spider``, with keys
        ``id``, ``name``, ``blog_nums``, ``follower_nums`` and
        ``following_nums``.
    """
    author = Author()
    author.id = base_author['id']
    author.name = base_author['name']
    # Counts arrive either as scraped strings or as the int 0 fallback;
    # normalise all three to int (the original only converted blog_nums).
    author.blog_nums = int(base_author['blog_nums'])
    author.follower_nums = int(base_author['follower_nums'])
    author.following_nums = int(base_author['following_nums'])
    # .exists() issues a cheap EXISTS query instead of fetching whole rows.
    existing_author = Author.select().where(Author.id == base_author['id']).exists()
    if existing_author:
        # Row already present: save() with the pk set issues an UPDATE.
        author.save()
    else:
        # New row with a manually-assigned pk: peewee needs force_insert.
        author.save(force_insert=True)