# -*- coding: utf-8 -*-
# @Date    : 2017-04-25 17:48:30
# @Author  : fancy (fancy@thecover.co)
import os
import json
import random
import urlparse
from datetime import datetime

from scrapy import Request
from scrapy.spiders import Spider
from scrapy.linkextractors import LinkExtractor

from app import db
from app.auth.models import User
from app.forum.models import Post, Comment


def GET_FIRST(x):
    """Return the first element of *x*, or None when x is empty/falsy.

    Convenience for Scrapy's ``.extract()`` lists, which may be empty.
    (Was a lambda bound to a name; PEP 8 prefers ``def``.)
    """
    return x[0] if x else None

class V2ex(Spider):
    """Spider that mirrors v2ex.com front-page topics into the forum DB.

    Already-seen post/user URLs are persisted to ``scrap_data.json``
    between runs so a restart does not create duplicate rows.
    """
    name = 'v2ex'
    download_delay = 0.2
    start_urls = ['https://www.v2ex.com/']
    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
        'DEFAULT_REQUEST_HEADERS': {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
        }
    }
    # URLs crawled so far; loaded in start_requests, saved in closed().
    post_url = set()
    user_url = set()

    def start_requests(self):
        # Resume state from a previous run.  A missing or corrupt state
        # file just means a fresh crawl; the original called open()
        # outside the try block and crashed on the very first run.
        try:
            with open('scrap_data.json', 'r') as f:
                data = json.loads(f.read())
            self.post_url = set(data['post_url'])
            self.user_url = set(data['user_url'])
        except (IOError, ValueError, KeyError):
            pass
        yield Request(self.start_urls[0], callback=self.parse)

    def parse(self, response):
        """Queue every not-yet-seen topic link found on the page."""
        extractor = LinkExtractor(allow=r'https://www.v2ex.com/t/\d+.*')
        for link in extractor.extract_links(response):
            if link.url not in self.post_url:
                self.post_url.add(link.url)
                yield Request(link.url, callback=self.detail_parse)

    def _get_user(self, userurl, username, avatar):
        """Return the User row for *userurl*, creating it on first sight.

        New users are staged on the session; the caller commits.
        """
        if userurl not in self.user_url:
            self.user_url.add(userurl)
            user = User(name=username, website=userurl, avatar=avatar)
            db.session.add(user)
        else:
            user = User.query.filter_by(website=userurl).first()
        return user

    def detail_parse(self, response):
        """Persist one topic page: the post, its author, all comments."""
        try:
            post = Post()
            post.title = GET_FIRST(response.xpath('//h1/text()').extract())
            post.content = GET_FIRST(
                response.xpath('//div[@class="topic_content"]').extract())
            pub_date = GET_FIRST(response.xpath(
                '//meta[@property="article:published_time"]/@content').extract())
            post.pub_date = datetime.strptime(pub_date, '%Y-%m-%dT%H:%M:%SZ')

            userurl = response.urljoin(GET_FIRST(
                response.xpath('//small[@class="gray"]/a/@href').extract()))
            username = GET_FIRST(response.xpath(
                '//small[@class="gray"]/a/text()').extract())
            avatar_url = GET_FIRST(response.xpath(
                '//div[@class="header"]/div/a/img/@src').extract())
            # Local mirror path for the avatar, served from /static/img.
            avatar = '/static/img%s' % urlparse.urlsplit(avatar_url).path
            if userurl not in self.user_url:
                # Avatar URLs are protocol-relative ("//cdn...").
                req = Request('http:%s' % avatar_url, callback=self.img_parse)
                req.meta.update({'avatar': avatar})
                yield req
            post.author = self._get_user(userurl, username, avatar)
            db.session.add(post)
            db.session.commit()

            # Second box under #Main holds the reply cells; skip the
            # leading header cell with the [1:] slice.
            comments = response.xpath(
                '//div[@id="Main"]/div[@class="box"][2]/div')[1:]
            for item in comments:
                try:
                    avatar_url = GET_FIRST(item.xpath('.//img/@src').extract())
                    avatar = '/static/img%s' % urlparse.urlsplit(avatar_url).path
                    username = GET_FIRST(
                        item.xpath('.//strong/a/text()').extract())
                    userurl = response.urljoin(
                        GET_FIRST(item.xpath('.//strong/a/@href').extract()))
                    content = ''.join(
                        item.xpath('.//div[@class="reply_content"]/*').extract())
                    if userurl not in self.user_url:
                        req = Request('http:%s' % avatar_url,
                                      callback=self.img_parse)
                        req.meta.update({'avatar': avatar})
                        yield req
                    user = self._get_user(userurl, username, avatar)
                    comment = Comment(content=content, author=user, post=post)
                    db.session.add(comment)
                    db.session.commit()
                except Exception as e:
                    # Skip a malformed reply cell, keep the rest.
                    # (Removed the pdb.set_trace() that froze the crawl.)
                    print(e)
        except Exception as e:
            # A malformed topic page must not kill the whole crawl.
            print(e)

    def img_parse(self, response):
        """Write a downloaded avatar to ./app<avatar-path> on disk."""
        try:
            path = './app%s' % response.meta['avatar']
            dir_path = os.path.dirname(path)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            # 'wb': response.body is raw bytes (text mode 'w' corrupts
            # images on platforms with newline translation).
            with open(path, 'wb') as f:
                f.write(response.body)
        except Exception as e:
            print(e)

    def closed(self, reason):
        """Persist the seen-URL sets when the spider shuts down."""
        data = {
            'post_url': list(self.post_url),
            'user_url': list(self.user_url),
        }
        with open('scrap_data.json', 'w') as f:
            f.write(json.dumps(data))


def init_data():
    """Give every post a random set of collecting users.

    For each post, picks a random number of users (up to the total user
    count) who do not yet collect it and appends them, then refreshes
    ``collected_count``.  Uses ``random.sample`` over the uncollected
    candidates instead of the original retry-until-unique loop, which
    could spin forever once a post was collected by every user.
    """
    posts = Post.query.all()
    users = User.query.all()
    user_count = len(users)
    for post in posts:
        # Users not already collecting this post; sample without retries.
        candidates = [u for u in users if u not in post.collected_by]
        k = min(random.randint(0, user_count), len(candidates))
        for u in random.sample(candidates, k):
            post.collected_by.append(u)
        post.collected_count = len(post.collected_by)
        db.session.commit()
    print('Done~ %d posts' % len(posts))

def get_cnx(db_uri):
    """Open the SQLite database at *db_uri* and return (connection, cursor).

    The caller owns the connection and must close it.  The import lives
    here because ``sqlite3`` was never imported at module level (it only
    appeared inside commented-out code), which made this a NameError.
    """
    import sqlite3  # only used by this maintenance helper
    cnx = sqlite3.connect(db_uri)
    cur = cnx.cursor()
    return cnx, cur

# def trans_data():
#     import sqlite3
#     old_db = 'app.db.bak'
#     new_db = 'app.db'
#     cnx, cur = get_cnx(new_db)
#     cnx1, cur1 = get_cnx(old_db)
#     cur1.execute('select id, name, email, avatar, gender, website, location, signature, brief, password_hash from user')
#     for cur1

if __name__ == '__main__':
    # Script entry point: populate random collect data for existing posts.
    # (Removed the dead `pass` after the call.)
    init_data()
