# coding=utf-8

import re
import os
import time
from functools import partial

import requests
from bs4 import BeautifulSoup
from tornado.httpclient import HTTPClient

from jspider.douban.model.img import Img
from jspider.douban.model.topic import Topic
from jspider.config import IMG_PATH


class AlbumWorker(object):
    """Crawl every photo of a Douban album and save the images under IMG_PATH.

    Usage: ``AlbumWorker(album_id).start()``.  Pages are walked until one
    yields no photo links; each photo already recorded in the ``Img`` table
    (by ``origin_id``) is skipped.
    """

    headers = {'Connection': 'Keep-Alive',
               'Accept': 'text/html',
               'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'}

    album_page_url_prefix = "http://www.douban.com/photos/album/"
    photo_page_url_prefix = "http://www.douban.com/photos/photo/"
    # Matches photo-detail links; group(1) is the numeric photo id.
    photo_link_re = re.compile("%s([0-9]{1,})/" % photo_page_url_prefix)

    # Douban renders 18 thumbnails per album page (used for ?start= paging).
    PHOTOS_PER_PAGE = 18

    def __init__(self, album_id):
        self.http_client = HTTPClient()
        # NOTE(review): hard-coded open proxy — confirm it is still reachable,
        # otherwise every fetch will fail.
        self.fetch = partial(requests.get,
                             headers=self.headers,
                             proxies={"http": "http://106.44.176.251:8080/"})
        self.album_id = album_id
        # BUGFIX: the prefix already ends with "/" — the old "%s/%s" built
        # ".../album//<id>" with a double slash.
        self.album_page_url = "%s%s/" % (self.album_page_url_prefix, album_id)
        self.imgs_dir_name = album_id
        self.crt_page = 0

    def start(self):
        """Walk all album pages, downloading every new photo.

        Iterative (the original recursed unboundedly and, worse, dropped the
        album id from the paging URL).  Stops at the first page that yields
        no photo links, or on a non-200 / malformed page.
        """
        while True:
            self.album_page_url = self._page_url()

            print(self.crt_page)
            print(self.album_page_url)

            if not self._process_page(self.album_page_url):
                return
            self.crt_page += 1

    def _page_url(self):
        """Build the URL of the current album page (keeps the album id!)."""
        return "%s%s/?start=%s" % (self.album_page_url_prefix,
                                   self.album_id,
                                   self.crt_page * self.PHOTOS_PER_PAGE)

    def _process_page(self, page_url):
        """Fetch one album page and download its photos.

        Returns the number of photo links found on the page (0 stops the
        pagination loop in :meth:`start`).
        """
        res = self.fetch(page_url)
        if res.status_code != 200:
            print(res.status_code)
            return 0

        soup = BeautifulSoup(res.text)

        photolst = soup.find(id="content")
        if not photolst:
            return 0

        photolst_a = photolst.find_all("a", {"class": "photolst_photo"})

        for link in photolst_a:
            time.sleep(1)  # be polite: throttle one request per second
            # Keep only real photo-detail links.
            reg = self.photo_link_re.match(link.get("href"))
            if not reg:
                continue

            try:
                # Skip photos we have already stored.
                if Img.get(origin_id=reg.group(1)):
                    continue
            except Exception:
                # Best effort: Img.get raising (e.g. not-found) means "new
                # photo" — fall through and download it.
                pass

            self._download_photo(reg.group(0), reg.group(1))

        return len(photolst_a)

    def _download_photo(self, photo_page_url, photo_id):
        """Fetch one photo page, download the full-size image to disk and
        record it in the Img table."""
        res = self.fetch(photo_page_url)
        if res.status_code != 200:
            return

        soup = BeautifulSoup(res.text)
        link_report = soup.find(id="link-report")
        if not link_report:
            return
        img = link_report.find("img")
        if not img:
            return

        res = self.fetch(img.get("src"))
        ctr_time = time.time()

        # Spread files over up to 1024 sub-directories to keep any single
        # directory small.
        sub_dir_name = int(ctr_time % 1024)
        sub_dir = os.path.join(IMG_PATH, self.imgs_dir_name, str(sub_dir_name))
        file_name = os.path.join(sub_dir, "%s.jpg" % time.time())

        print(os.getcwd())
        # makedirs creates the album dir and the sub dir in one call.
        if not os.path.exists(sub_dir):
            os.makedirs(sub_dir)

        Img.create(name=file_name, topic=None, origin_id=photo_id)

        # BUGFIX: image data is binary — the old code used text mode "w+"
        # and never guaranteed the handle was closed.
        with open(file_name, "wb") as fp:
            fp.write(res.content)
