# coding=utf-8

import re
import os
import time
from functools import partial

import requests
from bs4 import BeautifulSoup
from tornado.httpclient import HTTPClient

from jspider.douban.model.img import Img
from jspider.douban.model.topic import Topic
from jspider.config import IMG_PATH


class DoubanWorker(object):
    """Base worker that crawls a Douban group.

    Pipeline: fetch the group's list page -> extract topic ids ->
    fetch each topic page -> extract image URLs -> download the
    images to ``IMG_PATH/<group_name>/<bucket>/`` and record them
    via the ``Topic``/``Img`` models.
    """

    headers = {'Connection': 'Keep-Alive',
               'Accept': 'text/html',
               'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'}

    imgs_dir_name = ""
    list_page_url = ""
    topic_page_url_prefix = "http://www.douban.com/group/topic/"
    # Matches canonical topic URLs and captures the numeric topic id.
    topic_link_re = re.compile("%s([0-9]{1,})/" % topic_page_url_prefix)

    def __init__(self, group_name):
        """Bind the worker to one group.

        :param group_name: Douban group slug; also used as the image
            sub-directory name under ``IMG_PATH``.
        """
        self.http_client = HTTPClient()
        # All HTTP GETs share the browser-like headers above.
        self.fetch = partial(requests.get,
                             headers=self.headers)
        self.imgs_dir_name = group_name
        self.list_page_url = "http://www.douban.com/group/%s/" % group_name

    def fetch_list_page(self):
        """GET the group's topic-list page and return the response."""
        print(self.list_page_url)
        return self.fetch(self.list_page_url)

    def parse_list_page(self, list_page_body):
        """Parse the list page and return every topic id found.

        :param list_page_body: HTML text of the list page.
        :returns: list of topic id strings.
        """
        soup = BeautifulSoup(list_page_body)

        # Topic links live under the element with id "group-topics".
        group_topics = soup.find(id="group-topics")
        group_topics_links = group_topics.find_all("a")

        topic_ids = []
        for link in group_topics_links:
            href = link.get("href")
            if not href:
                # Anchors without href would make re.match raise TypeError.
                continue
            # Keep only links that look like topic URLs.
            reg = self.topic_link_re.match(href)
            if reg:
                topic_ids.append(reg.group(1))

        return topic_ids

    def fetch_topic_page(self, topic_id):
        """GET a single topic page by id and return the response."""
        url = self.topic_page_url_prefix + str(topic_id)
        return self.fetch(url)

    def parse_topic_page(self, topic_page_body):
        """Parse a topic page and return the image URLs it embeds.

        :param topic_page_body: HTML text of the topic page.
        :returns: list of non-empty ``src`` attribute values.
        """
        soup = BeautifulSoup(topic_page_body)
        link_report = soup.find(id="link-report")
        imgs = link_report.findAll("img")

        # Skip <img> tags without a usable src (would crash fetch later).
        img_srcs = []
        for img in imgs:
            src = img.get("src")
            if src:
                img_srcs.append(src)

        return img_srcs

    def fetch_img(self, img_url):
        """GET one image URL and return the response (binary body)."""
        return self.fetch(img_url)

    def start(self):
        """Run one full crawl cycle for the bound group."""

        list_page_res = self.fetch_list_page()
        print(list_page_res.status_code)
        topic_ids = self.parse_list_page(list_page_res.text)

        for topic_id in topic_ids:
            time.sleep(1)  # be polite: throttle topic fetches
            try:

                try:
                    # Skip topics that were already crawled.
                    if Topic.get(id=topic_id):
                        continue
                except Exception:
                    # Best-effort existence probe: "not found" means crawl it.
                    pass

                topic_page_res = self.fetch_topic_page(topic_id)
                topic = Topic.create(id=topic_id, origin_content=topic_page_res.text)

                img_urls = self.parse_topic_page(topic_page_res.text)

                for img_url in img_urls:
                    time.sleep(1)  # throttle image downloads too

                    try:
                        # Skip images that were already downloaded.
                        if Img.get(origin_url=img_url):
                            continue

                    except Exception:
                        # Best-effort probe, same as above.
                        pass

                    img = self.fetch_img(img_url)
                    ctr_time = time.time()

                    # Shard files into 1024 buckets to keep dirs small.
                    sub_dir_name = int(ctr_time % 1024)
                    file_name = "%s/%s/%s/%s.jpg" % (IMG_PATH, self.imgs_dir_name, sub_dir_name, ctr_time)

                    print(os.getcwd())
                    target_dir = "%s/%s/%s" % (IMG_PATH, self.imgs_dir_name, sub_dir_name)
                    if not os.path.exists(target_dir):
                        # Creates intermediate group dir as needed.
                        os.makedirs(target_dir)

                    # Binary mode is required for image bytes; write the file
                    # first so a failed write leaves no orphan DB record.
                    with open(file_name, "wb") as fp:
                        fp.write(img.content)

                    Img.create(name=file_name, topic=topic)

            except Exception as e:
                # Top-level per-topic boundary: log and move on.
                print(e)

        self.http_client.close()
