#!/usr/bin/env python
# -*- coding:utf-8 -*-

import re
import json
from domin.film import Film

from bs4 import BeautifulSoup
import time
import random

import db
import utils

DAO = db.DB()

class IdCrawler():
    """Crawl film listing pages on 1905.com for one english-name index
    letter and persist every parsed film record through the module DAO."""

    def __init__(self, index):
        # `index` is an index letter (A-Z) selecting the english-name listing.
        self.start_url = "http://www.1905.com/mdb/film/list/enindex-{}/".format(index)
        # Page template: `%d` is filled with the page number during crawl().
        self.page_format = "http://www.1905.com/mdb/film/list/enindex-{}/o0d0p%d.html".format(index)
        print("crawler start url:" , self.start_url)

    def crawl(self):
        """Fetch the first listing page, derive the total page count, then
        walk the remaining pages with a random polite delay between requests."""
        html = utils.make_request_get(self.start_url)
        total_count = self.parse_total(html)
        # 30 films per page; hard cap at 100 pages to bound the crawl.
        # NOTE(review): this computes one extra page when the count is an
        # exact multiple of 30 — kept as-is; the surplus page parses empty.
        total_page = int(int(total_count) / 30) + 1
        total_page = min(total_page, 100)
        print("total page: ", total_page)

        self.parse_one_page(html)
        for p in range(1, total_page):
            page_url = self.page_format % (p,)
            print(page_url)
            html = utils.make_request_get(page_url)
            try:
                self.parse_one_page(html)
            except Exception as e:
                # Best-effort: log the failure and keep crawling other pages.
                print(e)
            time.sleep(random.random() * 10)

    def parse_one_page(self, html):
        """Parse one listing page and persist the films found on it.

        Returns the list of film dicts (empty when the page has no entries).
        """
        soup = BeautifulSoup(html, "html5lib")
        div = soup.find("div", attrs={"class": 'leftArea'})

        film_lis = div.find_all("li", attrs={"class": "fl line"})
        if not film_lis:
            return []

        films = []
        for li in film_lis:
            fil = Film()
            a = li.find("a")
            href = a.get("href")
            fil.url = href
            id_match = re.search(r"\d+", href)
            if id_match is None:
                # Skip entries whose link carries no numeric film id
                # (the original crashed with AttributeError here).
                continue
            f_id = id_match.group()
            title = a.get_text()
            score = li.find("b", attrs={"class": "num"})
            if score:
                # NOTE(review): original assigned `fil.socre` (typo), so the
                # parsed score never reached the record — fixed to `score`;
                # confirm the field name against domin.film.Film.
                fil.score = float(score.get_text())

            zxbs = li.find_all('p', attrs={"class": "zy"})
            # First "zy" paragraph lists the starring actors.
            for star_a in zxbs[0].find_all("a"):
                fil.stars.append({"name": star_a.get_text(), "url": star_a.get("href")})

            # Second "zy" paragraph lists the genre/category tags.
            for cat_a in zxbs[1].find_all('a'):
                fil.categorys.append(cat_a.get_text())
            fil.id = f_id
            fil.names.append(title)
            films.append(fil.to_dict())
        DAO.insert_film(films)
        return films

    def parse_total(self, html):
        """Extract the total film count from markup like "共123部影片".

        Accepts raw utf-8 bytes (as the original did) or an already-decoded
        string. Returns the count as a string of digits.

        Raises ValueError when the count marker is not present in the page.
        """
        if isinstance(html, bytes):
            html = html.decode("utf-8")
        match = re.search(r"共(?P<total_num>\d+)部影片", html)
        if match is None:
            raise ValueError("total film count not found in page")
        return match.group("total_num")


if __name__ == '__main__':
    import string

    # Crawl the english-name index letters from "E" through "Z".
    for letter in string.ascii_uppercase[4:]:
        print(letter)
        IdCrawler(letter).crawl()
