#!/usr/bin/env python
# -*- coding:utf8 -*-


import urllib2
import codecs
from bs4 import BeautifulSoup
from mylog import MyLog


class MovieItem(object):
    """Value object holding one scraped movie's fields.

    Attributes:
        name:     movie title (unicode), or None until set.
        starring: space-joined actor names (unicode), or None until set.
        score:    rating text as scraped (unicode), or None until set.
    """

    def __init__(self, name=None, starring=None, score=None):
        # BUG FIX: the original declared these as *class* attributes,
        # so all instances shared the same slots until first assignment.
        # Instance attributes with the same None defaults are
        # backward-compatible (MovieItem() + attribute assignment still
        # works) and remove the shared-state pitfall.
        self.name = name
        self.starring = starring
        self.score = score


class GetMovie(object):
    """Scrape the dianying.2345.com animation listing.

    Construction runs the whole pipeline as a side effect:
    discover how many listing pages exist, build URLs for at most
    5 of them, scrape each movie's name / starring / score, and
    write the results to ``movie.txt``.
    """

    def __init__(self):
        self.base_url = 'https://dianying.2345.com/list/donghua------.html'
        self.urls = []
        self.items = []
        self.log = MyLog()
        self.page_num = self.get_page_num(self.base_url)

        self.urls = self.get_urls(self.page_num, 5)
        self.items = self.spider(self.urls)
        self.pipelines(self.items)


    def get_page_num(self, base_url):
        """Return the total number of listing pages as an int.

        The pager's second-to-last ``<a rel="nofollow">`` element
        holds the last page number.
        """
        html_content = self.get_response_content(base_url)
        # Name the parser explicitly: consistent with spider() and
        # silences BeautifulSoup's "no parser specified" warning.
        soup = BeautifulSoup(html_content, 'lxml')
        tags = soup.find_all('a', attrs={'rel': 'nofollow'})
        # BUG FIX: get_text() returns a string, but get_urls() compares
        # this value numerically against an int. In Python 2 a
        # str >= int comparison is always True, so the page cap never
        # applied. Convert to int here.
        page_num = int(tags[-2].get_text())

        self.log.info('Get Page Num: %s ' % page_num)
        return page_num


    def get_urls(self, page_num, num):
        """Build listing-page URLs for the first min(page_num, num) pages.

        Args:
            page_num: total pages available on the site (int).
            num:      upper bound on how many pages to fetch.

        Returns:
            List of page URL strings.
        """
        final_num = min(page_num, num)

        # NOTE(review): base_url is https but the per-page URLs are
        # built with http — presumably the site redirects; confirm
        # before unifying the scheme.
        html_head = r'http://dianying.2345.com/list/donghua-------'
        html_end = '.html'

        urls = []
        for i in xrange(1, final_num + 1):
            url = html_head + str(i) + html_end

            self.log.info('Get Url:%s' % url)
            urls.append(url)

        return urls


    def get_response_content(self, url):
        """GET *url* and return the raw body, or None on any failure."""
        headers = {'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linu…) Gecko/20100101 Firefox/57.0'}
        request = urllib2.Request(url, headers=headers)

        # proxy = urllib2.ProxyHandler({'http': 'http://202.96.142.2:3128'})
        # opener = urllib2.build_opener(proxy)
        # urllib2.install_opener(opener)

        try:
            # BUG FIX: open the Request object, not the bare url string;
            # otherwise the User-Agent header above was never sent.
            response = urllib2.urlopen(request)
        except Exception:
            # Narrowed from a bare except: a bare clause also swallows
            # KeyboardInterrupt/SystemExit. Best-effort behavior kept:
            # log and signal failure with None.
            self.log.error('Url: %s Open Error' % url)
            return None
        else:
            self.log.info('Url: %s Open Success' % url)

            return response.read()


    def spider(self, urls):
        """Scrape every listing page in *urls* into MovieItem objects."""
        items = []

        for url in urls:
            html_content = self.get_response_content(url)
            if html_content is None:
                # BUG FIX: a failed download returns None; feeding that
                # to BeautifulSoup raised TypeError. Skip the page.
                continue
            soup = BeautifulSoup(html_content, 'lxml')
            movies = soup.find('ul', attrs={'class': 'v_picTxt pic180_240 clearfix'}).find_all('li')

            for movie in movies:
                self.log.info('Get A Movie')

                # Ad slots / filler <li> entries have no title element.
                if not movie.find('em', attrs={'class': 'emTit'}):
                    continue

                name = movie.find('em', attrs={'class': 'emTit'}).a.get_text()
                self.log.info('name: %s' % name)

                tag_starring = movie.find('span', attrs={'class': 'sDes'}).find_all('em')
                starring = ''
                for tag in tag_starring:
                    if tag.a:
                        starring += tag.a.get_text()
                        starring += ' '
                self.log.info('starring: %s' % starring)

                score = movie.find('span', attrs={'class': 'pRightBottom'}).em.get_text()
                self.log.info('score: %s' % score)

                item = MovieItem()

                item.name = name
                item.starring = starring
                item.score = score

                items.append(item)

        return items


    def pipelines(self, items):
        """Write *items* to movie.txt as UTF-8, one field per line."""
        filename = 'movie.txt'

        with codecs.open(filename, 'w', 'utf8') as fp:
            for item in items:
                fp.write(item.name + '\r')
                fp.write(item.starring + '\r')
                fp.write(item.score + '\n')
                fp.write('\n')

        self.log.info("Write To File movie.txt Success")

if __name__ == '__main__':
    # Instantiating GetMovie runs the whole scrape-and-write pipeline
    # as a side effect of __init__ (see the class's constructor).
    GM = GetMovie()
