# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
from baike_spider import GlobleConstant
from baike_spider import ShortUrl
import re,time,random,string
import urlparse


class HtmlParser(object):
    """Extract follow-up crawl URLs and movie metadata from Douban movie pages.

    Depends on BeautifulSoup for HTML traversal and on the project modules
    GlobleConstant (crawl-status constants) and ShortUrl (short-id generator).
    All scraped text values are utf-8 encoded byte strings (Python 2 style);
    ``year`` and ``duration`` are ints.
    """

    # Regexes hoisted to class level so they compile once, not on every page.
    _SUBJECT_LINK = re.compile(r"/subject/\d+/\?from=subject-page")
    _NON_DIGITS = re.compile(r'\D+')      # strip everything that is not a digit
    _PARENS = re.compile(r'\(.*?\)')      # "(2012)"-style parenthesised suffix
    _LABEL_PREFIX = re.compile(r'.*:')    # leading "label:" text up to the colon
    _AFTER_DIGITS = re.compile(r'\D.*')   # first non-digit char and the rest

    def __init__(self):
        # Project-wide crawl-status constants and the short-url generator.
        self.STATUS = GlobleConstant.GLOBLECONSTANT()
        self.shortUrl = ShortUrl.short_url()

    def _get_new_urls(self, page_url, soup):
        """Return a set of absolute subject-page URLs linked from *soup*.

        Matches anchors like
        <a href="http://movie.douban.com/subject/6722879/?from=subject-page">.
        """
        new_urls = set()
        for link in soup.find_all('a', href=self._SUBJECT_LINK):
            # hrefs may be relative; resolve against the page we came from.
            new_urls.add(urlparse.urljoin(page_url, link['href']))
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Scrape one movie page into a flat dict of metadata fields.

        NOTE(review): assumes every expected node exists on the page; a page
        missing e.g. the director anchor raises AttributeError to the caller.
        """
        res_data = {}
        # doubanId: the numeric subject id embedded in the page URL.
        res_data['doubanId'] = self._NON_DIGITS.sub('', page_url)
        # title: <title>...</title> with any "(...)" segment removed.
        title_node = soup.find('title')
        res_data['title'] = self._PARENS.sub('', title_node.get_text()).encode('utf-8').strip()
        # director: <a href="/celebrity/..." rel="v:directedBy">name</a>
        director_node = soup.find('a', rel="v:directedBy")
        res_data['director'] = director_node.get_text().encode('utf-8').strip()
        # host: the first <a rel="v:starring"> anchor (lead actor).
        host_node = soup.find('a', rel="v:starring")
        res_data['host'] = host_node.get_text().encode('utf-8').strip()
        # actor: full cast line with its "label:" prefix and spaces removed.
        actor_node = soup.find('span', class_="actor")
        res_data['actor'] = self._LABEL_PREFIX.sub('', actor_node.get_text()).replace(' ', '').encode('utf-8')
        # year: <span class="year">(2012)</span> -> 2012
        year_node = soup.find('span', class_="year")
        res_data['year'] = int((year_node.get_text().replace(')', '')).replace('(', '').encode('utf-8'))
        # duration: <span property="v:runtime" content="122">122分钟</span> -> 122
        duration_node = soup.find('span', property="v:runtime")
        res_data['duration'] = int(self._AFTER_DIGITS.sub('', duration_node.get_text()).encode('utf-8'))
        # tags: every <span property="v:genre"> joined with '|' (the trailing
        # '|' of the original implementation is kept for compatibility).
        # Bug fix: findAll is the deprecated BS3 alias; use find_all.
        tags_node = soup.find_all('span', property="v:genre")
        strs = ''.join(tag_node.get_text() + "|" for tag_node in tags_node)
        res_data['tags'] = strs.encode('utf-8').strip()

        # area / language / epstitle come from the free-form #info block,
        # split on colons and newlines into alternating label/value slots.
        info_node = soup.find('div', id="info")
        info = info_node.get_text().encode("utf-8").replace(' ', '')
        infos = re.split(u':|\n', info)
        for i in range(0, len(infos) - 1):
            if re.search(r"制片国家/地区", infos[i]):
                res_data['area'] = infos[i + 1].strip()
            if re.search(r"语言", infos[i]):
                res_data['language'] = infos[i + 1].strip()
            if re.search(r"又名", infos[i]):
                res_data['epstitle'] = infos[i + 1].strip()

        # icon1: poster image URL from <img ... rel="v:image" /> (small
        # portrait thumbnail).
        img_node = soup.find('img', rel="v:image")
        res_data['icon1'] = img_node.attrs['src'].encode('utf-8').strip()
        # score: <strong class="ll rating_num" property="v:average">7.8</strong>
        # (kept as a string, matching existing consumers).
        score_node = soup.find('strong', class_="ll rating_num")
        res_data['score'] = score_node.get_text().encode('utf-8')
        # information: the plot summary span.
        information_node = soup.find('span', property="v:summary")
        res_data['information'] = information_node.get_text().encode('utf-8').strip()

        # Bookkeeping fields shared with the rest of the pipeline.
        status = self.STATUS.STATUS()
        res_data['id'] = res_data['doubanId']
        res_data['sid'] = self.shortUrl.shorturl(page_url)
        res_data['type'] = status['type']
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        res_data['updateTime'] = now
        res_data['crawlTime'] = now
        res_data['videoType'] = status['videoType']
        res_data['contentType'] = status['contentType']
        res_data['status'] = status['status']
        res_data['collect_status'] = status['collect_status']

        return res_data

    def paser(self, page_url, html_cont):
        """Parse *html_cont* fetched from *page_url*.

        Returns (new_urls, movie_data). On missing input returns
        (empty set, None) instead of a bare None (bug fix: callers that
        tuple-unpack the result no longer crash with TypeError).
        """
        if page_url is None or html_cont is None:
            return set(), None
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
