#!/usr/local/bin/python3
#encoding:utf-8

#import urllib
from urllib.parse import urlparse
import urllib.parse
import urllib.request
import datetime
from bs4 import BeautifulSoup
import bs4
from Base64EncoderDecoder import *
import codecs

#def piaohua_a_movie(title, url):
#    while(True):
#        try:
#            page = urllib.request.urlopen(url)
#        except IOError:
#            print('Error open URL:'),
#            print(url)
#        else:
#            if page != None:
#                break;
#    soup = BeautifulSoup(page)
#    div_show = soup.find('div', id='show')
#    movie_attr = { "title":"title", "url":"url"}
#    movie_attr['title'] = div_show.find('h3').contents[0].strip()
#    print(movie_attr['title'])
#    div_showinfo = soup.find('div', id="showinfo")
#    for table in div_showinfo.findAll('table'):
#        for anchor in table.findAll('a'):
#            if urlparse(anchor['href']).scheme != 'http':
#                print((urlencode(anchor['href'])))
#    return movie
#        #if table.find('anchor') != None:
#            #print(table.find('anchor'))
#            #print(table.find('anchor')['href'])
#            #str_url = (table.find('anchor')['href'])
#            #print(urlencode(str_url))
def downloadImage(url):
    """Download the image at *url* into the local ``photos/`` directory.

    The local filename is the last path component of the URL (text after
    the final '/').  Assumes the ``photos/`` directory already exists.

    Returns the relative path of the saved file.
    """
    local_filename = "photos/" + url.split('/')[-1]
    # Context managers close the response and the file even if read/write
    # raises; the original leaked both handles on error.
    with urllib.request.urlopen(url) as img_file:
        data = img_file.read()
    with open(local_filename, 'wb') as fp:
        fp.write(data)
    return local_filename

def get_photo_show(url):
    """Fetch a douban photo page and download its main photo.

    Locates the <img> inside the ``a.mainphoto`` anchor of ``div#content``
    and hands its ``src`` to downloadImage.

    Returns the local path of the downloaded image file.
    """
    # Close the response when done; pass an explicit parser — a bare
    # BeautifulSoup(page) guesses the parser and warns on modern bs4.
    with urllib.request.urlopen(url) as page:
        soup = BeautifulSoup(page, 'html.parser')
    photo_src = soup.find('div', id='content').find('a', class_="mainphoto").find('img')['src']
    return downloadImage(photo_src)
def str2dict(str0):
    """Parse "key:value" lines of *str0* into a dict.

    Every line containing a ':' contributes an entry mapping the text
    before the first ':' to the text after it; lines without a colon are
    ignored.  The dict starts with a default '标题' (title) entry so that
    key always exists.

    Returns the dict.  (Bug fix: the original built the dict but never
    returned it, so callers always received None.)
    """
    movie_dict = {'标题': ''}
    for line in str0.split('\n'):
        # partition splits on the FIRST colon, matching the original
        # find/slice logic, so values may themselves contain ':'.
        key, sep, value = line.partition(':')
        if sep:
            movie_dict[key] = value
    return movie_dict
            
# NOTE(review): this guard sits mid-file — when the module is executed as a
# script it exits here, so search_movie below is only defined on import.
# Presumably intentional (library-only module); confirm before moving it.
if __name__ == "__main__":
    exit()

def search_movie(searchword):
    """Search movie.douban.com for *searchword* and scrape one result page.

    Follows the last anchor in the first ``div.pl2`` search-result block,
    then collects title, the ``div#info`` metadata, synopsis, rating,
    rating count, screenshots and up to four posters (images are saved
    under ``photos/`` via get_photo_show) into "key:value" lines.

    Returns the dict produced by str2dict.
    Raises ValueError when the search page yields no result anchor
    (the original crashed with NameError in that case).
    """
    requrl = "http://movie.douban.com/subject_search?search_text=" + urllib.parse.quote(searchword)
    soup = BeautifulSoup(urllib.request.urlopen(requrl), 'html.parser')

    item_url = None
    # The loop keeps overwriting, so the LAST matching anchor wins —
    # preserved from the original behaviour.
    for anchor in soup.find('div', class_='pl2').findAll('a'):
        item_url = anchor['href']
    if item_url is None:
        raise ValueError('no search result found for ' + searchword)

    soup = BeautifulSoup(urllib.request.urlopen(item_url), 'html.parser')
    movie_title = soup.find('h1').find('span').contents[0].strip()
    str0 = '标题:' + movie_title
    subject_clearfix = soup.find('div', id='info')
    for span in subject_clearfix.children:
        if isinstance(span, bs4.element.Tag):
            # Bug fix: the original appended span.get_text() once per CHILD
            # of span, duplicating the text; append it exactly once.
            str0 += span.get_text()
        elif isinstance(span, bs4.element.NavigableString):
            str0 += str(span)

    link_report_span = soup.find('div', id='link-report').find('span')
    str0 += '简介:' + link_report_span.get_text().strip() + '\n'
    interest_sectl = soup.find('div', id='interest_sectl')
    str0 += '评分:' + interest_sectl.find('strong', class_='ll rating_num').get_text().strip() + '\n'
    # NOTE(review): href='collections' is an EXACT attribute match in bs4;
    # verify the page really uses that literal href value.
    str0 += '评分人数:' + interest_sectl.find('a', href='collections').find('span').get_text().strip() + '\n'

    # Screenshots: every related-pic anchor whose href points at a photo page.
    cnt = 0
    for a_in in soup.find('div', id='related-pic').findAll('a'):
        if 'photos/photo' in str(a_in['href']):
            cnt += 1
            str0 += '电影截图' + str(cnt) + ':' + get_photo_show(a_in['href']) + '\n'

    # Posters: follow the main-picture link and take at most four entries.
    mainpic_src = soup.find('div', id='mainpic').find('a')['href']
    soup0 = BeautifulSoup(urllib.request.urlopen(mainpic_src), 'html.parser')
    cnt = 0
    for lis in soup0.find('div', id='content').findAll('li'):
        cnt += 1
        str0 += '电影海报' + str(cnt) + ':' + get_photo_show(lis.find('a')['href']) + '\n'
        if cnt > 3:
            break
    return str2dict(str0)
