# -*- coding: utf-8 -*-

import urllib2
from bs4 import BeautifulSoup
import sys

import chardet
############################
#key_word="单兵自热"
############################

# Python 2 Unicode workaround: force the process-wide default encoding to
# UTF-8 so implicit str<->unicode conversions of the scraped Chinese text
# don't raise UnicodeDecodeError.  reload() is required because site.py
# deletes sys.setdefaultencoding at interpreter startup.
# (The stray bare `print` that used to sit between these two lines was
# leftover debug output and has been removed.)
reload(sys)
sys.setdefaultencoding('utf8')

#first_url='https://search.bilibili.com/all?keyword=%E5%8D%95%E5%85%B5%E8%87%AA%E7%83%AD&page=2'
#first_url='https://search.bilibili.com/all?keyword=%E5%8D%95%E5%85%B5%E8%87%AA%E7%83%AD'


def get_html_page_num(first_url):
    request=urllib2.Request(first_url)
    request.add_header('Host','search.bilibili.com')
    request.add_header('Connection','keep-alive')
    request.add_header('Cookie','finger=edc6ecda; LIVE_BUVID=AUTO4915336549565020; bsource=seo_baidu; buvid3=1FD1A027-C045-4A2C-9E4F-6CD79672300B163045infoc; sid=7h0xsnj6')
    reponse=urllib2.urlopen(request);

    resp= reponse.read()
    soup = BeautifulSoup(resp,'html.parser')
    
    numobj=soup.find('li',attrs={'class':'page-item last'})
    print numobj.text
    return int(numobj.text)
    

def crawl_web(url,file):
    request=urllib2.Request(url)
    request.add_header('Host','search.bilibili.com')
    request.add_header('Connection','keep-alive')
    request.add_header('Cookie','finger=edc6ecda; LIVE_BUVID=AUTO4915336549565020; bsource=seo_baidu; buvid3=1FD1A027-C045-4A2C-9E4F-6CD79672300B163045infoc; sid=7h0xsnj6')
    reponse=urllib2.urlopen(request);

    resp= reponse.read()

    #print data
    soup = BeautifulSoup(resp,'html.parser')
    
    #with open('test.html','w') as f:
    #    f.write(resp)
    objlist=soup.find_all('div','info')
    for obj in objlist:
        try:
            print "{"
            print "  title: "+obj.find('a','title').text.strip()
            print "  click: "+obj.find('span',attrs={'title':'观看'}).text.strip()
            print "   date: "+obj.find('span',attrs={'title':'上传时间'}).text.strip()
            print "   auth: "+obj.find('span',attrs={'title':'up主'}).text.strip()
            print "}"
            print ""
        except UnicodeError as u:
            continue
        #info="{\n"+"  title: "+obj.find('a','title').text.strip()+"\n"+"  click: "+obj.find('span',attrs={'title':'观看'}).text.strip()+"\n"+"   date: "+obj.find('span',attrs={'title':'上传时间'}).text.strip()+"\n"+"   auth: "+obj.find('span',attrs={'title':'up主'}).text.strip()+"\n}\n"
        
        #info=obj.find('span',attrs={'title':'观看'}).text.strip()
        continue
        info=obj.find('a','title').text.strip()+','+obj.find('span',attrs={'title':'观看'}).text.strip()+','+obj.find('span',attrs={'title':'上传时间'}).text.strip()+','+obj.find('span',attrs={'title':'up主'}).text.strip()+'\n'
        file.write(info)

if __name__ == '__main__':
    print r'''

                       _oo0oo_
                      o8888888o
                      88" . "88
                      (| -_- |)
                      0\  =  /0
                    ___/`---'\___
                  .' \\|     |# '.
                 / \\|||  :  |||# \
                / _||||| -:- |||||- \
               |   | \\\  -  #/ |   |
               | \_|  ''\---/''  |_/ |
               \  .-\__  '-'  ___/-. /
             ___'. .'  /--.--\  `. .'___
          ."" '<  `.___\_<|>_/___.' >' "".
         | | :  `- \`.;`\ _ /`;.`/ - ` : | |
         \  \ `_.   \_ __\ /__ _/   .-` /  /
     =====`-.____`.___ \_____/___.-`___.-'=====
                       `=---='

    '''
    key_word=raw_input("input your search words:".decode(sys.stdin.encoding))
    
    first_url='https://search.bilibili.com/all?keyword='+urllib2.quote(key_word.decode('gb2312').encode('utf-8'))
    num=get_html_page_num(first_url)
    file=open("result.txt","w")
    for i in range(2,num+1):
        url=first_url+"&page="+str(i)
        crawl_web(url,file)
        #print url

