# -*- coding:utf-8 -*-
import json
from bs4 import BeautifulSoup
from PIL import Image
from StringIO import StringIO
import requests
import cookielib
import sys
import re
import chardet
import codecs


'''
Scrape borrowing information from the Wuhan University library OPAC.
'''
# Shared session: keeps cookies alive across the multi-step login flow below.
req = requests.session()

# Browser-like request headers; presumably the portal rejects requests
# without a matching Host/Referer — TODO confirm against the server.
headers = {
    'Host':'www.lib.whu.edu.cn',
    'Referer': 'http://www.lib.whu.edu.cn/',
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'
}

# Prime the shared session: the first GET makes the server issue its
# session cookie, which `req` then carries into the login requests.
def init():
    """Fetch the library homepage once so the session acquires cookies."""
    homepage = 'http://www.lib.whu.edu.cn/web/default.asp'
    response = req.get(homepage)
    print_r(response)


def lib_info():
    """Log in to the library portal and return the loan-list page HTML.

    Walks the multi-step login flow: POST credentials to the PDS
    endpoint, follow the embedded one-time logon URL, request the
    OPAC ``bor-loan`` function, then chase two more redirects to the
    final page.

    Returns the final page decoded as UTF-8 text, or None (implicitly)
    when any expected redirect URL cannot be found in a response.
    """
    # Log in.
    url = 'http://metalib.lib.whu.edu.cn/pds'
    # SECURITY NOTE(review): student id and verification code are
    # hard-coded here; they should come from configuration or the
    # environment, not source code.
    postdata = {
        'func':'login',
        'calling_system':'idp_proxy',
        'term1':'short',
        'selfreg':'',
        'institute':'WHU',
        'url':'http://apps.lib.whu.edu.cn/web/login.asp',
        'bor_id':'2013282160041',
        'bor_verification':'016959'
    }
    r = req.post(url, data=postdata, headers=headers)
    # The login response embeds a one-time logon path; pull it out.
    match = re.search(r'/goto/logon/(.*)&institute=', r.text)
    if match:
        url = match.group()
        r = req.get('http://metalib.lib.whu.edu.cn'+url)
        # This page is served as GB2312; transcode to UTF-8 so the
        # byte-string marker test below matches.
        content = r.content.decode('gb2312').encode('utf-8')
        # Marker text means "my borrowing info" => login succeeded.
        if '我的借阅信息' in content:
            print 'login success'
            # Request the loan list from the OPAC.
            url = 'http://opac.lib.whu.edu.cn/F/?func=bor-loan'
            r = req.get(url)
            # First hop: absolute redirect URL back into metalib.
            match = re.search(r'http://metalib.lib.whu.edu.cn(.*)func=bor-loan', r.text)
            if match:
                url = match.group()
                r = req.get(url)
                # Second hop: a JavaScript `location = '...'` redirect.
                match = re.search(r'location = \'(.*)\';">', r.text)
                if match:
                    url = match.group(1)
                    r = req.get('http://metalib.lib.whu.edu.cn'+url)
                    # Final page is UTF-8-encoded.
                    return r.content.decode('utf-8')

def parse_borrow_info(html):
    """Parse the loan-list page and return the borrowed books.

    Parameters: ``html`` -- HTML text of the loan page (as produced by
    lib_info()).

    Returns a list of dicts, one per borrowed book, with the keys
    'author', 'book_name', 'return_day' and 'lib_name'.  As a side
    effect prints the account title and a pretty JSON dump of the list.
    """
    borrow_list = []
    # Pin the parser explicitly: leaving it implicit makes bs4 warn and
    # pick whichever parser happens to be installed, so results could
    # differ between environments.
    soup = BeautifulSoup(html, 'html.parser')
    name = soup.select('div.title')[0]
    print(name.get_text())
    # The loan table is the <table> immediately preceding the copyright div.
    table = soup.select('div#copyright')[0].find_previous_siblings("table")[0]
    # Slice off the header row instead of counting rows manually.
    for tr in table.select('tr')[1:]:
        td_set = tr.select('td')
        # Defensive: skip short/malformed rows rather than raising
        # IndexError on the hard-coded cell positions below.
        if len(td_set) < 9:
            continue
        borrow_list.append({
            'author': td_set[2].get_text().strip(),
            'book_name': td_set[3].get_text().strip(),
            'return_day': td_set[5].get_text().strip(),
            'lib_name': td_set[8].get_text().strip(),
        })
    print(json.dumps(borrow_list, indent=4, ensure_ascii=False))
    return borrow_list


def print_r(r, verbose=False):
    """Debug helper: dump interesting attributes of a requests Response.

    The original version short-circuited with a bare ``return``, making
    the whole body dead code.  The dump is now gated behind a keyword
    flag instead; the default (``verbose=False``) preserves the old
    no-op behavior, so existing callers are unaffected.
    """
    if not verbose:
        return
    print ('-----------------')
    attrs = ['cookies', 'encoding', 'headers', 'history',
             'raw', 'reason', 'request', 'status_code', 'url']
    for att in attrs:
        # Default to None so a non-Response object can't raise here.
        print (att, '->', getattr(r, att, None))


if __name__ == '__main__':

    # Entry point: acquire session cookies, log in and fetch the loan
    # page, then parse and print the borrow list.
    init()
    html = lib_info()
    parse_borrow_info(html)

