import getpass
import http.cookiejar
import re
import urllib, urllib.error
from urllib.request import *

from bs4 import BeautifulSoup

# Target endpoints on ubware.com.
base_url = "http://www.ubware.com"
login_url = "https://www.ubware.com/Member/Logon"  # credentials are POSTed here
board_url = "https://www.ubware.com/Board/List"  # board index page
content_url = "https://www.ubware.com/Board/Content?bNum="  # article page, keyed by board number

# Shared cookie-aware opener: cookies set by the login response are
# replayed automatically on every later request made through `opener`.
cj = http.cookiejar.CookieJar()
opener = build_opener(urllib.request.HTTPCookieProcessor(cj))
# http://docs.python.org/3.2/library/urllib.request.html?highlight=build_opener#openerdirector-objects


# Library class: stateless helpers for parsing pages and building POST data.
class Helper:
    @staticmethod
    def get_title(text):
        """Return the contents of the first <title> tag in *text*, or '' if none."""
        try:
            return re.compile(r'<title>(.+)</title>').findall(text)[0]
        except IndexError:  # was a bare except: — only "no match" is expected here
            return ''

    @staticmethod
    def get_links(text):
        """Return every http:// URL found in *text* (each match stops at a quote)."""
        return re.compile(r'http://[^\'"]+').findall(text)

    @staticmethod
    def get_fct_content(text):
        """Extract the numeric IDs from fct_Cotent(\\'NNN occurrences in *text*.

        The pattern (including the 'Cotent' spelling) is kept verbatim —
        presumably it mirrors the target site's own JavaScript; verify
        against the live markup before changing it.
        """
        return re.compile(r"fct_Cotent\(\\'([0-9]+)").findall(text)

    @staticmethod
    def get_parameter(data):
        """URL-encode dict *data* and return it as UTF-8 bytes for a POST body."""
        return urllib.parse.urlencode(data).encode('utf-8')

    @staticmethod
    def create_ubware_logindata():
        """Interactively prompt for ID/password and return encoded login POST data."""
        print('Create post data for login at UBware.com')
        data = {
            'txtID': input('ID:'),
            'txtPassword': getpass.getpass(),
            'chkSaveID': 'false',
        }
        return Helper.get_parameter(data)

    
        
# Document class
# It's created for each document: fetches one URL and exposes its parsed form.
class HtmlDoc:
    def __init__(self, opener, url):
        self.url = url
        self.opener = opener  # cookie-carrying OpenerDirector used for the fetch

    def load(self):
        """Fetch self.url and populate byte/text/bs/title/links/haslink.

        On HTTP 500 the server's error-page body is returned and the
        instance attributes are left unset (original behavior preserved);
        any other HTTPError propagates.
        """
        try:
            # Bug fix: use the opener stored on the instance, not the
            # module-level global it happened to shadow.
            res = self.opener.open(self.url)
            try:
                self.byte = res.read()
            finally:
                res.close()  # close even if read() raises
        except urllib.error.HTTPError as e:
            if e.getcode() == 500:
                return e.read()
            raise
        # Bug fix: decode the bytes — str(bytes) would produce the
        # "b'...'" repr, polluting every downstream regex/parse.
        self.text = self.byte.decode('utf-8', errors='replace')
        self.bs = BeautifulSoup(self.text, 'html.parser')  # explicit parser
        # Guard against pages with no <title> element.
        self.title = self.bs.title.text if self.bs.title else ''
        self.links = [a.get('href') for a in self.bs.find_all('a')]
        self.haslink = len(self.links) != 0

# Crawler - organize and manage whole flows
class Crawler:
    def __init__(self):
        # Map of url -> HtmlDoc for every page fetched so far; doubles as
        # the visited set for the recursion below.
        self.docs = {}

    def run(self, url):
        """Fetch *url*, record its document, and recurse into its links.

        Bug fix: skip URLs already in self.docs — without this check any
        cyclic link graph recursed forever. Falsy hrefs (None/'') are
        also skipped, since opener.open() cannot handle them.
        """
        if url in self.docs:
            return
        doc = HtmlDoc(opener, url)
        doc.load()
        self.docs[url] = doc
        if doc.haslink:
            for link in doc.links:
                # TODO: convert relative links to absolute and filter
                # out non-HTTP URIs before recursing.
                if link:
                    self.run(link)

###################################    
# Main
###################################    
        
# set cookie by login data
# NOTE: module-level side effects — importing/running this script prompts
# for credentials on stdin and immediately performs a network login.
data = Helper.create_ubware_logindata()
opener.open(login_url, data) # login - created cookies

c = Crawler()
#c.run(board_url)
