import getpass
import http.client
import http.cookiejar
import re
import sys
import urllib
import urllib.error
import urllib.parse
import urllib.request
from urllib.request import *

import bs4

# Endpoints for the UBware bulletin board.
base_url = "http://www.ubware.com"
login_url = "https://www.ubware.com/Member/Logon"
pure_board_url = "https://www.ubware.com/Board/List"
board_url = "https://www.ubware.com/Board/List?page=%d"       # %d = 1-based page number
content_url = "https://www.ubware.com/Board/Content?bNum=%d"  # %d = numeric post id

# Regex patterns for scraping the board-list HTML.
P1 = '<tr class="ListContentHeight" align="center">.*?</tr>'  # one post row
P2 = "<span.*?>"    # opening <span ...> tags (stripped before cell parsing)
P3 = "</span>"      # closing </span> tags
# Matches the JS onclick fragment "fct_Cotent(\'<digits>" ("Cotent" [sic] is the
# site's own spelling).  Raw string avoids the invalid-escape DeprecationWarning
# the original non-raw literal produced for "\(" and "\d"; the pattern value is
# byte-identical to the original.
rex_fct_content = r"fct_Cotent\(\\'\d+"
board_content_urls = set()  # Content?bNum= URLs collected by BoardCrawler.Sweep()

#######################################################################################
class Helper:
    """Namespace for stray scraping utilities."""

    @staticmethod
    def get_fct_content(text):
        """Return every "fct_Cotent(\\'<digits>" fragment found in *text*.

        *text* may be any object; it is coerced with str() first.  Matching is
        driven by the module-level ``rex_fct_content`` pattern.
        """
        # @staticmethod so this also works when called on an instance; the
        # original bare function only worked as Helper.get_fct_content(...).
        return re.findall(rex_fct_content, str(text))


#######################################################################################
class Post:
    """One row of the board list, parsed into a column-name -> cell-text dict."""

    # Column order exactly as rendered in the board's <tr> rows.
    _COLUMN_NAMES = ('Number', 'Type', 'Title', 'Author', 'Date', 'Click', 'File')

    def __init__(self, post_data):
        """Parse *post_data*, the HTML of one <tr>...</tr> board row.

        Raises IndexError when the row has fewer than 7 <td> cells, matching
        the original behaviour.
        """
        self.post_data = post_data
        # Name the parser explicitly: without it bs4 emits GuessedAtParserWarning
        # and may pick a different parser (and thus a different tree) per machine.
        td_tag_list = bs4.BeautifulSoup(self.post_data, "html.parser").find_all('td')
        self.columns = {name: td_tag_list[i].string
                        for i, name in enumerate(self._COLUMN_NAMES)}

#######################################################################################
class Board:
    """A scraped board page: splits *full_data* HTML into a list of Post objects."""

    def __init__(self, full_data):
        """Extract every post row from *full_data* and parse each into a Post."""
        self.full_data = full_data
        # P1 isolates one <tr> row per post; spans are stripped (P2 opening
        # tags first, then P3 closing tags) so each <td> holds a bare string.
        self.list = [
            Post(re.sub(P3, '', re.sub(P2, '', row_html)))
            for row_html in re.findall(P1, self.full_data, re.DOTALL)
        ]

#######################################################################################
class BoardCrawler:
    """Logs into UBware (via Requester) and scrapes the board-list pages."""

    def __init__(self, uid="", pwd=""):
        """Create the authenticated Requester (prompts for missing credentials)."""
        self.requester = Requester(uid, pwd)

    def work(self, max_page_count):
        """Crawl board pages 1..max_page_count and dump every post's columns to
        BoardCrawlerResult.txt, overwriting any previous run.

        NOTE(review): the original body mixed spaces and a tab (a TabError in
        Python 3), reopened the result file once per post without closing it,
        and stopped one page short of max_page_count.
        """
        with open("BoardCrawlerResult.txt", "w") as result:
            # range() end is exclusive: +1 so max_page_count itself is crawled.
            for page_number in range(1, max_page_count + 1):
                url = board_url % page_number
                print(url)
                if not self.requester.open(url):
                    continue  # skip pages that failed to download
                board = Board(self.requester.read_data)
                for post in board.list:
                    result.write(str(post.columns) + "\r\n")

    def Sweep(self, url):
        """Fetch *url* (default: board page 1), extract every fct_Cotent post id,
        record its Content URL in board_content_urls, and write it to Result.txt.
        """
        if url == "":
            url = board_url % 1

        print("Entry URL : " + url)

        # The original ignored open()'s return value and would then read a
        # stale/missing read_data attribute on failure.
        if not self.requester.open(url):
            print("Failed to fetch " + url)
            return
        fctContents = Helper.get_fct_content(str(self.requester.read_data).encode('utf-8'))
        print("fctContentList(%d)  : %s " % (len(fctContents), str(fctContents)))
        try:
            with open("Result.txt", "wt") as resultfile:
                for content in fctContents:
                    # Each match looks like "fct_Cotent(\'123"; strip the fixed
                    # prefix to recover the numeric post id.
                    cUrl = content_url % int(content.replace("fct_Cotent(\\\'", ""))
                    board_content_urls.add(cUrl)

                    resultfile.write(cUrl + "\r\n")
        except IOError as e:
            print(e)


#######################################################################################
class Requester:
    """Thin urllib wrapper that logs into UBware once and keeps the session cookie."""

    def __init__(self, uid="", pwd=""):
        """Build the login POST body and perform the login.

        Missing credentials are read interactively (input() for the id,
        getpass for the password).  The login response sets the session
        cookie that authenticates every later request through self.opener.
        """
        print('Create post data for login at UBware.com')
        data = {}
        # Prompt only for whichever credential was not supplied.
        data['txtID'] = uid if uid != "" else input('ID:')
        data['txtPassword'] = pwd if pwd != "" else getpass.getpass()
        data['chkSaveID'] = 'false'
        login_body = urllib.parse.urlencode(data).encode('utf-8')

        self.cj = http.cookiejar.CookieJar()
        self.opener = build_opener(urllib.request.HTTPCookieProcessor(self.cj))
        self.opener.open(login_url, login_body)

    def open(self, url):
        """GET *url* through the logged-in opener.

        On success stores the UTF-8-decoded body in self.read_data and returns
        True.  Returns False on HTTP or URL errors; read_data is left
        untouched in that case, so callers must check the return value.
        """
        try:
            # Context manager guarantees the response is closed even when
            # read()/decode() raises (the original leaked it in that case).
            with self.opener.open(url) as response:
                self.read_data = response.read().decode('utf-8')
        except urllib.error.HTTPError:
            return False
        except http.client.InvalidURL:
            return False
        return True

#######################################################################################
#BoardCrawler().work(10)
 
