from time import sleep;
from lxml import html;
import psycopg2;

#import nltk;
class BlanketCorpusBuilder(object):
    def __init__(self):
        """Announce that a corpus builder instance was created."""
        print("BCB created")
    def Build(self, pageList, blanketCategory):
        """Append the text of each Wikipedia page in pageList to the category's
        corpus file, and record each successfully fetched URI.

        Args:
            pageList: iterable of page URIs to fetch with lxml.
            blanketCategory: base name for the output ``.corpus`` and ``.pgs`` files.

        Side effects: appends to two files under the hard-coded workspace paths,
        calls self.__InputData() after every URI (success or failure), and sleeps
        between requests. Pages that fail to load (IOError) are skipped.
        """
        # Paths are rebuilt once; the concatenated strings are identical to the
        # originals so existing corpora keep accumulating in the same files.
        corpus_path = ("/Users/Hikari/Documents/workspace/WikiDataSearch/"
                       "WikiBlanket/" + blanketCategory + ".corpus")
        pages_path = ("/Users/Hikari/Documents/workspace/WikiDataSearch/"
                      "WikiPages/" + blanketCategory + ".pgs")
        for uri in pageList:
            try:
                root = html.parse(uri).getroot()
                # Record the URI only after the page actually parsed.
                with open(pages_path, "a") as pages_file:
                    pages_file.write(uri + "\n")
                items = root.xpath('//div/p | //div[@id="bodyContent"]/ul')
                # Context managers guarantee the handles are closed even if a
                # write fails — the original leaked both files on an IOError.
                with open(corpus_path, "a") as corpus_file:
                    for item in items:
                        corpus_file.write(item.text_content())
                        corpus_file.write("\n")
                print("Text from \"" + uri + "\" added to " + blanketCategory + ".corpus")
            except IOError:
                print(uri + " Not Found. Skipping.")
            self.__InputData()

            # To be compliant with wikipedia's rules on crawling...
            # There must be at least one second delay between url requests
            sleep(1.25)
        print("Finished")
    def __InputData(self):
        conn = psycopg2.connect(dbname="SakuraNLP", host="babylon.cise.ufl.edu", user="Sakura", password="shichi");