'''
	Once the database is populated with the revision numbers and usernames, 
	we download the webpage corresponding to each revision. 
	As we need only text and URLs for each page, we do not store the entire page. 
	This helps us save space as well as time when parsing. 
	The text corresponding to the id - 'mw-content-text' is downloaded for further parsing. 
	The program creates a folder for each revision and stores the data in two files - 'links.txt', 'text.txt'.
'''



import connect as con
from lxml import html
import database as db
import os
import config as cg

def extractText(tree, newDir):
	"""Write the HTML of the page's 'mw-content-text' element to <newDir>/text.txt.

	tree   -- parsed lxml HTML tree of a revision page
	newDir -- directory (relative to the current working dir) for the output file

	Raises KeyError (from lxml) if the element id is not present in the tree.
	"""
	element = tree.get_element_by_id("mw-content-text")
	# 'with' guarantees the file handle is closed even if tostring() fails;
	# the original opened the file and never closed it.
	with open(newDir + "/text.txt", 'w') as f:
		f.write(html.tostring(element))
	
def extractLinks(tree, newDir):
	"""Write every link URL found in the tree to <newDir>/links.txt, one per line.

	tree   -- parsed lxml HTML tree of a revision page
	newDir -- directory (relative to the current working dir) for the output file

	URLs that cannot be encoded for the output file are skipped
	(best-effort, preserving the original intent).
	"""
	# 'with' closes the file even on error; the original leaked the handle.
	with open(newDir + "/links.txt", 'w') as f:
		# iterlinks() yields (element, attribute, url, pos); only the URL is needed.
		for link in tree.iterlinks():
			try:
				f.write(link[2] + '\n')
			except UnicodeError:
				# Narrowed from a bare 'except:' — only encoding failures
				# ("ascii stuff") are expected and safe to skip here.
				pass
		
def main(start=23142, end=24000, batchSize=500):
	"""Download revision pages for positions start..end (1-based, inclusive)
	of the revision table and store each one's text and links under cg.data.

	start, end  -- window of revision positions to process this run
	batchSize   -- revisions per outer "#N" batch folder (default 500)

	Layout: batch folders "#1", "#2", ... each hold up to batchSize
	per-revision directories named "<position>_<revid>", each containing
	text.txt and links.txt.
	"""
	conn = db.connectDb()
	data = db.extractDataTbRev(conn)
	os.chdir(cg.data)
	# enumerate replaces the hand-maintained counter from the original.
	for i, item in enumerate(data, 1):
		# Guard clause: skip positions outside the requested window.
		if i < start or i > end:
			continue
		url = cg.historyURL + str(item[0])
		tree = con.connectToPage(url)
		# Floor division (//) keeps the batch number an int under Python 3
		# as well; plain '/' would yield a float and a folder like "#1.0".
		if i % batchSize == 0:
			outerDir = "#" + str(i // batchSize)
		else:
			outerDir = "#" + str(i // batchSize + 1)
		# The batch folder is created when its first revision is reached.
		if i % batchSize == 1:
			os.makedirs(outerDir)
		os.chdir(outerDir)
		newDir = str(i) + "_" + str(item[0])
		os.makedirs(newDir)
		extractText(tree, newDir)
		extractLinks(tree, newDir)
		os.chdir("../")
	
main()
	
