# -*- coding: utf-8 -*-
from urllib import urlopen
from BeautifulSoup import BeautifulSoup
import re
import os
import sqlite3
from datetime import datetime


# True until the first record is written; updateDB() uses it to lazily
# create the per-run results table via setupDB().
firstRecord=True
# Placeholder table name; setupDB() replaces it with a unique
# "scrapTable<count>_<timestamp>" name for this run.
tableName="scrapTable"
# Module-level SQLite connection and cursor shared by all functions below.
# NOTE(review): records are only persisted if commit() is called before the
# connection is closed at the bottom of the file.
createDb = sqlite3.connect('webscrap.db')
queryCurs = createDb.cursor()

def removeNonAscii(s): return "".join(i for i in s if ord(i)<128)

def cleanHtmlRegex(i):
    """Return the plain text of a BeautifulSoup fragment.

    Strips HTML tags, ampersand entity codes (e.g. &amp;) and the literal
    'WATCH:' marker from str(i).  All regex patterns are raw strings so
    backslash sequences like \w are not subject to string-escape rules.
    """
    text = str(i)
    # Remove tags: shortest run from '<' to the next '>' with no nested '<'.
    text = re.sub(r'<[^<]*?/?>', '', text)
    # Drop HTML entity codes such as &amp; or &nbsp;
    text = re.sub(r'&\w+;', '', text)
    # Drop the literal 'WATCH:' marker some headlines carry.
    return re.sub(r'WATCH:', '', text)


def setupDB():
    global firstRecord
    global tableName
    queryCurs.execute("SELECT count(*) FROM sqlite_master WHERE type='table'")
    for i in queryCurs:
        for j in i:
            tableName="scrapTable"+str(j)+"_"+str(datetime.now()).translate(None,":-. ")
    query="create table "+tableName+" (scrapId integer primary key ,pageUrl text,selectFrom text,selectTo text,fileLineNum text,scrapContent text)"
    print "Creating a table",tableName,"for the input file provided(this is done for the first url in the file only)"
    queryCurs.execute(query) 
    firstRecord=False

def updateDB(url,strStart,strEnd,n,cleanSoup):
    global firstRecord
    if firstRecord==True:
        setupDB()
    try:
        queryCurs.execute('insert into %s (pageUrl,selectFrom,selectTo,fileLineNum,scrapContent)values(?,?,?,?,?)'%str(tableName),(str(url), strStart, strEnd, str(n), removeNonAscii(cleanSoup),))
        queryCurs.execute("SELECT count(*) FROM %s"%str(tableName))
        for i in queryCurs:
            for j in i:
                if j!=0:
                    print "Record for scrap at Line",n,"created in the table:",tableName
        
    except:
        print "Error adding record of line",n,"to the database"

def scrap(pageLink,selectFrom,selectTo,i):
    pageContent=urlopen(pageLink).read()
    selectBegin=pageContent.find(selectFrom)
    selectEnd=pageContent.find(selectTo)
    selectedContent=pageContent[selectBegin:selectEnd]
    try:
        soup = BeautifulSoup(selectedContent)
    except(TypeError):
        print "The range of text for",pageLink,"specified in the input file at line",i,"does not exist"
        return
    cleanSoup = cleanHtmlRegex(soup)
    print "The length of the content to be scrapped is",len(cleanSoup)
    updateDB(pageLink,selectFrom,selectTo,i,cleanSoup)
    
def splitLine(line,i):
    try:
        url,strStart,strEnd=line.split("||")
    except(ValueError),e:
        print "Invalid Syntax in input file at line",i,":", e
        return
    print "Line",int(i),":"
    print "URL:",url
    print "SelectFrom:",strStart
    print "SelectTo:",strEnd
    scrap(url,strStart.rstrip(),strEnd.rstrip(),i)
    

def readFile(address):
    try:
        f=open(address)
    except(IOError),e:
        print 'File not found', e
    else:
        for i,line in enumerate(f,1):
            if line==" ":
                continue
            splitLine(line,i)
        print "Scraping for the given input file completed and stored at the database, sample.db with the table name",tableName    

def main():
    print "Create a input file with the url(s) to be scrapped in the following format"
    print "<<link>>||<<selectFrom>>||<<selectTo>>"
    print "Omit '<<' and '>>' "
    print "Enter the full(absolute) path of the input file containing url(s) to be scrapped:"
    filePath=raw_input()
    readFile(filePath)

if __name__ == '__main__':
    main()
    # Close the connection only when run as a script.  The old unconditional
    # close() at module level also ran on import, which would break any
    # importer still using the module-level connection.
    createDb.close()

