#!/usr/bin/python
#from test_data_structs import Tweet
from data_structs import Text
from data_structs import Tweet
#from test_api import crawl_api
from crawl_url import crawl_url
from api_controller import API_Controller
import sys


def crawl(filename, prefix, directory, limit=5000):
    """Crawl every tweet URL listed in *filename* and write the fetched text.

    Each line of *filename* is parsed as a Tweet; its URL is fetched via
    crawl_url() and the result is appended to an output file named
    ``<directory>/<prefix>_<affix>``.  A new output file is started every
    *limit* records (affix 0, 1, 2, ...).

    Parameters:
        filename  -- path to the input file, one tweet record per line
        prefix    -- basename prefix for the output files
        directory -- output directory (a trailing "/" is added if missing)
        limit     -- maximum number of records per output file (default 5000)
    """
    if not directory.endswith("/"):
        directory = directory + "/"

    # NOTE(review): the controller instance is never referenced afterwards;
    # presumably the constructor has rate-limiting side effects -- confirm
    # before removing.
    api_c = API_Controller(1000, 10)

    affix = 0
    count = 0
    f = open(filename)
    try:
        of = open(directory + prefix + "_" + str(affix), "w")
        try:
            for line in f:
                # Rotate lazily, only when there is another record to write.
                # This avoids leaving a trailing empty file when the input
                # length is an exact multiple of `limit`.
                if count >= limit:
                    of.close()
                    affix = affix + 1
                    count = 0
                    of = open(directory + prefix + "_" + str(affix), "w")

                tweet = Tweet(line)
                text = crawl_url(tweet)
                of.write("URL\t" + tweet.get_url() + "\n")
                of.write("Type\t" + text.get_type() + "\n")
                try:
                    of.write("Content\t" + text.get_content() + "\n")
                except UnicodeError:
                    # Content contained characters the default codec cannot
                    # write; retry with an explicit UTF-8 encoding.
                    of.write("Content\t" + text.get_content().encode('utf-8') + "\n")
                of.write("\n\n")
                count = count + 1
        finally:
            # Close the current output file even if crawling raises.
            of.close()
    finally:
        f.close()
        
# Command-line driver: expects exactly four positional arguments.
# sys.argv[0] is the script name, hence the check against 5.
if len(sys.argv) < 5:
    # Fixed typos in the usage string ("urldirecotry" -> "urldirectory",
    # "preifx" -> "prefix"); argument order matches how they are read below.
    print("usage: urlfile urldirectory prefix limit")
else:
    filename = sys.argv[1]
    directory = sys.argv[2]
    prefix = sys.argv[3]
    limit = int(sys.argv[4])
    # Note the call order: crawl(filename, prefix, directory, limit) --
    # prefix and directory are swapped relative to argv order on purpose,
    # to match crawl()'s signature.
    crawl(filename, prefix, directory, limit)
    
