#pgreene livejournal parser
#emits author id, author name, content url (livejournal);
#the author id serves as the key
import urllib, string, sys, re, threading, ranker, datetime, sqlite3, hashlib

#Helper class that scrapes one LiveJournal search-results URL
"""
This class wraps the scraping of a single search-results page.
Instantiating a parse_thread also instantiates the ranker class.
run() opens the url, splits the received HTML on newlines, and parses it.
"""
class parse_thread:
  def __init__(self,url,comm_or_post,database="test.db"):
    self.database = database
    self.rankeer = ranker.Ranker(database=self.database)
    self.url = url
    self.cp = comm_or_post
  def run(self):
    read = False
    while read == False:
	try:
     	  f = urllib.urlopen(self.url)
      	  read = True
  	except IOError:
	  pass
    self.s = f.read()
    self.s = string.split(self.s,'\n')
    f.close()
    #regular expressions used for parsing threads
    samster = "<span class='ljuser ljuser-name_' lj:user='([a-zA-Z0-9_]+)'"
    bobz = '\s*<a href="(.*)"'
    ender = '\s+<div>'
    bobi = '<span class="search-meta">'
    space = '(\S.*)'
    datere = '<span class="date">([a-zA-Z0-9 ]+)</span>' 
    now = datetime.datetime.now()
    dizate = []
    dizate.append(now.year)
    dizate.append(now.month)
    dizate.append(now.day)

    temp = dizate
    i = 0
    #variables for dictionary
    account_name = ''
    comment = ''
    com_url = ''
    source = 'livejournal' 
    date = ''

    while i < len(self.s):
  	b = re.search(bobz,self.s[i])
  	if(b != None):
    	  i = i + 1
    	  c = re.search(bobi,self.s[i])
    	  i = i + 1
    	  if(c != None):
    	    com_url = b.group(1)
	    while i < len(self.s):
		d = re.search(samster,self.s[i])
	  	i = i + 1
      	  	if(d != None):
	    	  account_name = d.group(1)
	    	  while i < len(self.s):
		    e = re.search(datere,self.s[i])
		    i = i + 1
		    if(e != None):
			samsterz = {}
 			samsterz['authorname'] = account_name
			samsterz['url'] = com_url
			samsterz['source'] = source
			g = re.match(space,self.s[i])
			i = i + 1
			while(g == None):
			  g = re.match(space,self.s[i])
			  i = i + 1
			samsterz['comment'] = g.group(0)
			#print samsterz
			postval, followers = self.rankeer.calculate_value(samsterz)
			if postval != -1 and followers != -1:
				conn = sqlite3.connect(self.database)
				c = conn.cursor()
				date = datetime.datetime.now().strftime("%y%j%H%M")
				data = (samsterz['authorname'], 'l', followers, date, postval, hashlib.md5(samsterz['comment']).hexdigest(),samsterz['url'])
				print "LJ Putting", data
				c.execute('insert into userdata values (?,?,?,?,?,?,?)', data)
				conn.commit()
				c.close()
		  	break
	    	  break
     	i = i + 1

class livejournal_parse():
  """Search LiveJournal posts and comments for a term and rank the hits.

  search_word: the phrase to search for (spaces and reserved characters
    are URL-encoded automatically).
  database: sqlite3 file the scraped results are written into.
  """
  def __init__(self, search_word, database="test.db"):
    # BUG FIX / generalization: the old code only replaced spaces with '+',
    # so terms containing '&', '=', '#' etc. produced broken query strings.
    # quote_plus encodes every reserved character and still turns spaces
    # into '+', so plain-word searches are byte-identical to before.
    self.search_term = urllib.quote_plus(search_word)
    # One scrape over matching posts, one over matching comments.
    self.post_url = "http://www.livejournal.com/search/?q=" + self.search_term + "&area=posts"
    self.comm_url = "http://www.livejournal.com/search/?area=comments&q=" + self.search_term
    self.database = database

    # Build the two scrapers (run sequentially by run(), despite the name).
    self.a = parse_thread(self.post_url, 'post', self.database)
    self.b = parse_thread(self.comm_url, 'comm', self.database)

  def run(self):
    """Scrape the posts search first, then the comments search."""
    self.a.run()
    self.b.run()

if __name__ == "__main__":
  # Manual smoke test: scrape results for "clemson" into test.db.
  livejournal_parse("clemson", "test.db").run()
