import urllib2, htmldata
import os
import csv
import subprocess

class articles(object):
    """Scrape news stories for a stock ticker from tmx.quotemedia.com,
    cache them under %TEMP%\\news\\, and write a per-story word-frequency
    report.  Windows-oriented: relies on %temp% and explorer.exe.
    """

    def __init__(self):
        self.ticker = ""      # stock symbol, e.g. 'BLD:CA'
        self.content = ""     # text of the most recently parsed story
        self.csv_files = []   # populated elsewhere; kept for compatibility
        self.stock_data = {}
        self.uni_dates = []
        self.word = {}        # last word_count() result

    def open_ex(self, dir_path):
        """Open the news cache directory in Windows Explorer.

        NOTE(review): dir_path is accepted but ignored (kept for
        backward compatibility); the cache dir is always the one opened.
        """
        # List-form argv avoids shell parsing of the path.
        subprocess.Popen(['explorer', self.tempDir()])
        return

    def tempDir(self):
        """Return the news cache directory (%TEMP%\\news\\), creating it
        on first use.  Assumes a Windows environment where %temp% is set.
        """
        # Compute the path once instead of calling os.getenv three times.
        path = os.getenv("temp") + '\\news\\'
        if not os.path.exists(path):
            os.mkdir(path)
        return path

    def parse(self, story_url):
        """Fetch one story (or reload it from the on-disk cache), keep only
        its <p> elements, cache the result, and return the text truncated
        at the boilerplate '<b>About' section.
        """
        cache_file = self.file_name(story_url) + ".txt"
        if os.path.exists(cache_file):
            with open(cache_file, "r") as fp:
                tmpstr = fp.read()
        else:
            if len(story_url) == 0:
                # Historical default URL used for ad-hoc testing.
                story_url = 'http://tmx.quotemedia.com/article.php?newsid=53602945&qm_symbol=BLD:CA'
            try:                                 # py2/py3 compatibility
                from urllib.request import urlopen
            except ImportError:
                from urllib2 import urlopen
            contents = urlopen(story_url).read()
            # The article body sits between these two page markers.
            start = contents.find('qmArticle')
            end = contents.find('qm-disclaimer')
            story = contents[start:end]
            from BeautifulSoup import BeautifulSoup
            soup = BeautifulSoup(story)
            tmpstr = ''.join(str(p) for p in soup.findAll('p'))
            # BUG FIX: the old fp.close lacked parentheses, so the cache
            # file was never closed; 'with' guarantees it now.  write()
            # replaces writelines(), which iterated the string per-char.
            with open(cache_file, "wb") as fp:
                fp.write(tmpstr)
        # BUG FIX: when '<b>About' is absent, find() returns -1 and the old
        # slice silently dropped the last character; only truncate on a hit.
        cut = tmpstr.find('<b>About')
        if cut >= 0:
            tmpstr = tmpstr[:cut]
        return tmpstr

    def get_date(self, strStory):
        """Return ~13 characters of text ending at the first recognised
        year (2001-2013) in *strStory* — normally a 'Mon DD, YYYY' date.
        Returns '' when no known year is present.
        """
        # BUG FIX: the old loop never broke, so only the find() result for
        # '2013' survived; any other year left art_date == -1 and produced
        # a garbage slice.  Stop at the first year actually found, and
        # clamp the start so a near-start year cannot wrap negatively.
        for year in ('2001', '2002', '2003', '2004', '2005', '2006', '2007',
                     '2008', '2009', '2010', '2011', '2012', '2013'):
            pos = strStory.find(year)
            if pos >= 0:
                return strStory[max(pos - 9, 0):pos + 4]
        return ''

    def file_name(self, story_url):
        """Map a story URL to its cache-file path (without extension),
        keyed on the 'newsid=NNN' query fragment.
        """
        start = story_url.find('newsid=')
        # BUG FIX: search for the delimiter *after* 'newsid=' so an '&'
        # earlier in the URL can no longer produce an empty/garbled name.
        end = story_url.find('&', start)
        return self.tempDir() + '\\' + story_url[start:end]

    def word_count(self):
        """Count word frequencies in self.content, skipping common
        stop-words.  Stores the dict on self.word and returns it.
        """
        stop_words = frozenset(['and', 'to', 'of', 'in', 'its', 'the', 'for',
                                'that', 'are', 'has', 'with', 'The', 'a',
                                'on', 'be', 'as'])
        counts = {}
        # has_key() (removed in py3) replaced by dict.get(); the dead
        # "nothing = 'nothing'" branch is folded into the condition.
        for w in self.content.split():
            if w not in stop_words:
                counts[w] = counts.get(w, 0) + 1
        self.word = counts
        return counts

    def parse_all(self, s):
        """Fetch the news index for symbol *s*, parse every linked story,
        and write a dated word-frequency report to
        <tempdir>/<s>_parse_output.txt.
        """
        url = 'http://tmx.quotemedia.com/news.php?qm_symbol=' + s
        print(url)
        try:                                     # py2/py3 compatibility
            from urllib.request import urlopen
        except ImportError:
            from urllib2 import urlopen
        contents = urlopen(url).read()
        # BUG FIX: 'with' replaces the old fp.close (missing parentheses),
        # which meant the report file was never closed/flushed.
        with open(self.tempDir() + s + "_parse_output.txt", "wb") as fp:
            for u in htmldata.urlextract(contents, url):
                link = str(u.url)
                if link.find('newsid') <= 0:
                    continue
                story = self.parse(link)
                print(u.url)
                # BUG FIX: word_count() reads self.content, which was never
                # updated here, so every report section had an empty word
                # list; point it at the story just parsed.
                self.content = story
                wl = self.word_count()
                fp.write('\n')
                fp.write(self.get_date(story))
                fp.write('\n')
                fp.write(u.url)
                fp.write('\n')
                fp.write(story)
                fp.write('\n')
                fp.write('Output: \n')
                for word in sorted(wl, key=wl.get, reverse=True):
                    if wl[word] > 1:
                        fp.write(word + ' ' + str(wl[word]))
                        fp.write('\n')
                fp.write('\n')