import ast
import datetime
import os
import re
import time
import Queue
from threading import Thread
from urlparse import urljoin

import mechanize
import common_tools
from Bio import Entrez, Medline
from PyQt4 import QtCore, QtGui, Qt

class PDFSaveThread(QtCore.QThread):
    """Background thread that downloads one PDF and stores its citation.

    Fetches ``pdf_link`` through a (possibly proxied) browser, writes the
    bytes to ``pdf_filename`` and saves ``repr(cite_item)`` as a ``.dxt``
    record next to it.  Emits ``'done'`` with the title on success and
    ``'error'`` with the title on any failure.
    """
    def __init__(self, config_dict, pdf_link, pdf_filename, cite_item, title, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.config_dict = config_dict
        self.pdf_link = pdf_link
        self.pdf_filename = pdf_filename
        self.cite_item = cite_item
        self.title = title

    def __del__(self):
        # Block until the thread finishes so Qt never destroys a running
        # QThread.
        self.wait()

    def start_process(self):
        self.start()

    def run(self):
        try:
            prox_br = get_proxied_browser(self.config_dict)
            page = prox_br.open(self.pdf_link)
            # 'with' guarantees the files are closed even if a write fails
            # part-way (the old open/write/close leaked handles on error).
            with open(self.pdf_filename, 'wb') as f:
                f.write(page.read())
            with open('%s%s.dxt' % (self.config_dict['storageLocation'], self.cite_item['EID']), 'w') as f:
                f.write(repr(self.cite_item))
            self.emit(QtCore.SIGNAL('done'), self.title)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not silently swallowed; download errors still just signal.
            self.emit(QtCore.SIGNAL('error'), self.title)
        
class SavedDataThread(QtCore.QThread):
    """Background thread that loads every saved citation record (``*.dxt``).

    Reads each ``.dxt`` file under ``config_dict['storageLocation']``,
    parses it back into a dict and emits ``'obtained files'`` with
    ``'CitationControl-Saved'`` prepended to the list of records.
    """
    def __init__(self, config_dict, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.config_dict = config_dict

    def __del__(self):
        self.wait()

    def start_process(self, threadnum):
        self.threadnum = threadnum
        self.start()

    def run(self):
        storage_loc = self.config_dict['storageLocation']
        file_names = ['%s%s' % (storage_loc, x) for x in os.listdir(storage_loc)
                      if x.endswith('.dxt')]
        if not file_names:
            return
        # The .dxt files are written by this app with repr() on plain dicts,
        # so ast.literal_eval parses them back safely without executing
        # arbitrary code the way the previous eval() call could.
        lst_of_files = [ast.literal_eval(open(filename).read())
                        for filename in file_names]
        lst_of_files = ['CitationControl-Saved'] + lst_of_files
        self.emit(QtCore.SIGNAL('obtained files'), lst_of_files, self.threadnum)

class GenericThread(QtCore.QThread):
    '''
    Run an arbitrary callable on a Qt worker thread.
    Adapted from http://joplaete.wordpress.com/2010/07/21/threading-with-pyqt4/
    '''
    def __init__(self, function, *args, **kwargs):
        QtCore.QThread.__init__(self)
        # Capture the callable and its arguments for later execution.
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def __del__(self):
        # Let the worker finish before the wrapper object goes away.
        self.wait()

    def run(self):
        # Invoke the stored callable with its captured arguments.
        self.function(*self.args, **self.kwargs)
        
class PDFDownloadThread(QtCore.QThread):
    """Worker thread that fetches the PDF for a single citation record.

    Signals emitted:
      - ``'pdfLinkObtained'`` with the updated citation dict
      - ``'updateImage'`` with (index, tab index, "done"/"error")
      - ``'done'`` with the thread number, once the attempt finishes
    """
    def __init__(self, config_dict, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.config_dict = config_dict

    def __del__(self):
        self.wait()

    def start_process(self, ind, tab_index, lst, threadnum, pdfOnlyDownload=False):
        self.ind = ind
        self.tab_index = tab_index
        self.lst = lst
        self.threadnum = threadnum
        self.pdfOnlyDownload = pdfOnlyDownload
        self.start()

    def run(self):
        cite_item = self.lst[self.ind]
        pdf_val = download_pdfs(cite_item, self.config_dict, self.pdfOnlyDownload)
        if not pdf_val:
            # Download failed: push the (falsy) result back and flag the row.
            cite_item['pdfLocation'] = pdf_val
            self.emit(QtCore.SIGNAL('pdfLinkObtained'), cite_item)
            self.emit(QtCore.SIGNAL('updateImage'), self.ind, self.tab_index, "error")
        else:
            # A bare URL (rather than a saved file path) is reported back to
            # the caller; either way the row is marked done.
            if pdf_val.startswith('http:'):
                cite_item['pdfLocation'] = pdf_val
                self.emit(QtCore.SIGNAL('pdfLinkObtained'), cite_item)
            self.emit(QtCore.SIGNAL('updateImage'), self.ind, self.tab_index, "done")
        self.emit(QtCore.SIGNAL('done'), self.threadnum)


class PUBMEDSearchThread(QtCore.QThread):
    """Runs a PubMed keyword search off the GUI thread.

    Emits ``'resultsObtained'`` with the result list (keyword prepended),
    the target tree widget, the tab location and the thread number.
    """
    def __init__(self, treeWidget, config_dict, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.treeWidget = treeWidget
        self.config_dict = config_dict

    def __del__(self):
        self.wait()

    def start_process(self, keyword, tab_location, threadnum):
        self.keyword = keyword
        self.tab_location = tab_location
        self.threadnum = threadnum
        self.start()

    def run(self):
        searcher = PUBMEDSearchWidget()
        results = [self.keyword] + searcher.search_pubmed(self.keyword, self.config_dict)
        self.emit(QtCore.SIGNAL('resultsObtained'),
                  results, self.treeWidget, self.tab_location, self.threadnum)

class CitationSearchThread(QtCore.QThread):
    '''
    Fetches the citations of a given PubMed article in the background.

    Emits 'resultsObtained' with the result list (title message prepended),
    the target tree widget, the tab location and the thread number.
    '''
    def __init__(self, treeWidget, config_dict, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.treeWidget = treeWidget
        self.config_dict = config_dict

    def __del__(self):
        self.wait()

    def start_process(self, titlemsg, pmid, tab_location, threadnum):
        self.titlemsg = titlemsg
        self.pmid = pmid
        self.tab_location = tab_location
        self.threadnum = threadnum
        self.start()

    def run(self):
        # Local import (as in the original) keeps reference_retrieve out of
        # module load time.
        import reference_retrieve
        retriever = reference_retrieve.RefCiteRetrieval(self.pmid, self.titlemsg, self.config_dict)
        results = [self.titlemsg] + retriever.get_citations()
        self.emit(QtCore.SIGNAL('resultsObtained'),
                  results, self.treeWidget, self.tab_location, self.threadnum)

class ReferenceSearchThread(QtCore.QThread):
    '''
    Same as CitationSearchThread, but retrieves the article's references.
    '''
    def __init__(self, treeWidget, config_dict, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.treeWidget = treeWidget
        self.config_dict = config_dict

    def __del__(self):
        self.wait()

    def start_process(self, titlemsg, pmid, tab_location, threadnum):
        self.titlemsg = titlemsg
        self.pmid = pmid
        self.tab_location = tab_location
        self.threadnum = threadnum
        self.start()

    def run(self):
        # Local import (as in the original) keeps reference_retrieve out of
        # module load time.
        import reference_retrieve
        retriever = reference_retrieve.RefCiteRetrieval(self.pmid, self.titlemsg, self.config_dict)
        results = [self.titlemsg] + retriever.get_references()
        self.emit(QtCore.SIGNAL('resultsObtained'),
                  results, self.treeWidget, self.tab_location, self.threadnum)

class RelatedSearchThread(QtCore.QThread):
    '''
    Same as CitationSearchThread, but retrieves related articles
    (via RefCiteRetrieval.get_related).
    '''
    def __init__(self, treeWidget, config_dict, parent = None):
        QtCore.QThread.__init__(self, parent)
        self.treeWidget = treeWidget
        self.config_dict = config_dict

    def __del__(self):
        self.wait()

    def start_process(self, titlemsg, pmid, tab_location, threadnum):
        self.titlemsg = titlemsg
        self.pmid = pmid
        self.tab_location = tab_location
        self.threadnum = threadnum
        self.start()

    def run(self):
        # Local import (as in the original) keeps reference_retrieve out of
        # module load time.
        import reference_retrieve
        retriever = reference_retrieve.RefCiteRetrieval(self.pmid, self.titlemsg, self.config_dict)
        results = [self.titlemsg] + retriever.get_related()
        self.emit(QtCore.SIGNAL('resultsObtained'),
                  results, self.treeWidget, self.tab_location, self.threadnum)

class PUBMEDSearchWidget():
    """Non-GUI helper that performs a PubMed search via Bio.Entrez/Medline."""
    def __init__(self, parent=None):
        pass
        #QtGui.QWidget.__init__(self, parent)

        
    def search_pubmed(self, searchTerm, config_dict, testPUBMED=False):
        """Search PubMed for ``searchTerm`` and return a list of record dicts.

        The term supports a small query language: ``a:`` (author), ``j:``
        (journal) and ``y:start-end`` (publication-date range) prefixes;
        other tokens are used verbatim.  All parts are AND-ed together.
        On any failure — including zero hits, which is signalled by the bare
        ``raise`` below jumping into the broad ``except`` — a single
        placeholder record titled "No results found for ..." is returned.

        NOTE(review): ``config_dict`` and ``testPUBMED`` appear unused here.
        """
        searchTerm = str(searchTerm)
        testing_identifiers = searchTerm.split()
        final_search_term = []
        tempVal = ''
        msg = "Incorrect search criteria"
        details_titles = ['ScopusRecord', 'Title', 'ArticleLink',
                          'Authors', 'Year', 'Journal', 'Issue', 'Pages', 'EID']
        incorr_search = {}
        try:
            # Placeholder record used when the query looks malformed.
            for i in details_titles:
                incorr_search[i] = ''
                incorr_search['Title'] = msg

            # Translate the mini query language into PubMed field tags.
            for identifier in testing_identifiers:
                if 'a:' in identifier:
                    tempVal = identifier.replace('a:', '')
                    if ',' in tempVal:
                        tempVal = tempVal.split(',')
                        tempVal = ' '.join(tempVal)
                    tempVal = tempVal + '[Author]'
                elif 'j:' in identifier:
                    tempVal = identifier.replace('j:', '')
                    if ',' in tempVal:
                        tempVal = tempVal.split(',')
                        tempVal = ' '.join(tempVal)
                    tempVal = '"%s"[Journal]' % tempVal
                elif 'y:' in identifier:
                    tempVal = identifier.replace('y:', '')
                    yrVal = tempVal.split('-')
                    if not len(yrVal) == 2:
                        # NOTE(review): this only PRINTS the placeholder and
                        # then falls through to index yrVal[1] (which will
                        # raise into the except below) — confirm intended.
                        print incorr_search
                    tempVal = '"%s"[Publication Date] : "%s"[Publication Date]' % (yrVal[0], yrVal[1])
                elif ':' in identifier:
                    # NOTE(review): unknown prefix is only printed, and the
                    # previous tempVal is re-appended below — confirm.
                    print incorr_search
                else:
                    tempVal = identifier
                final_search_term += [tempVal]
            # Build a "(((a) AND b) AND c"-style left-nested AND expression.
            start_val = '(' * (len(final_search_term)-1)
            final_search_term = ') AND '.join(final_search_term)
            final_search_term = start_val + final_search_term
            print final_search_term

            # Small delay before hitting Entrez (NCBI rate-limit courtesy).
            time.sleep(1)
            database = 'pubmed'
            Entrez.email = 'a.varier@gmail.com'
            handle = Entrez.esearch(db=database, term=final_search_term,
                                    tool="BioPython, Bio.Entrez", retmax = 200)
            records = Entrez.read(handle)
            idlist = records['IdList']
            handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text")
            records = Medline.parse(handle)
            records = list(records)
            details = []
            details_titles = ['PMRecord', 'Title',
                              'Authors', 'Journal', 'Issue', 'Pages', 'Year']
            # Convert each Medline record into the flat dict the GUI expects.
            for record in records:
                tempRecord = {}
                tempRecord['Title'] = record.get("TI")
                tempRecord['PMRecord'] = record.get("PMID")
                # NOTE(review): record.get('AU') may be None (no authors),
                # which would raise here and discard ALL results via the
                # broad except below — confirm intended.
                tempRecord['Authors'] = ", ".join(record.get('AU'))
                if record.get("JT"):
                    tempRecord['Journal'] = record.get("JT")
                elif record.get("CTI"):
                    tempRecord['Journal'] = ''.join(record["CTI"])
                else:
                    tempRecord['Journal'] = ''
                # Volume, with the issue number appended in parentheses.
                issue = record.get('VI') if record.get('VI') else ''
                issue = '%s (%s)' % (issue, record.get('IP')) if record.get('IP') else issue

                # Page range, falling back to the ISSN field when absent.
                pages = record.get('PG') if record.get('PG') else ''
                pages = record.get('IS') if pages == '' else pages
                tempRecord['Pages'] = pages
                tempRecord['Issue'] = issue
                tempRecord['Year'] = record.get('DA')[:4]
                tempRecord['ArticleRecord'] = ''
                tempRecord['ArticleLink'] = ('http://www.hubmed.org/fulltext.cgi?uids=' +
                                             tempRecord['PMRecord'])
                tempRecord['EID'] = tempRecord['PMRecord']
                tempRecord['PMC'] = record.get('PMC') if record.get('PMC') else ''
                details += [tempRecord]
            # If there is a mistake/no results found
            # (the bare raise deliberately jumps to the except branch).
            if len(records) == 0:
                raise
        except:
            # Every failure above funnels into the "no results" placeholder.
            msg = "No results found for %s" % searchTerm
            x = {}
            for i in details_titles:
                x[i] = ''
            x['Title'] = msg
            return [x]
        return details
    
def get_proxied_browser(config_dict):
    """Return a mechanize browser, routed through the configured proxy.

    When ``config_dict['proxyrequired']`` is 'Yes', the browser is built
    with a ``user:password@proxyhost`` string; otherwise a plain browser.
    """
    if config_dict['proxyrequired'] != 'Yes':
        return common_tools.setup_browser()
    proxy = "%s:%s@%s" % (config_dict['username'],
                          config_dict['password'],
                          config_dict['proxysite'])
    return common_tools.setup_browser(proxy)

def timer_stop(timer_st):
    """Print the wall-clock time elapsed since ``timer_st``.

    Fixed: uses timedelta.total_seconds() instead of .seconds, which only
    holds the sub-day remainder and silently truncated runs over 24 hours.
    """
    time_end = datetime.datetime.now()
    elapsed_time = time_end - timer_st
    print("%s %s" % (elapsed_time, time_end))
    print("Elapsed time = %s" % int(elapsed_time.total_seconds()))
    
def get_eid_val(url):
    """Return the value of the ``eid=...&`` query parameter in ``url``."""
    # Non-greedy capture between 'eid=' and the next '&'; raises IndexError
    # when the parameter is absent, exactly as before.
    return re.findall(r'eid\=(.*?)\&', url)[0]

def get_first_link(pge, textFilter=None, textUnfilter=None, urlFilter=None):
    """Return the first link on mechanize page ``pge`` passing the filters.

    A link matches when ``textFilter`` is a substring of its text (and, if
    supplied, ``textUnfilter`` is NOT), or when ``urlFilter`` is a substring
    of its URL.  Relative links are resolved against the page URL.  Returns
    None when nothing matches or the page has no links.

    Bugs fixed: previously a text match was only accepted when
    ``textUnfilter`` was also supplied (the assignment sat inside the
    ``if textUnfilter:`` guard), so ``textFilter`` alone never matched; and
    the ``urlFilter`` branch kept overwriting the result, returning the
    LAST url match instead of the first.
    """
    pdf_link = None
    try:
        for link in pge.links():
            if textFilter and link.text and textFilter in link.text:
                if textUnfilter is None or textUnfilter not in link.text:
                    pdf_link = link.url
                    break
            if urlFilter and urlFilter in link.url:
                pdf_link = link.url
                break
        if pdf_link and not pdf_link.startswith('http://'):
            # Resolve a relative link against the page it came from.
            pdf_link = urljoin(pge.geturl(), pdf_link)
        return pdf_link
    except Exception:
        # Narrowed from a bare except; link enumeration is best-effort.
        print('No pdf link here')
        return pdf_link

def get_all_links(pge, textFilter=None, textUnfilter=None, urlFilter=None):
    """Return every link on mechanize page ``pge`` that passes the filters.

    Matching mirrors get_first_link: a link is kept when ``textFilter`` is
    in its text (and ``textUnfilter``, if supplied, is not) or when
    ``urlFilter`` is in its URL.  Relative URLs are resolved against the
    page URL.

    Bugs fixed: the absolutising loop rebound its loop variable and never
    updated the returned list (relative links came back unresolved); a
    ``textFilter`` without a ``textUnfilter`` could never match; and a link
    whose ``text`` is None crashed the substring test.
    """
    output_links = []
    for link in pge.links():
        if textFilter and link.text and textFilter in link.text:
            if textUnfilter is None or textUnfilter not in link.text:
                output_links += [link.url]
        if urlFilter and urlFilter in link.url:
            output_links += [link.url]
    # Resolve relative links; absolute/falsy entries pass through untouched.
    return [url if not url or url.startswith('http://')
            else urljoin(pge.geturl(), url)
            for url in output_links]
                
        
def download_pdfs(cite, config_dict, pdfAloneDownload=False):
    '''
    Downloads pdf for a given record.
    The input argument is a dictionary, which has at least the title, article link, scopus record.
    The following steps will be done with the record. See below for specific locations where this occur
    1. Get the url for the article link
    2. Open the url in an appropriate browser
    3. Get a pdf link from the page (This will return a pdf link if available or None)
    4. Try to download the pdf file. Sometimes the link refers to another page with a pdf link.
    In each case here, try to make sure that the file is a pdf file.

    Returns the saved pdf path on success, the bare pdf URL when
    pdfAloneDownload is True, or None when no pdf could be obtained.
    '''

    #1.
    # Resolve the publisher URL via Entrez prlinks; fall back to hubmed
    # when Entrez has no outbound link for this PMID.
    nameval = cite['PMRecord']
    publisher_link = ('http://www.hubmed.org/fulltext.cgi?uids=' +
                      nameval)
    linkList = Entrez.read(Entrez.elink(db="pubmed",
                                   cmd="prlinks", 
                                   from_uid=nameval))
    linkList = linkList[0]['IdUrlList']['IdUrlSet'][0]['ObjUrl']
    publisher_link = str(linkList[0]['Url']) if linkList else publisher_link
    pdf_link = None
    #2.
    # Sometimes, a non-proxied browser becomes necessary. Get the page to open either in a pro
    # xied or a non proxied browser. In fact, the page usually needs to be opened in a non-proxied browser
    # but then the referred page is opened in a proxied one. This might sound confusing but see below.
    
    prox_br = get_proxied_browser(config_dict)
    try:
        # NOTE(review): if setup_browser() itself raises, non_prox_br stays
        # unbound and the web_engine_search calls below would NameError.
        non_prox_br = common_tools.setup_browser()
        non_prox_br.open(publisher_link)
        new_url = non_prox_br.geturl()
        prox_br.open(new_url)
    except:
        try:
            prox_br.open(publisher_link)
        except:
            print 'The outward link from Scopus cannot be opened'
            pass
    # if scopus did not have a publisher link, try the web engine.
    if publisher_link.strip() == '':
        print 'There was no publisher link at scopus. So, trying web engine search.'
        pdf_link = web_engine_search(cite['Title'], non_prox_br)
        if not pdf_link: print 'web engine couldnt find one'; return None
    if 'hubmed' in prox_br.geturl():
        # Landed on the hubmed fallback page; re-open it to read the html.
        page = prox_br.open(publisher_link)
        html = page.read()
        print 'hubmed'
        print prox_br.geturl()
    #3.
    # Once the browser has opened the page, get the first pdf link on this page.
    # which follows certain filters.  Try 'PDF' text, then 'pdf' text
    # (excluding image links), then any url containing 'pdf'.
    if not pdf_link:
        pdf_link = get_first_link(prox_br, textFilter = 'PDF', textUnfilter = 'IMG')
    if not pdf_link:
        pdf_link = get_first_link(prox_br, textFilter = 'pdf', textUnfilter = 'IMG')
        if not pdf_link:
            pdf_link = get_first_link(prox_br, urlFilter = 'pdf')
    if not pdf_link:
        print 'There was a publisher link, but couldnt get a pdf link. So, trying web engine ..'
        pdf_link = web_engine_search(cite['Title'], non_prox_br)
        if not pdf_link: print 'web engine couldnt find one either'; return None
    #4.
    # When trying to open the pdf link, if your encounter a pdf file, great.
    # Read the file again (I've noticed that the first read sometimes affects the pdf file.)
    # and write it to a pdf file with the eid from above.
    # If this is not a pdf file, then it usually has pdfs just in its links. In that case,
    # just get it and do the above. If it is a different page, that links to a pdf, get that too.
    # if there are frames or haven't encountered a pdf yet, you are out of luck.
    try:
        pdf_link = get_actual_pdf_link(prox_br, non_prox_br, pdf_link, cite, nameval)
        if not pdf_link: return None
        if pdfAloneDownload:
            return pdf_link
        try:
            page = prox_br.open(pdf_link)
        except:
            page = non_prox_br.open(pdf_link)
        time.sleep(1)
        pdf_name = '%s%s.pdf' % (config_dict['storageLocation'], nameval)
        f = open(pdf_name, 'wb'); f.write(page.read()); f.close()
        f = convert_pdf_to_text(pdf_name, config_dict)
        # NOTE(review): convert_pdf_to_text returns None, so this loop never
        # executes; if it ever returned a truthy value this would spin
        # forever.  Looks like an attempted "wait for completion" — confirm.
        while f: pass
        # Re-flow the extracted text: one sentence per line.
        x = open('%s%s.txt' % (config_dict['storageLocation'], nameval)).read()
        x = ' '.join(x.split())
        x = x.split('. ')
        x = '\n'.join(x)
        cite['pdfLocation'] = pdf_link
        f = open('%s%s.txt' % (config_dict['storageLocation'], nameval), 'w'); f.write(x); f.close()
        f = open('%s%s.dxt' % (config_dict['storageLocation'], nameval), 'w'); f.write(repr(cite)); f.close()
        # Keep the raw pdf only when the user asked for it.
        if not config_dict['savePdfsCheck'] == 'Yes':
            os.remove(pdf_name)
        return pdf_name
    except:
        print 'Not possible for %s due to an error' % (nameval)
        return None

def get_actual_pdf_link(prox_br, non_prox_br, pdf_link, cite, nameval):
    '''
    Getting the actual pdf link from the page.

    Opens pdf_link and, when the response does not start with the '%PDF'
    magic bytes, tries a cascade of recovery strategies (truncating the url
    at '.pdf', the jstor acceptTC trick, scraping a quoted pdf url out of
    the page, following a pdf link on a referring page, appending '.pdf',
    and finally a web-engine search).  Returns a link whose response was
    verified to be a pdf, or None.
    '''
    try:
        page = prox_br.open(pdf_link)
    except:
        # Proxy refused the url; retry without the proxy.
        page = non_prox_br.open(pdf_link)
    if not page.read()[:4] == '%PDF':
        # .pdf in the link
        if '.pdf' in pdf_link:
            # Trim anything after the first '.pdf' (query strings etc.).
            test_re = re.compile('.*?\.pdf')
            pdf_link = test_re.findall(pdf_link)[0]
            page = prox_br.open(pdf_link)
            if not page.read()[:4] == '%PDF':
                if 'jstor' in pdf_link and 'pdfplus' in pdf_link:
                    # jstor interstitial: accepting T&C yields the pdf.
                    pdf_link = pdf_link + '?acceptTC=true'
                    print pdf_link
                else:
                    # Scrape the first quoted url containing 'pdf' from the
                    # page source.
                    test_re = re.compile('\"(.*?pdf.*?)\"')
                    page = prox_br.open(pdf_link)
                    pageRead = page.read()
                    pdf_link = test_re.findall(pageRead)
                    if len(pdf_link) == 0: return None
                    pdf_link = pdf_link[0]
                page = prox_br.open(pdf_link)
                if not page.read()[:4] == '%PDF':
                    print 'No pdf file though .pdf was in the link - %s' % nameval
                    return None
        else:
            # pdf link possibly in a referring page
            page = prox_br.open(pdf_link)
            pdf_link = get_first_link(prox_br, urlFilter = '.pdf')
            if pdf_link:
                page = prox_br.open(pdf_link)
            # Else just add .pdf to the end and see if that works
            if not page.read()[:4] == '%PDF':
                pdf_link = page.geturl() + '.pdf'
        # A the end, you'll have a pdf link for sure or no pdf for this file.
        page = prox_br.open(pdf_link)
        if not page.read()[:4] == '%PDF':
            print 'Tried everything but no pdf for this file - %s' % nameval
            print 'Lets try a google search'
            # NOTE(review): the web-engine result is returned unverified —
            # it is not re-checked for the '%PDF' magic bytes; confirm.
            pdf_link = web_engine_search(cite['Title'], non_prox_br)
            if not pdf_link: return None
    return pdf_link

def web_engine_search(title, br):
    '''
    Last-resort search: look for an online pdf via Yahoo, Google or Bing.

    Queries each engine for the title and returns the first link whose url
    ends in '.pdf', or None when no engine yields one.
    '''
    query = '+'.join(title.split())
    web_engine_urls = ['http://www.search.yahoo.com/search?q=%s',
                       'http://www.google.com/search?q=%s',
                       'http://www.bing.com/search?q=%s']
    for url in web_engine_urls:
        try:
            br.open(url % query)
            # r'\.pdf' matches a literal dot; the old '.pdf' pattern treated
            # the dot as a wildcard (results unchanged thanks to the
            # endswith check, but this avoids scanning bogus candidates).
            for link in br.links(url_regex=r'\.pdf'):
                pdf_url = link.url
                print(pdf_url)
                if pdf_url.endswith('.pdf'):
                    return link.url
        except Exception:
            # Narrowed from a bare except; each engine is best-effort.
            continue
    return None

def convert_pdf_to_text(pdf, config_dict):
    '''
    Run the configured pdf-to-text tool on ``pdf``.

    ``config_dict['pdfToTextLocation']`` may hold the executable plus flags;
    it is tokenised with shlex and the pdf path is passed as a single
    argument instead of being interpolated into an os.system shell string
    (avoids shell injection via unusual filenames).  Returns None — callers
    rely on a falsy return value.
    '''
    import shlex
    import subprocess
    subprocess.call(shlex.split(config_dict['pdfToTextLocation']) + [pdf])
    
if __name__ == "__main__":
    # Ad-hoc driver: spin up worker threads that download pdfs for the
    # citation records stored under Misc/, then convert everything in
    # storage/ to text and report the elapsed time.
    timer_st = datetime.datetime.now()
    # NOTE(review): eval() on these files executes arbitrary code if the
    # files are tampered with — confirm they are app-generated only.
    ref_details = open('Misc/ref_details', 'r')
    ref_details = eval(ref_details.read())

    cite_details = open('Misc/cite_details', 'r')
    cite_details = eval(cite_details.read())

    number_of_threads = 15

    myQ = Queue.Queue(number_of_threads)
    
    for __ in range(number_of_threads):

        # NOTE(review): pdf_download_threader is not defined anywhere in
        # this file — this raises NameError at runtime; confirm where the
        # worker function is supposed to come from.
        t = Thread(target=pdf_download_threader, args=(myQ,))
        t.setDaemon(True)
        t.start()

    total_citations = 10  # NOTE(review): unused

    for cite in cite_details:
        myQ.put(cite)

    myQ.join()
    print 'done'

    storage_loc = 'storage/'
    for pdf_file in os.listdir(storage_loc):
        pdf_filename = storage_loc + pdf_file
        # NOTE(review): convert_pdf_to_text takes (pdf, config_dict); this
        # one-argument call would raise TypeError — confirm.  Also note this
        # loop feeds every file in storage/, not just *.pdf.
        convert_pdf_to_text(pdf_filename)
        
    timer_stop(timer_st)
