# Copyright 2010 Silvere Vialet-Chabrand <svialet@nancy.inra.fr>
# This file is a library to search and retrieve bibliographic
# information from the ISI database.
#
# PYTIRL is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PYTIRL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PYTIRL; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA.

#
# PYTIRL: PYTHON ISI REQUEST LIBRARY
#
# By Silvere Vialet-Chabrand
# Version 0.01
#

import urllib2
import xml.dom.minidom

# Class to store bibliographic information
class Records:
    """Container for the bibliographic fields of one ISI record.

    Every field starts out as an empty string; ISI.parseDetailsXml and
    ISI.getDetails fill them in from the XML response.
    """

    # All bibliographic fields carried by a record, in the same order as
    # the ISI.keys header string.
    _FIELDS = ('abstract', 'authors', 'bib_date', 'bib_misc', 'bib_pages',
               'bib_vol', 'book_series', 'copyright', 'doctype', 'ids',
               'issn', 'issue_title', 'item_title', 'keywords',
               'keywords_plus', 'languages', 'links', 'meeting_abstract',
               'refs', 'reprint', 'research_addrs', 'reviewed_work',
               'source_series', 'source_title', 'timescited', 'ut',
               'article_no')

    def __init__(self):
        # Initialise every field to the empty string so parsers can
        # accumulate text with '+='.
        for field in self._FIELDS:
            setattr(self, field, '')

# Main Class        
class ISI:
    """Search client for the ISI Web of Science ESTI gateway.

    All HTTP traffic is routed through an authenticating HTTP proxy
    built from the credentials given to the constructor.
    """

    # Record fields whose XML element holds its text nodes directly
    # (one level deep); each fragment is accumulated as 'text '.
    _FLAT_FIELDS = ('bib_date', 'bib_misc', 'bib_pages', 'bib_vol',
                    'book_series', 'doctype', 'ids', 'issn', 'issue_title',
                    'item_title', 'links', 'meeting_abstract',
                    'reviewed_work', 'source_series', 'source_title',
                    'ut', 'article_no')

    # Record fields whose XML element wraps each value in a child element
    # (two levels deep); values are joined with '; '.
    _LIST_FIELDS = ('authors', 'copyright', 'keywords', 'keywords_plus',
                    'refs', 'reprint')

    def __init__(self, usrname, passwrd, host):
        """Store the proxy credentials and initialise request state.

        usrname / passwrd -- proxy account credentials
        host              -- proxy host:port, without a leading 'http://'
        """

        # Proxy connection information
        self.USERNAME = usrname
        self.PASSWORD = passwrd
        self.PROXY_HOST = host
        self.PROXY_URL = 'http://' + self.USERNAME + ':' \
                            + self.PASSWORD + '@' + self.PROXY_HOST

        # Keys that represent headers of database
        self.keys = 'abstract,authors,bib_date,bib_misc,bib_pages,' \
                    + 'bib_vol,book_series,copyright,doctype,ids,' \
                    + 'issn,issue_title,item_title,keywords,' \
                    + 'keywords_plus,languages,links,' \
                    + 'meeting_abstract,refs,reprint,research_addrs,' \
                    + 'reviewed_work,source_series,source_title,' \
                    + 'timescited,ut,article_no'

        # Information about the last request (filled by parseInfosXml)
        self.sid = ''
        self.qid = ''
        self.count = ''
        self.shown = ''
        self.natural = ''
        self.ut = ''
        self.category = ''
        self.heading = ''

    def getText(self, nodelist):
        """Concatenate the data of every TEXT_NODE in nodelist."""
        rc = ''
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc

    def getValueAt(self, doc, name, pos):
        """Return the text of the pos-th element called name in doc.

        Raises IndexError when fewer than pos+1 such elements exist.
        """
        return self.getText(doc.getElementsByTagName(name)[pos].childNodes)

    def parseInfosXml(self, doc):
        """Store the session and result-count values of an infos response."""
        self.sid = self.getValueAt(doc, "SID", 0)
        self.qid = self.getValueAt(doc, "QID", 0)
        self.count = self.getValueAt(doc, "COUNT", 0)
        self.shown = self.getValueAt(doc, "SHOWN", 0)
        self.natural = self.getValueAt(doc, "NATURAL", 0)
        self.ut = self.getValueAt(doc, "ut", 0)
        self.category = self.getValueAt(doc, "category", 0)
        self.heading = self.getValueAt(doc, "heading", 0)

    def _textItems(self, node, depth):
        """Return the non-blank TEXT_NODE data found depth levels below node."""
        if depth == 0:
            if node.nodeType == node.TEXT_NODE and node.data.strip() != '':
                return [node.data]
            return []
        items = []
        for child in node.childNodes:
            items.extend(self._textItems(child, depth - 1))
        return items

    def parseDetailsXml(self, nodelist, rec):
        """Fill rec with the bibliographic data of one REC element.

        nodelist -- childNodes of a REC element
        rec      -- Records instance, updated in place

        Replaces the original 27-branch if/elif chain with a table-driven
        dispatch on the element name. Also fixes a latent bug: the old
        code trimmed the last two characters of the '; '-joined fields
        even when a matching element contained no text, truncating any
        content already accumulated in that field.
        """
        for node in nodelist:
            if node.localName == None:
                continue
            name = str(node.localName)
            if name == 'abstract':
                # Abstract text is only present when flagged as available.
                if node.attributes['avail'].value == 'Y':
                    for data in self._textItems(node, 2):
                        rec.abstract += data + ' '
            elif name in self._FLAT_FIELDS:
                for data in self._textItems(node, 1):
                    setattr(rec, name, getattr(rec, name) + data + ' ')
            elif name in self._LIST_FIELDS:
                items = self._textItems(node, 2)
                # join() produces 'a; b' directly instead of appending
                # 'a; b; ' and chopping the trailing separator afterwards.
                if items:
                    setattr(rec, name, getattr(rec, name) + '; '.join(items))
            elif name == 'languages':
                # Same two-level layout as the list fields but space-joined.
                for data in self._textItems(node, 2):
                    rec.languages += data + ' '
            elif name == 'research_addrs':
                # Addresses are nested one level deeper than the list fields.
                addrs = self._textItems(node, 3)
                if addrs:
                    rec.research_addrs += '; '.join(addrs)

    def getInfos(self, opener, url_search):
        """Send the infos request and store the response values on self."""
        f = opener.open('http://estipub.isiknowledge.com/esti/cgi?' \
                        + 'action=search&viewType=xml&mode=GeneralSearch' \
                        + '&product=WOS&customersID=ISIResearchSoft' \
                        + '&ServiceName=GeneralSearch&filter=ut' \
                        + url_search + '&Start=1&Func=Links&End=1' \
                        + '&Logout=Yes&editions=D%20S%20H&DestApp=WOS')
        doc = xml.dom.minidom.parse(f)
        self.parseInfosXml(doc)

    def getDetails(self, opener, url_search, start, stop):
        """Fetch records [start, stop] and return them as a list of Records.

        start / stop -- record positions as strings, as the URL expects.
        """
        f = opener.open('http://estipub.isiknowledge.com/esti/cgi?' \
                        + 'action=search&viewType=xml&mode=GeneralSearch' \
                        + '&product=WOS&customersID=ISIResearchSoft' \
                        + '&ServiceName=GeneralSearch&filter=' + self.keys \
                        + url_search + '&Start=' + start + '&Func=Links&End=' \
                        + stop + '&Logout=Yes&editions=D%20S%20H&DestApp=WOS')
        doc = xml.dom.minidom.parse(f)

        # Parse and store in a list all records from ISI. The node list is
        # fetched once (the old loop re-ran getElementsByTagName on every
        # iteration and relied on the Python-2-only xrange).
        rec_list = []
        for rec_node in doc.getElementsByTagName('REC'):
            rec = Records()
            self.parseDetailsXml(rec_node.childNodes, rec)
            rec.timescited += str(rec_node.attributes['timescited'].value)
            rec_list.append(rec)

        return rec_list

    def search(self, topic = '', author = '', years = '', address = '', journal = ''):
        """Build the query URL, run the search and print matching titles.

        Each non-empty argument becomes a query parameter; spaces are
        percent-encoded by hand. Arguments may combine terms with the
        logical operators AND / OR.
        """

        # Build url with search information
        url_search = ''
        if topic != '':
            url_search += '&topic=' + topic.replace(' ', '%20')
        if author != '':
            url_search += '&author=' + author.replace(' ', '%20')
        if years != '':
            url_search += '&years=' + years.replace(' ', '%20')
            url_search += '&Period=Year%20Selection'
        if address != '':
            url_search += '&address=' + address.replace(' ', '%20')
        if journal != '':
            url_search += '&journal=' + journal.replace(' ', '%20')

        if url_search != '':
            # Proxy connection
            proxy_handler = urllib2.ProxyHandler({'http': self.PROXY_URL})
            opener = urllib2.build_opener(proxy_handler)

            # Change the user-agent (not strictly necessary)
            opener.addheaders = [('User-agent', 'RIS Internet connection')]

            # Retrieve information about the search (count...)
            self.getInfos(opener, url_search)

            # print(...) with one argument behaves identically on
            # Python 2 and Python 3.
            print('Found:' + self.count)
            start = '1'
            stop = '100'

            # Never request more records than the search returned.
            if int(self.count) < 100:
                stop = self.count

            # Retrieve records
            records = self.getDetails(opener, url_search, start, stop)

            # Print titles in records list
            for rec in records:
                print(rec.item_title)
    
    
if __name__ == '__main__':
    # ISI takes the username, password and host of the proxy used to
    # connect to ISI.
    # Note1: the host can be extracted from a .pac file (proxy
    # auto-configuration file).
    # Note2: give the host without a leading 'http://'.
    # (The original mixed tabs and spaces here, which Python 3 rejects
    # outright and Python 2 only tolerates by accident.)
    isi = ISI('username', 'password', 'host:3128')

    # search() takes the arguments: topic, author, years, address, journal.
    # Terms may be combined with logical operators such as AND / OR.
    isi.search('stomata', 'Zeiger AND Talbott', '2002')
