# This file is part of Sonedyan.
#
# Sonedyan is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# Sonedyan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with Sonedyan; see the file COPYING.  If not
# see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009-2012 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>

# convert Arxiv dataset to graphml

import urllib
from xml.dom.minidom import parseString

# Input: citation edge list and per-article publication dates (SNAP format).
# Output: GraphML document with one <node> per article and one <edge> per citation.
linksFile = open("Cit-HepPh.txt", "r")
graphmlFile = open("arxiv-dataset.xml", "w")

vertices = set()

# Load dates: maps article id -> publication date string.
# Lines starting with "#" are comments; blank lines are skipped rather than
# terminating the read (the original readline/strip loop stopped at the first
# blank line, silently truncating the file).
dates = {}
with open("cit-HepPh-dates.txt", "r") as datesFile:
    for rawLine in datesFile:
        line = rawLine.strip()
        if line and not line.startswith("#"):
            key, value = line.split()
            dates[key] = value

def getText(nodelist):
    """Concatenate the data of every TEXT_NODE in *nodelist*.

    Non-text nodes (elements, comments, ...) are ignored.
    """
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)

# Ids whose publication date could not be resolved, even via the arXiv API.
dateMissingIds = []

def getArticlePubDate(id):
    """Return the publication date (YYYY-MM-DD string) for article *id*.

    Lookup order:
      1. the local `dates` table under the raw id;
      2. the same table under "11" + id (ids in the links file can have a
         stripped leading "11" prefix relative to the dates file);
      3. the arXiv export API; the first <published> element's text,
         truncated to the date part (first 10 characters).
    If the API response has no <published> element, the id is recorded in
    `dateMissingIds` and "unknown" is returned.  Network errors propagate,
    matching the original behavior (only IndexError was caught).
    """
    # Flat dict lookups instead of the original triple-nested try/except.
    d = dates.get(id)
    if d is None:
        d = dates.get("11" + id)
    if d is not None:
        return d
    try:
        # e.g. http://export.arxiv.org/api/query?search_query=all:12018
        url = 'http://export.arxiv.org/api/query?search_query=all:' + id
        # NOTE(review): urllib.urlopen is Python 2 only, like the rest of
        # this script (see the `print d` statement further down).
        data = urllib.urlopen(url).read()
        dom = parseString(data)
        published = dom.getElementsByTagName("published")[0]
        return getText(published.childNodes)[0:10]
    except IndexError:
        dateMissingIds.append(id)
        return "unknown"
# Load citation links: edges[i] holds the raw "source target" line; both
# endpoints are collected into the vertex set.  Lines starting with "#"
# are comments.  (Indentation normalized to spaces; the original mixed a
# tab outer level with tab+space inner levels.)
edges = {}
ecounter = 0
line = linksFile.readline().strip()

while line:
    if not line.startswith("#"):
        key, value = line.split()
        edges[ecounter] = line
        vertices.add(key)
        vertices.add(value)
        ecounter += 1
    line = linksFile.readline().strip()

linksFile.close()

# Emit the GraphML document: header, one <node> per article (with its
# publication date as node data "d"), then one <edge> per citation.
graphmlFile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
graphmlFile.write('<graphml xmlns="http://graphml.graphdrawing.org/xmlns/graphml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"  xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns/graphml">\n')
graphmlFile.write('<key id="d" for="node" attr.name="date" attr.type="string"/>\n')
graphmlFile.write('<graph edgedefault="directed">\n')

for i in vertices:
    d = getArticlePubDate(i)
    graphmlFile.write('<node id="' + i + '">\n')
    graphmlFile.write('<data key="d">' + d + '</data>\n')
    graphmlFile.write('</node>\n')

# `for k in edges` iterates keys in both Python 2 and 3, unlike the
# original Python-2-only edges.iterkeys().
for k in edges:
    key, value = edges[k].split()
    # Edge id encodes "source|target"; edges with date-missing endpoints
    # are still written (a previous filter on dateMissingIds was disabled).
    graphmlFile.write('<edge id="' + key + '|' + value + '" directed="true" source="' + key + '" target="' + value + '"/>\n')

graphmlFile.write('</graph>\n')
graphmlFile.write('</graphml>\n')

graphmlFile.close()

# Summary statistics on stdout.
print("# missing dates: " + str(len(dateMissingIds)))
print("Corresponding IDs:")

for d in dateMissingIds:
    print(d)

print("# vertices: " + str(len(vertices)))
print("# edges: " + str(len(edges)))
