import os
import csv
import xml.dom.minidom
from itertools import chain, combinations
import urllib.request as libreq
from urllib.parse import urlencode

base_url = 'http://export.arxiv.org/api/{method}?{parameters}'
xml_file = './data/arXiv.xml'


def build_search_url(method, parameters):
    """Return the full arXiv API URL for *method*.

    *parameters* is a dict of query parameters; it is URL-encoded and
    substituted into the module-level ``base_url`` template.
    """
    return base_url.format(method=method, parameters=urlencode(parameters))


def save_data(query_url):
    """Download the API response at *query_url* and save it to ``xml_file``.

    The response body is decoded as UTF-8 and written back out as UTF-8
    (explicit encoding — the platform default could otherwise fail on
    non-ASCII author names).

    Raises:
        RuntimeError: if the written payload is suspiciously small
            (<= 10000 chars), which indicates a truncated/failed query.
            (Was a bare ``assert``, which is silently stripped under -O.)
    """
    with libreq.urlopen(query_url) as response:
        body = response.read().decode('utf-8')
    with open(xml_file, 'w', encoding='utf-8') as f:
        size = f.write(body)
    print('Write file size: {}'.format(size))
    if size <= 10000:
        raise RuntimeError(
            'arXiv response too small ({} chars); query likely failed'.format(size))


def xml_parser(path=None):
    """Parse an arXiv Atom feed and return authors grouped by paper.

    Args:
        path: XML file to parse; defaults to the module-level ``xml_file``
            (the previous hard-coded behavior, so existing callers are
            unaffected).

    Returns:
        list[list[str]]: one inner list of author names per ``<entry>``
        element. Names are whitespace-stripped — the original stored the
        raw text even though it filtered on ``strip()``, which could
        create duplicate graph nodes for padded names.
    """
    if path is None:
        path = xml_file
    authors = []
    dom = xml.dom.minidom.parse(path)
    root = dom.documentElement
    for entry in root.getElementsByTagName('entry'):
        co_authors = []
        for author_node in entry.getElementsByTagName('author'):
            name_nodes = author_node.getElementsByTagName('name')
            # Guard against a missing or empty <name> element instead of
            # raising IndexError on malformed entries.
            if not name_nodes or not name_nodes[0].childNodes:
                continue
            author_name = name_nodes[0].childNodes[0].data.strip()
            if author_name:
                co_authors.append(author_name)
        authors.append(co_authors)

    return authors


def save_nodes(authors, path='./data/nodes.csv'):
    """Write the deduplicated author names to *path*, one name per line.

    Args:
        authors: list of per-paper author-name lists (as returned by
            ``xml_parser``); flattened and deduplicated here.
        path: output file; defaults to the previous hard-coded location
            so existing callers are unaffected.

    Fixes: the original wrote ``os.linesep`` to a text-mode file, which
    text-mode newline translation turns into ``\\r\\r\\n`` on Windows;
    plain ``'\\n'`` is correct. Encoding is pinned to UTF-8 for
    non-ASCII names.
    """
    nodes = set(chain.from_iterable(authors))
    print('Number of nodes:{}'.format(len(nodes)))
    with open(path, 'w', encoding='utf-8') as f:
        f.writelines(node + '\n' for node in nodes)


def save_edges(authors, path='./data/edges.csv'):
    """Write one CSV row per co-authorship edge to *path*.

    Every unordered pair of authors on the same paper becomes an edge;
    single-author papers contribute none.

    Args:
        authors: list of per-paper author-name lists.
        path: output file; defaults to the previous hard-coded location
            so existing callers are unaffected.

    Fixes: ``csv.writer`` requires the file be opened with
    ``newline=''`` (otherwise blank rows appear on Windows), and the
    encoding is pinned to UTF-8 for non-ASCII names.
    """
    n = 0
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        for group in authors:
            if len(group) <= 1:
                continue
            for edge in combinations(group, 2):
                writer.writerow(edge)
                n += 1

    print('Number of edges:{}'.format(n))
