import pymysql as MySQLdb
import json
import re

def get_from_database(id):
    """Extract the citation subgraph reachable from cluster *id* and dump it as JSON.

    Starting from the seed cluster, iteratively collects every paper that
    cites a paper already in the set (only citations where the citing
    paper's year is >= the cited paper's year), stopping when a round adds
    nothing new, or early once the set reaches 30000 nodes after at least
    two expansion rounds.  Nodes and edges are then fetched, sanitized to
    printable ASCII, and written to ``sourcedata/<id>_raw.json``.

    Args:
        id: cluster id of the seed paper, as a string (it is used both as a
            dict key matching ``str(clusters.id)`` and in the output filename).

    Raises:
        ValueError: if no cluster with the given id exists.
    """
    # NOTE(review): credentials are hard-coded; consider moving to config/env.
    conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='passw0rd',
                           db='csx_citegraph', port=3306)
    try:
        cur = conn.cursor()

        # Rebuild the scratch table that accumulates reachable node ids.
        # IF EXISTS replaces the original bare try/except around the drop.
        cur.execute("DROP TABLE IF EXISTS tmp_for_nodes")
        cur.execute("""create table tmp_for_nodes (
            id BIGINT
            )""")
        conn.commit()

        # Parameterized queries: `id` comes from outside this function, so it
        # must never be spliced into the SQL text (the original was injectable).
        cur.execute("SELECT count(*) from clusters where id = %s", (id,))
        if cur.fetchall()[0][0] == 0:
            # Was a bare `assert`, which is stripped under -O; raise explicitly.
            raise ValueError("no cluster with id %s" % id)
        cur.execute("insert into tmp_for_nodes values (%s)", (id,))
        cur.execute("create index id_index on tmp_for_nodes(id)")
        conn.commit()

        # Breadth-first expansion: each round inserts papers that cite a
        # paper already collected.  The query is loop-invariant, so hoist it.
        iteration_insert = """insert into tmp_for_nodes
        SELECT citing
        from citegraph, clusters A, clusters B
        where citing = A.id and cited = B.id
        and B.cyear <= A.cyear
        and not citing in (SELECT * from tmp_for_nodes) 
        and cited in (SELECT * from tmp_for_nodes)"""
        prev_count = 0
        curr_count = 1
        iter_times = 1
        while prev_count != curr_count:
            prev_count = curr_count
            cur.execute(iteration_insert)
            conn.commit()
            iter_times += 1
            cur.execute("SELECT count(*) from tmp_for_nodes")
            curr_count = cur.fetchall()[0][0]
            # Cap the graph size, but only after at least two expansion rounds.
            if curr_count >= 30000 and iter_times >= 3:
                break

        pattern = re.compile(r'[^(\x20-\x7F)]')

        def _ascii(text):
            """Coerce to str (None -> ''), then strip non-printable-ASCII chars."""
            return pattern.sub('', '' if text is None else str(text))

        # Fetch metadata for every collected node.  Columns:
        # (id, cvenue, cauth, ctitle, cyear, abstract).
        fetchall_nodes = """SELECT clusters.id, clusters.cvenue, clusters.cauth, clusters.ctitle, clusters.cyear, citeseerx.papers.abstract
        from clusters left join citeseerx.papers on citeseerx.papers.cluster = clusters.id
        where clusters.id in (SELECT * from tmp_for_nodes)
        group by clusters.id"""
        cur.execute(fetchall_nodes)

        nodelist = {}
        for cid, venue, authors, title, year, abstract in cur.fetchall():
            # None fields become '' (the original could leak the string 'None').
            firstauthor = authors.split(',')[0] if authors else ''
            str_year = '' if year is None else str(year)
            safe_venue = '' if venue is None else venue
            node_id = str(cid)
            nodelist[node_id] = {
                'summary': _ascii(firstauthor + " [" + safe_venue + " " + str_year + "]"),
                'id': node_id,
                'arnetid': node_id,
                'citation_count': 0,
                'count': 0,
                'venue': _ascii(venue),
                'authors': _ascii(authors),
                'title': _ascii(title),
                'cluster': 0,
                'year': year,
                'firstauthor': firstauthor,
                'focus': 'others',
                'abstract': _ascii(abstract)
            }

        # Edges internal to the collected node set.
        fetchall_edges = """SELECT id, citing, cited
        from csx_citegraph.citegraph
        where citing in (SELECT * from tmp_for_nodes)
        and cited in (SELECT * from tmp_for_nodes)"""
        cur.execute(fetchall_edges)

        edgelist = []
        for edge in cur.fetchall():
            tmp_edge_dict = {
                'id': str(edge[0]),
                'source': str(edge[2]),   # cited paper
                'target': str(edge[1])    # citing paper
            }
            edgelist.append(tmp_edge_dict)
            # The cited paper may lack a `clusters` row and thus no node entry;
            # skip explicitly instead of swallowing every exception.
            source_node = nodelist.get(tmp_edge_dict['source'])
            if source_node is not None:
                source_node['citation_count'] += 1

        nodelist[id]['focus'] = 'focused'
        result_dict = {'node': nodelist, 'edge': edgelist}
        # ensure_ascii=False can emit non-ASCII text; write UTF-8 explicitly
        # and let the context manager close the file even on error.
        with open("sourcedata/" + id + "_raw.json", 'w', encoding='utf-8') as outfile:
            outfile.write(json.dumps(result_dict, ensure_ascii=False))
    finally:
        conn.close()
