import pandas as pd
from py2neo import Graph, Node, Relationship, NodeMatcher
import jieba

# Connect to the Neo4j graph database over the Bolt protocol.
graph = Graph("bolt://localhost:7687", auth=("neo4j", "12345678"))
# Wipe every node and relationship so the graph is rebuilt from scratch.
graph.delete_all()
# Begin a new transaction.
# NOTE(review): the Transaction returned here is discarded, so the writes
# below auto-commit one by one; assign it and call commit() if the whole
# build should be atomic — confirm intent.
graph.begin()

# Read the source CSV data.
storageData = pd.read_csv('movies_copy.csv', encoding='utf-8')
# All column labels of the CSV.
columnLst = storageData.columns.tolist()
# Number of data rows.
num = len(storageData['title'])

def create_movie():
    """Create one `movie` node per CSV row.

    The node's `name` is the row's title; every other CSV column except
    `id` and `title` is stored on the node as a string property.
    Reads module-level globals: storageData, columnLst, num, graph.
    """
    for i in range(num):
        data_title = storageData['title'][i]
        # Collect every column of this row as a string property.
        # (dict comprehension replaces the manual loop; the unused `count`
        # local from the original is dropped.)
        props = {column: str(storageData[column][i]) for column in columnLst}
        # `id` and `title` are not stored as plain properties.
        props.pop('id')
        props.pop('title')
        # Create the movie node keyed by its title.
        node1 = Node('movie', name=data_title, **props)
        graph.create(node1)

# Group movies by language.
def count_lang():
    """Create one `lang` node per distinct language and link each movie
    to its language with a `lang` relationship.

    Reads module-level globals: storageData, graph.
    """
    # Distinct languages in first-appearance order; dict.fromkeys is the
    # O(n) replacement for the original O(n^2) list-membership dedupe.
    appear_lang = list(dict.fromkeys(storageData['lang']))
    matcher = NodeMatcher(graph)
    for lang in appear_lang:
        # All movie nodes stored with this language.
        nodelist = list(matcher.match('movie', lang=lang))
        # Create the language node itself.
        node_lang = Node('lang', name=lang)
        graph.create(node_lang)
        # Link every matching movie to the language node.
        for node in nodelist:
            relation_lang = Relationship(node, 'lang', node_lang)
            graph.create(relation_lang)

# Group movies by country.
def count_country():
    """Create one `country` node per distinct country and link each movie
    to it with a `country` relationship.

    Reads module-level globals: storageData, graph.
    """
    # Distinct countries in first-appearance order (O(n), replacing the
    # original O(n^2) list-membership dedupe); the dead `country_count`
    # debug accumulator is dropped.
    appear_country = list(dict.fromkeys(storageData['country']))
    matcher = NodeMatcher(graph)
    for country in appear_country:
        # All movie nodes stored with this country.
        nodelist = list(matcher.match('movie', country=country))
        node_country = Node('country', name=country)
        graph.create(node_country)
        for node in nodelist:
            relation_country = Relationship(node, 'country', node_country)
            graph.create(relation_country)

# Group movies by release year.
def count_year():
    """Create one `year` node per distinct year and link each movie to it
    with a `year` relationship.

    Movie properties were stored as strings by create_movie(), so the
    match uses str(year); the `year` node itself keeps the raw value.
    Reads module-level globals: storageData, graph.
    """
    # Distinct years in first-appearance order (O(n) dedupe, consistent
    # with count_lang/count_country).
    appear_year = list(dict.fromkeys(storageData['year']))
    matcher = NodeMatcher(graph)
    for year in appear_year:
        # Movie properties are strings, hence the str() conversion.
        nodelist = list(matcher.match('movie', year=str(year)))
        node_year = Node('year', name=year)
        graph.create(node_year)
        for node in nodelist:
            relation_year = Relationship(node, 'year', node_year)
            graph.create(relation_year)

# Group movies by genre.
def count_type():
    """Create one `type` node per distinct genre and link each movie to
    every genre it carries, via `type` relationships.

    A movie's `types` property is a comma-separated genre string
    (e.g. "剧情,历史,战争"); it is tokenised with jieba and the comma
    tokens dropped, both for the distinct-genre set and per node.

    Fixes vs. original:
    - Tokenises into local structures instead of assigning back into
      storageData['types'] (chained assignment that mutated the shared
      DataFrame) and into live py2neo node properties.
    - Materialises matcher.match('movie') ONCE: a NodeMatch re-executes
      the query on each iteration, so the original's second pass only saw
      the locally-set token lists through py2neo's client-side cache.
    Reads module-level globals: storageData, graph.
    """
    # Distinct genre tokens across all rows.
    appear_type = set()
    for types_ in storageData['types']:
        appear_type.update(t for t in jieba.lcut(types_) if t != ',')
    matcher = NodeMatcher(graph)
    # One query execution; the list is reused by every genre below.
    nodelist = list(matcher.match('movie'))
    # Parallel list: token list for each node, computed once.
    node_tokens = [
        [t for t in jieba.lcut(node['types']) if t != ','] for node in nodelist
    ]
    # Create each genre node and link every movie that carries the genre.
    for type_ in appear_type:
        node_type = Node('type', name=type_)
        graph.create(node_type)
        for node, tokens in zip(nodelist, node_tokens):
            if type_ in tokens:
                relation_type = Relationship(node, 'type', node_type)
                graph.create(relation_type)

# Build the graph: movie nodes first (the other passes match existing
# movie nodes), then one pass per attribute dimension.
create_movie()
count_lang()
count_country()
count_type()
count_year()








