import os
import random
import fileinput
# jieba's part-of-speech (POS) tagging module
import jieba.posseg as pseg


# Directory containing the source article files to read.
article_path = "./fashion"
# Name of the generated csv file (written under the ./labels directory).
csv_name = "时尚.csv"
# Noun-like POS tags predefined by jieba, respectively: person name,
# general noun, place name, organization name, other proper noun.
n_e = ["nr", "n", "ns", "nt", "nz"]

# Directory where the vocabulary csv files are written.
csv_path = "./labels"

# Path of the user-defined dictionary file for jieba.
userdict_path = "../userdict.txt"

def get_vocabulary(article_path, csv_name):
    """Extract noun vocabulary from all articles under ``article_path``.

    Each article file is read, segmented with jieba POS tagging, and the
    deduplicated nouns of length >= 2 are appended to the csv file
    ``csv_name`` under ``csv_path``.  Finally the whole csv content is
    appended to the jieba user dictionary at ``userdict_path``.

    :param article_path: directory containing plain-text article files.
    :param csv_name: name of the csv file to append to under ``csv_path``.
    :return: None (the function works purely through file side effects).
    """
    if not os.path.exists(article_path):
        return
    # Ensure the output directory exists (makedirs is idempotent and also
    # creates missing parents, unlike the original os.mkdir).
    os.makedirs(csv_path, exist_ok=True)

    def _get_n_list(text):
        """Return the words in ``text`` whose POS tag is noun-like."""
        # pseg.lcut yields pairs carrying .word and .flag (the POS tag);
        # keep only words whose tag is in the noun-tag whitelist n_e.
        return [g.word for g in pseg.lcut(text) if g.flag in n_e]

    # Fix: open text files with an explicit UTF-8 encoding so behaviour
    # does not depend on the platform's default locale encoding
    # (the data is Chinese text).
    with open(os.path.join(csv_path, csv_name), "a", encoding="utf-8") as u:
        for article in os.listdir(article_path):
            with open(os.path.join(article_path, article), "r",
                      encoding="utf-8") as f:
                text = f.read()
            # Deduplicate per article and keep only nouns of length >= 2.
            n_list = [w for w in set(_get_n_list(text)) if len(w) >= 2]
            u.writelines(w + "\n" for w in n_list)

    # Append the collected vocabulary to the jieba user dictionary.
    # NOTE(review): appending on every run duplicates entries in the user
    # dictionary over time — deduplicate upstream if that matters.
    with open(os.path.join(csv_path, csv_name), "r", encoding="utf-8") as o:
        word = o.read()
    with open(userdict_path, "a", encoding="utf-8") as f:
        f.write(word)


def create_vocabulary_node_and_rel():
    """Rebuild the Vocabulary nodes in neo4j and relate them to Labels.

    All existing Vocabulary nodes (and their edges) are deleted first.
    Then, for every ``*.csv`` file under ``csv_path``, one Vocabulary node
    is created per distinct word and connected by a weighted ``Related``
    edge to the Label node whose ``title`` equals the csv file name
    without its ``.csv`` suffix.

    NOTE(review): ``GraphDatabase`` and ``NEO4J_CONFIG`` are not defined
    in this file — they must come from elsewhere (e.g.
    ``from neo4j import GraphDatabase``); confirm before running.
    """
    _driver = GraphDatabase.driver(**NEO4J_CONFIG)
    with _driver.session() as session:
        # Delete all existing Vocabulary nodes together with their edges.
        session.run("MATCH(a:Vocabulary) DETACH DELETE a")

        def _create_v_and_r(csv):
            """Load one csv file; create a node per word linked to its label."""
            path = os.path.join(csv_path, csv)
            # Fix: read via a context manager so the file handle is closed
            # (fileinput.FileInput was never closed). Strip surrounding
            # whitespace from each line and deduplicate via a set.
            with open(path, "r", encoding="utf-8") as f:
                word_list = list({line.strip() for line in f})

            # Label title = csv file name minus its ".csv" suffix.
            title = csv[:-4]

            def __create_node(word):
                """Create one word's node and its weighted edge to the label."""
                # Initial weight = the word's initial probability of belonging
                # to this label; since the word comes from an article of this
                # category, draw it uniformly from [0.5, 1), rounded to 3 dp.
                weight = round(random.uniform(0.5, 1), 3)
                # Fix: pass values as Cypher query parameters instead of
                # %-interpolating them into the query string, which is
                # fragile w.r.t. quoting and open to injection.
                cypher = ("CREATE(a:Vocabulary{name:$name}) WITH a "
                          "MATCH(b:Label{title:$title}) "
                          "MERGE(a)-[r:Related{weight:$weight}]-(b)")
                session.run(cypher, name=word, title=title, weight=weight)

            for word in word_list:
                __create_node(word)

        # Fix: only process actual .csv files; os.listdir may return other
        # entries whose names would be silently mangled by the [:-4] slice.
        for csv in os.listdir(csv_path):
            if csv.endswith(".csv"):
                _create_v_and_r(csv)