from gensim.models import Word2Vec
# Training data: each "sentence" is a list of token strings.
corpus = [
    ['ma', 'as', 'sy', 'yt', 'tv', 've', 'es'],
    ['sf', 'mt', 'td', 'dv', 'et'],
    ['tt', 'ty', 'ya'],
    ['ya', 'ad', 'df', 'fa', 'sg'],
    ['ad', 'df', 'fa'],
    ['fi', 'ia', 'sg', 'gr', 'rt', 'tg'],
]

# Train the model.
# vector_size=10 -> 10-dimensional embeddings;
# min_count=1   -> keep every token, even those seen only once.
w2v = Word2Vec(corpus, vector_size=10, min_count=1)

# Summarize the trained model.
print(w2v)

# Summarize the vocabulary (token -> index mapping).
vocab = w2v.wv.key_to_index
print(vocab)

# Look up the embedding vector and the index of one token.
print('gr的词向量：', w2v.wv['gr'])
print('gr的索引号：', w2v.wv.key_to_index['gr'])

# Persist the trained model to disk.
w2v.save('model.bin')

# Reload the saved model and confirm the round trip.
restored = Word2Vec.load('model.bin')
print(restored)