# -*- coding: utf-8 -*-
"""
Created on Fri Oct 29 08:25:21 2021

@author: joker
"""

# Word-vector demo queries (REPL-style snippets).
# NOTE(review): `wv` is not defined until the KeyedVectors load further down
# this file — these lines fail if the script is executed top-to-bottom.
# Semantic query: woman + Europe + physics + scientist
answer_vector = wv['woman'] + wv['Europe'] + wv['physics'] + wv['scientist']
# Same query with the gender-bias component subtracted out
answer_vector = wv['woman'] + wv['Europe'] + wv['physics'] + wv['scientist'] - wv['male'] - 2 * wv['man'] 
# Analogy query: Louis Pasteur is to germs as ? is to physics
answer_vector = wv['Louis_Pasteur'] - wv['germs'] + wv['physics']
# Compute an attribute ("ness") vector — requires the nlpia example module
from nlpia.book.examples.ch06_nessvectors import nessvector 
nessvector('Marie_Curie').round(2) 
# 使用gensim库的word2vec模块
# Load the pretrained GoogleNews vectors with gensim's word2vec module.
# NOTE(review): hard-coded absolute path — adjust for your environment.
from gensim.models.keyedvectors import KeyedVectors 
word_vectors = KeyedVectors.load_word2vec_format('C:/ProgramData/Anaconda3/Lib/site-packages/nlpia/bigdata/GoogleNews-vectors-negative300.bin.gz', binary=True)
# Top-5 terms most similar to the combination of 'cooking' and 'potatoes'
word_vectors.most_similar(positive=['cooking', 'potatoes'], topn=5) 
# Tiny tokenized corpus for training a custom, domain-specific word2vec model.
token_list = [
    ('to provide early intervention/early childhood special education '
     'services to eligible children and their families').split(),
    'essential job functions'.split(),
    ('participate as a transdisciplinary team member to complete '
     'educational assessments for').split(),
]
# Train a custom Word2vec model on token_list and save it to disk.
from gensim.models.word2vec import Word2Vec
# Dimensionality of the word vectors
num_features = 300
# Ignore tokens appearing fewer than this many times
min_word_count = 3
# Number of CPU worker threads for training
num_workers = 2
# Context window size (tokens on either side of the target)
window_size = 6
# Downsampling rate for very frequent tokens
subsampling = 1e-3
# Instantiate (and train) the Word2vec model
model = Word2Vec(token_list, workers=num_workers, vector_size=num_features, min_count=min_word_count, window=window_size, sample=subsampling)
# Discard weights no longer needed after training, keeping unit-normalized
# vectors to save memory.
# BUG FIX: Word2Vec.init_sims() is deprecated in gensim 4.x (this file already
# uses the 4.x API, e.g. vector_size=); unit_normalize_all() is the supported,
# destructive equivalent of init_sims(replace=True).
model.wv.unit_normalize_all()
# Save the trained model under this name
model_name = "my_domain_specific_word2vec_model" 
model.save(model_name)


# Word-relationship visualization: load the pretrained vectors.
# NOTE(review): hard-coded absolute path — adjust for your environment.
from gensim.models.keyedvectors import KeyedVectors 
wv = KeyedVectors.load_word2vec_format('C:/ProgramData/Anaconda3/Lib/site-packages/nlpia/bigdata/GoogleNews-vectors-negative300.bin.gz', binary=True)
# Vocabulary size (GoogleNews has ~3M tokens)
len(wv.key_to_index) 
# Vocabulary as a pandas Series: token -> index (tokens are frequency-ordered)
import pandas as pd 
vocab = pd.Series(wv.key_to_index)
# Peek at six tokens around the 1,000,000th vocabulary entry.
# BUG FIX: original slice was iloc[1000000:100006] — start > stop, so it
# always returned an empty Series.
vocab.iloc[1000000:1000006] 
# Euclidean distance between 'Illinois' and 'Illini'
import numpy as np 
illinois, illini = wv['Illinois'], wv['Illini']
np.linalg.norm(illinois - illini)
# Cosine similarity: dot product scaled by the two vector lengths
cos_similarity = np.dot(illinois, illini) / (
    np.linalg.norm(illinois) * np.linalg.norm(illini))
cos_similarity
# Cosine distance is the complement of cosine similarity
1 - cos_similarity
# US city data from the nlpia loaders
from nlpia.data.loaders import get_data 
cities = get_data('cities') 
cities.head(1).T
# Keep only US rows that carry a state (admin1) code
us = cities[(cities.country_code == 'US') & (cities.admin1_code.notnull())].copy() 
# Map state abbreviations to full state names
states = pd.read_csv('C:/Users/joker/Desktop/自然语言处理实战/Data/Ch06/states.csv') 
states = dict(zip(states.Abbreviation, states.State)) 
us['city'] = us.name.copy() 
us['st'] = us.admin1_code.copy() 
us['state'] = us.st.map(states) 
us[us.columns[-3:]].head()
# Collect city names, abbreviations, and state names that have word vectors.
# BUG FIX: pd.np was removed in pandas 1.0 — use numpy (np) directly.
vocab = np.concatenate([us.city, us.st, us.state]) 
vocab = np.array([word for word in vocab if word in wv]) 
vocab[:5] 
# City vectors augmented with their state vector: use the full state name's
# vector when it is in the vocabulary, otherwise fall back to the abbreviation.
city_plus_state = [] 
for city, state_name, abbrev in zip(us.city, us.state, us.st): 
    if city not in vocab: 
        continue 
    region = state_name if state_name in vocab else abbrev 
    city_plus_state.append(list(wv[city] + wv[region])) 
us_300D = pd.DataFrame(city_plus_state)
# Bubble chart of US city word vectors
from sklearn.decomposition import PCA 
from matplotlib import pyplot as plt
# Project the 300-D vectors down to 2-D for visualization
pca = PCA(n_components=2) 
us_300D = get_data('cities_us_wordvectors') 
us_2D = pca.fit_transform(us_300D.iloc[:, :300]) 
# Precomputed 2-D projection plus metadata (lat/lon, population, timezone)
df = get_data('cities_us_wordvectors_pca2_meta') 
# One random color per city; bubble area scaled by population (max -> 1000)
colors = np.random.rand(len(df.timezone))
size = 1000 * (df.population / df.population.max())
plt.scatter(x=df.longitude, y=df.latitude, s=size, c=colors, alpha=0.6)  
# Axis titles
plt.ylabel('latitude')  
plt.xlabel('longitude')  
plt.show()

# Document similarity with Doc2vec
import multiprocessing 
# Number of available CPU cores
num_cores = multiprocessing.cpu_count() 
from gensim.models.doc2vec import TaggedDocument, Doc2Vec 
# Coarse tokenizer: drops single-letter tokens and punctuation
from gensim.utils import simple_preprocess 
corpus = ['This is the first document ...','another document ...'] 
# Wrap each document with an integer tag for Doc2vec training
training_corpus = [] 
for i, text in enumerate(corpus): 
    tagged_doc = TaggedDocument(simple_preprocess(text), [i]) 
    training_corpus.append(tagged_doc) 
# Instantiate a Doc2vec model; train for 10 epochs
model = Doc2Vec(vector_size=100, min_count=2, workers=num_cores, epochs=10) 
# Build the vocabulary, then train
model.build_vocab(training_corpus) 
model.train(training_corpus, total_examples=model.corpus_count, epochs=model.epochs) 
# Infer a vector for an unseen document.
# BUG FIX: gensim 4.x renamed infer_vector's `steps` parameter to `epochs`
# (this file already uses the 4.x API, e.g. vector_size=).
model.infer_vector(simple_preprocess('This is a completely unseen document'), epochs=10)







