# coding: utf-8
# (explanatory comments added throughout)
'''
Created on 26 Feb 2016

@author: af
'''

from os import path
# Registry of every available model pipeline; `models_to_run` selects which
# one(s) to execute for this run.
all_models = ['text_classification', 'network_lp_regression', 'network_lp_regression_collapsed', 'network_lp_classification']
models_to_run = ['network_lp_classification']  # pick the model method here
# Coordinate discretization (bucketing into classes) is only required by the
# classification pipelines; skip it when none of them is selected.
_classification_models = ('text_classification', 'network_lp_classification')
do_not_discretize = not any(m in models_to_run for m in _classification_models)

# --- Dataset selection -------------------------------------------------------
# Three datasets are supported; DATASET_NUMBER (1-based) picks one, and the
# parallel lists below hold the matching per-dataset settings.
DATASET_NUMBER = 1  # 1 = cmu, 2 = na, 3 = world
TEXT_ONLY = False
# Raw string so the Windows backslashes can never be read as escape
# sequences (the original plain string only worked because \_ and \G happen
# not to be escapes; modern Python warns on that).
DATA_HOME = r'F:\_programWork\GEOdatesets'
DATASETS = ['cmu', 'na', 'world']
ENCODINGS = ['latin1', 'utf-8', 'utf-8']
buckets = [300, 2400, 2400]       # target bucket size per dataset
reguls = [5e-5, 1e-6, 2e-7]       # regularization strength per dataset
celeb_thresholds = [5, 15, 15]    # "celebrity" mention-count cutoff per dataset
_idx = DATASET_NUMBER - 1         # convert 1-based selector to list index
BUCKET_SIZE = buckets[_idx]
celeb_threshold = celeb_thresholds[_idx]
GEOTEXT_HOME = path.join(DATA_HOME, DATASETS[_idx])
data_encoding = ENCODINGS[_idx]
users_home = path.join(GEOTEXT_HOME, 'processed_data')
# Gzipped per-split user files under <dataset>/processed_data/.
testfile = path.join(users_home, 'user_info.test.gz')
devfile = path.join(users_home, 'user_info.dev.gz')
trainfile = path.join(users_home, 'user_info.train.gz')

# Parenthesized form prints identically under Python 2 and Python 3.
print("dataset: " + DATASETS[_idx])
# --- Module-level containers shared across the pipeline (filled in later) ----

# Raw longitude/latitude accumulators.
lngs = []
ltts = []

pointText = {}
keys = []
userFirstTime = {}

# Geolocation maps: userLocation maps user id -> coordinates; locationUser is
# the inverse, mapping a coordinate to every user id seen at that location.
userLocation = {}
locationUser = {}
userlat = {}
userlon = {}

# Per-split user records.
trainUsers = {}
devUsers = {}
testUsers = {}

# Per-class coordinate statistics: class label -> median / mean lat & lon.
classLatMedian = {}
classLonMedian = {}
classLatMean = {}
classLonMean = {}

# Per-split {user id: class label} assignments.
trainClasses = {}
devClasses = {}
testClasses = {}

categories = []  # every class label in use
mentions = []

# Per-split raw text, keyed by user.
trainText = {}
devText = {}
testText = {}



# --- Feature matrices, labels, and user ids per split (populated later) ------
X_train, X_dev, X_test = None, None, None
Y_train, Y_dev, Y_test = None, None, None
U_train, U_dev, U_test = None, None, None

# Matrix-factorization settings and result caches.
n_comp = 500
factorizers = []
results = {}
mention_graph = None

# Clustering method used to discretize coordinates into classes.
# NOTE(review): 'kmeans' appears at both index 0 and index 6 — possibly a
# leftover duplicate; confirm which entry is intended before reindexing.
partitionMethods = ['kmeans', 'ward', 'average', 'complete', 'median', 'spectral', 'kmeans', 'meanShift', 'Birch']
partitionMethod = partitionMethods[4]  # pick the partitioning method here ('median')

# TF-IDF vectorizer / classifier hyper-parameters.
binary = False
sublinear = True
penalty = 'l1'
fit_intercept = True
norm = 'l2'
use_idf = True

node_orders = ['l2h', 'h2l', 'random']
feature_names = None