{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "76d75080",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# 计算机网络基础"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "1035dfa3",
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "import json\n",
    "from xml.dom.minidom import Document\n",
    "\n",
    "data = [{'name': '小王', 'weight': '40', 'age': '50'},{'name': '小贾','weight': '50', 'age': '23'}]\n",
    "\n",
    "# 保存json\n",
    "with open('result/domain.json', 'w', encoding='utf-8') as f:\n",
    "    json.dump(data, f)\n",
    "    \n",
    "\n",
    "    \n",
    "# 保存xml\n",
    "doc = Document()  #创建DOM文档对象\n",
    "user = doc.createElement('userTable') #创建根元素\n",
    "doc.appendChild(user)\n",
    "\n",
    "for i in data: # 遍历数据\n",
    "    user_elem = doc.createElement('user')\n",
    "    user_elem.setAttribute('name',i['name'])\n",
    "    user_elem.setAttribute('weight',i['weight'])\n",
    "    user_elem.setAttribute('age',i['age'])\n",
    "    \n",
    "    user.appendChild(user_elem) # 保存一个子节点\n",
    "    \n",
    "with open('result/domain.xml', 'w', encoding='utf-8') as f:\n",
    "    f.write(doc.toprettyxml(indent = ''))\n",
    "    f.close()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "96990b38",
   "metadata": {
    "hidden": true
   },
   "source": [
    "网络访问流程如下：\n",
    "1. 客户端打开coggle.club\n",
    "2. 客户端通过DNS映射到具体服务器IP\n",
    "3. 客户端发起一个HTTP会话，与服务端进行TCP握手\n",
    "4. 在客户端传输层打包请求信息，然后按照IP端口发送到服务端上\n",
    "5. 服务端处理请求后返回响应内容，如html静态内容\n",
    "6. 客户端接收响应，解释内容\n",
    "7. 若为短链接，断开TCP连接\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d28f6421",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# HTTP协议与requests"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ea6f44e",
   "metadata": {
    "hidden": true
   },
   "source": [
    "get与post的区别如下："
   ]
  },
  {
   "cell_type": "markdown",
   "id": "30638d49",
   "metadata": {
    "hidden": true
   },
   "source": [
    "|区别|get|post|\n",
    "|--|--|--|\n",
    "|后退刷新|不重新加载|重新加载数据|\n",
    "|书签|可收藏为书签|不可收藏为书签|\n",
    "|缓存|可以被缓存|不能被缓存|\n",
    "|数据限制|只接受ACSII|无限制|\n",
    "|可见性|URL可见|URL不可见|\n",
    "|安全性|相对较差，因为参数暴露在浏览器URL中|更安全，参数不会被浏览器URL直接显示|\n",
    "|功能性|通常用于开发数据读取类接口|通常用于开发数据写入类接口|"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "4fd76098",
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "响应码: 200\n",
      "响应体: <!DOCTYPE html>\n",
      "<!--STATUS OK--><html> <head><meta http-equiv=content-type content=text/html;charset=utf-8><meta http-equiv=X-UA-Compatible content=IE=Edge><meta content=always name=referrer><link rel=stylesheet type=text/css href=https://ss1.bdstatic.com/5eN1bjq8AAUYm2zgoY3K/r/www/cache/bdorz/baidu.min.css><title>ç¾åº¦ä¸ä¸ï¼ä½ å°±ç¥é</title></head> <body link=#0000cc> <div id=wrapper> <div id=head> <div class=head_wrapper> <div class=s_form> <div class=s_form_wrapper> <div id=lg> <img hidefocus=true src=//www.baidu.com/img/bd_logo1.png width=270 height=129> </div> <form id=form name=f action=//www.baidu.com/s class=fm> <input type=hidden name=bdorz_come value=1> <input type=hidden name=ie value=utf-8> <input type=hidden name=f value=8> <input type=hidden name=rsv_bp value=1> <input type=hidden name=rsv_idx value=1> <input type=hidden name=tn value=baidu><span class=\"bg s_ipt_wr\"><input id=kw name=wd class=s_ipt value maxlength=255 autocomplete=off autofocus=autofocus></span><span class=\"bg s_btn_wr\"><input type=submit id=su value=ç¾åº¦ä¸ä¸ class=\"bg s_btn\" autofocus></span> </form> </div> </div> <div id=u1> <a href=http://news.baidu.com name=tj_trnews class=mnav>æ°é»</a> <a href=https://www.hao123.com name=tj_trhao123 class=mnav>hao123</a> <a href=http://map.baidu.com name=tj_trmap class=mnav>å°å¾</a> <a href=http://v.baidu.com name=tj_trvideo class=mnav>è§é¢</a> <a href=http://tieba.baidu.com name=tj_trtieba class=mnav>è´´å§</a> <noscript> <a href=http://www.baidu.com/bdorz/login.gif?login&amp;tpl=mn&amp;u=http%3A%2F%2Fwww.baidu.com%2f%3fbdorz_come%3d1 name=tj_login class=lb>ç»å½</a> </noscript> <script>document.write('<a href=\"http://www.baidu.com/bdorz/login.gif?login&tpl=mn&u='+ encodeURIComponent(window.location.href+ (window.location.search === \"\" ? \"?\" : \"&\")+ \"bdorz_come=1\")+ '\" name=\"tj_login\" class=\"lb\">ç»å½</a>');\n",
      "                </script> <a href=//www.baidu.com/more/ name=tj_briicon class=bri style=\"display: block;\">æ´å¤äº§å</a> </div> </div> </div> <div id=ftCon> <div id=ftConw> <p id=lh> <a href=http://home.baidu.com>å",
      "³äºç¾åº¦</a> <a href=http://ir.baidu.com>About Baidu</a> </p> <p id=cp>&copy;2017&nbsp;Baidu&nbsp;<a href=http://www.baidu.com/duty/>ä½¿ç¨ç¾åº¦åå¿",
      "è¯»</a>&nbsp; <a href=http://jianyi.baidu.com/ class=cp-feedback>æè§åé¦</a>&nbsp;äº¬ICPè¯030173å·&nbsp; <img src=//www.baidu.com/img/gs.gif> </p> </div> </div> </div> </body> </html>\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import requests\n",
    "resp = requests.get('https://www.baidu.com/')\n",
    "print('响应码: {}'.format(resp.status_code))\n",
    "print('响应体: {}'.format(resp.text))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a9565add",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# bs4基础使用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "1638bd6f",
   "metadata": {
    "hidden": true,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最后结果: {'base': ['BaseEstimator', 'BiclusterMixin', 'ClassifierMixin', 'ClusterMixin', 'DensityMixin', 'RegressorMixin', 'TransformerMixin', 'clone', 'is_classifier', 'is_regressor'], 'feature_selection': ['SelectorMixin', 'GenericUnivariateSelect', 'SelectPercentile', 'SelectKBest', 'SelectFpr', 'SelectFdr', 'SelectFromModel', 'SelectFwe', 'SequentialFeatureSelector', 'RFE', 'RFECV', 'VarianceThreshold', 'chi2', 'f_classif', 'f_regression', 'r_regression', 'mutual_info_classif', 'mutual_info_regression'], 'calibration': ['CalibratedClassifierCV', 'calibration_curve', 'CalibrationDisplay'], 'cluster': ['AffinityPropagation', 'AgglomerativeClustering', 'Birch', 'DBSCAN', 'FeatureAgglomeration', 'KMeans', 'MiniBatchKMeans', 'MeanShift', 'OPTICS', 'SpectralClustering', 'SpectralBiclustering', 'SpectralCoclustering', 'affinity_propagation', 'cluster_optics_dbscan', 'cluster_optics_xi', 'compute_optics_graph', 'dbscan', 'estimate_bandwidth', 'k_means', 'kmeans_plusplus', 'mean_shift', 'spectral_clustering', 'ward_tree'], 'compose': ['ColumnTransformer', 'TransformedTargetRegressor', 'make_column_transformer', 'make_column_selector'], 'covariance': ['EmpiricalCovariance', 'EllipticEnvelope', 'GraphicalLasso', 'GraphicalLassoCV', 'LedoitWolf', 'MinCovDet', 'OAS', 'ShrunkCovariance', 'empirical_covariance', 'graphical_lasso', 'ledoit_wolf', 'oas', 'shrunk_covariance'], 'cross_decomposition': ['CCA', 'PLSCanonical', 'PLSRegression', 'PLSSVD'], 'datasets': ['clear_data_home', 'dump_svmlight_file', 'fetch_20newsgroups', 'fetch_20newsgroups_vectorized', 'fetch_california_housing', 'fetch_covtype', 'fetch_kddcup99', 'fetch_lfw_pairs', 'fetch_lfw_people', 'fetch_olivetti_faces', 'fetch_openml', 'fetch_rcv1', 'fetch_species_distributions', 'get_data_home', 'load_boston', 'load_breast_cancer', 'load_diabetes', 'load_digits', 'load_files', 'load_iris', 'load_linnerud', 'load_sample_image', 'load_sample_images', 'load_svmlight_file', 'load_svmlight_files', 'load_wine', 
'make_biclusters', 'make_blobs', 'make_checkerboard', 'make_circles', 'make_classification', 'make_friedman1', 'make_friedman2', 'make_friedman3', 'make_gaussian_quantiles', 'make_hastie_10_2', 'make_low_rank_matrix', 'make_moons', 'make_multilabel_classification', 'make_regression', 'make_s_curve', 'make_sparse_coded_signal', 'make_sparse_spd_matrix', 'make_sparse_uncorrelated', 'make_spd_matrix', 'make_swiss_roll'], 'decomposition': ['DictionaryLearning', 'FactorAnalysis', 'FastICA', 'IncrementalPCA', 'KernelPCA', 'LatentDirichletAllocation', 'MiniBatchDictionaryLearning', 'MiniBatchSparsePCA', 'NMF', 'PCA', 'SparsePCA', 'SparseCoder', 'TruncatedSVD', 'dict_learning', 'dict_learning_online', 'fastica', 'non_negative_factorization', 'sparse_encode'], 'discriminant_analysis': ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis'], 'dummy': ['DummyClassifier', 'DummyRegressor'], 'ensemble': ['AdaBoostClassifier', 'AdaBoostRegressor', 'BaggingClassifier', 'BaggingRegressor', 'ExtraTreesClassifier', 'ExtraTreesRegressor', 'GradientBoostingClassifier', 'GradientBoostingRegressor', 'IsolationForest', 'RandomForestClassifier', 'RandomForestRegressor', 'RandomTreesEmbedding', 'StackingClassifier', 'StackingRegressor', 'VotingClassifier', 'VotingRegressor', 'HistGradientBoostingRegressor', 'HistGradientBoostingClassifier'], 'exceptions': ['ConvergenceWarning', 'DataConversionWarning', 'DataDimensionalityWarning', 'EfficiencyWarning', 'FitFailedWarning', 'NotFittedError', 'UndefinedMetricWarning'], 'experimental': ['enable_hist_gradient_boosting', 'enable_iterative_imputer', 'enable_halving_search_cv'], 'feature_extraction': ['DictVectorizer', 'FeatureHasher', 'image', 'image.extract_patches_2d', 'image.grid_to_graph', 'image.img_to_graph', 'image.reconstruct_from_patches_2d', 'image.PatchExtractor', 'text', 'text.CountVectorizer', 'text.HashingVectorizer', 'text.TfidfTransformer', 'text.TfidfVectorizer'], 'gaussian_process': ['GaussianProcessClassifier', 
'GaussianProcessRegressor', 'kernels.CompoundKernel', 'kernels.ConstantKernel', 'kernels.DotProduct', 'kernels.ExpSineSquared', 'kernels.Exponentiation', 'kernels.Hyperparameter', 'kernels.Kernel', 'kernels.Matern', 'kernels.PairwiseKernel', 'kernels.Product', 'kernels.RBF', 'kernels.RationalQuadratic', 'kernels.Sum', 'kernels.WhiteKernel'], 'impute': ['SimpleImputer', 'IterativeImputer', 'MissingIndicator', 'KNNImputer'], 'inspection': ['partial_dependence', 'permutation_importance', 'PartialDependenceDisplay', 'plot_partial_dependence'], 'isotonic': ['IsotonicRegression', 'check_increasing', 'isotonic_regression'], 'kernel_approximation': ['AdditiveChi2Sampler', 'Nystroem', 'PolynomialCountSketch', 'RBFSampler', 'SkewedChi2Sampler'], 'kernel_ridge': ['KernelRidge'], 'linear_model': ['LogisticRegression', 'LogisticRegressionCV', 'PassiveAggressiveClassifier', 'Perceptron', 'RidgeClassifier', 'RidgeClassifierCV', 'SGDClassifier', 'SGDOneClassSVM', 'LinearRegression', 'Ridge', 'RidgeCV', 'SGDRegressor', 'SGDRegressor', 'SGDClassifier', 'ElasticNet', 'ElasticNetCV', 'Lars', 'LarsCV', 'Lasso', 'LassoCV', 'LassoLars', 'LassoLarsCV', 'LassoLarsIC', 'OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuitCV', 'ARDRegression', 'BayesianRidge', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'SGDRegressor', 'HuberRegressor', 'QuantileRegressor', 'RANSACRegressor', 'TheilSenRegressor', 'PoissonRegressor', 'TweedieRegressor', 'GammaRegressor', 'PassiveAggressiveRegressor', 'enet_path', 'lars_path', 'lars_path_gram', 'lasso_path', 'orthogonal_mp', 'orthogonal_mp_gram', 'ridge_regression'], 'manifold': ['Isomap', 'LocallyLinearEmbedding', 'MDS', 'SpectralEmbedding', 'TSNE', 'locally_linear_embedding', 'smacof', 'spectral_embedding', 'trustworthiness'], 'metrics': ['check_scoring', 'get_scorer', 'make_scorer', 'accuracy_score', 'auc', 'average_precision_score', 'balanced_accuracy_score', 'brier_score_loss', 'classification_report', 
'cohen_kappa_score', 'confusion_matrix', 'dcg_score', 'det_curve', 'f1_score', 'fbeta_score', 'hamming_loss', 'hinge_loss', 'jaccard_score', 'log_loss', 'matthews_corrcoef', 'multilabel_confusion_matrix', 'ndcg_score', 'precision_recall_curve', 'precision_recall_fscore_support', 'precision_score', 'recall_score', 'roc_auc_score', 'roc_curve', 'top_k_accuracy_score', 'zero_one_loss', 'explained_variance_score', 'max_error', 'mean_absolute_error', 'mean_squared_error', 'mean_squared_log_error', 'median_absolute_error', 'mean_absolute_percentage_error', 'r2_score', 'mean_poisson_deviance', 'mean_gamma_deviance', 'mean_tweedie_deviance', 'd2_tweedie_score', 'mean_pinball_loss', 'coverage_error', 'label_ranking_average_precision_score', 'label_ranking_loss', 'cluster', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'calinski_harabasz_score', 'davies_bouldin_score', 'completeness_score', 'cluster.contingency_matrix', 'cluster.pair_confusion_matrix', 'fowlkes_mallows_score', 'homogeneity_completeness_v_measure', 'homogeneity_score', 'mutual_info_score', 'normalized_mutual_info_score', 'rand_score', 'silhouette_score', 'silhouette_samples', 'v_measure_score', 'consensus_score', 'DistanceMetric', 'pairwise.additive_chi2_kernel', 'pairwise.chi2_kernel', 'pairwise.cosine_similarity', 'pairwise.cosine_distances', 'pairwise.distance_metrics', 'pairwise.euclidean_distances', 'pairwise.haversine_distances', 'pairwise.kernel_metrics', 'pairwise.laplacian_kernel', 'pairwise.linear_kernel', 'pairwise.manhattan_distances', 'pairwise.nan_euclidean_distances', 'pairwise.pairwise_kernels', 'pairwise.polynomial_kernel', 'pairwise.rbf_kernel', 'pairwise.sigmoid_kernel', 'pairwise.paired_euclidean_distances', 'pairwise.paired_manhattan_distances', 'pairwise.paired_cosine_distances', 'pairwise.paired_distances', 'pairwise_distances', 'pairwise_distances_argmin', 'pairwise_distances_argmin_min', 'pairwise_distances_chunked', 'plot_confusion_matrix', 'plot_det_curve', 
'plot_precision_recall_curve', 'plot_roc_curve', 'ConfusionMatrixDisplay', 'DetCurveDisplay', 'PrecisionRecallDisplay', 'RocCurveDisplay'], 'mixture': ['BayesianGaussianMixture', 'GaussianMixture'], 'model_selection': ['GroupKFold', 'GroupShuffleSplit', 'KFold', 'LeaveOneGroupOut', 'LeavePGroupsOut', 'LeaveOneOut', 'LeavePOut', 'PredefinedSplit', 'RepeatedKFold', 'RepeatedStratifiedKFold', 'ShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'StratifiedGroupKFold', 'TimeSeriesSplit', 'check_cv', 'train_test_split', 'GridSearchCV', 'HalvingGridSearchCV', 'ParameterGrid', 'ParameterSampler', 'RandomizedSearchCV', 'HalvingRandomSearchCV', 'cross_validate', 'cross_val_predict', 'cross_val_score', 'learning_curve', 'permutation_test_score', 'validation_curve'], 'multiclass': ['OneVsRestClassifier', 'OneVsOneClassifier', 'OutputCodeClassifier'], 'multioutput': ['ClassifierChain', 'MultiOutputRegressor', 'MultiOutputClassifier', 'RegressorChain'], 'naive_bayes': ['BernoulliNB', 'CategoricalNB', 'ComplementNB', 'GaussianNB', 'MultinomialNB'], 'neighbors': ['BallTree', 'KDTree', 'KernelDensity', 'KNeighborsClassifier', 'KNeighborsRegressor', 'KNeighborsTransformer', 'LocalOutlierFactor', 'RadiusNeighborsClassifier', 'RadiusNeighborsRegressor', 'RadiusNeighborsTransformer', 'NearestCentroid', 'NearestNeighbors', 'NeighborhoodComponentsAnalysis', 'kneighbors_graph', 'radius_neighbors_graph'], 'neural_network': ['BernoulliRBM', 'MLPClassifier', 'MLPRegressor'], 'pipeline': ['FeatureUnion', 'Pipeline', 'make_pipeline', 'make_union'], 'preprocessing': ['Binarizer', 'FunctionTransformer', 'KBinsDiscretizer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MaxAbsScaler', 'MinMaxScaler', 'Normalizer', 'OneHotEncoder', 'OrdinalEncoder', 'PolynomialFeatures', 'PowerTransformer', 'QuantileTransformer', 'RobustScaler', 'SplineTransformer', 'StandardScaler', 'add_dummy_feature', 'binarize', 'label_binarize', 'maxabs_scale', 'minmax_scale', 
'normalize', 'quantile_transform', 'robust_scale', 'scale', 'power_transform'], 'random_projection': ['GaussianRandomProjection', 'SparseRandomProjection', 'johnson_lindenstrauss_min_dim'], 'semi_supervised': ['LabelPropagation', 'LabelSpreading', 'SelfTrainingClassifier'], 'svm': ['LinearSVC', 'LinearSVR', 'NuSVC', 'NuSVR', 'OneClassSVM', 'SVC', 'SVR', 'l1_min_c'], 'tree': ['DecisionTreeClassifier', 'DecisionTreeRegressor', 'ExtraTreeClassifier', 'ExtraTreeRegressor', 'export_graphviz', 'export_text', 'plot_tree'], 'utils': ['arrayfuncs.min_pos', 'as_float_array', 'assert_all_finite', 'Bunch', 'check_X_y', 'check_array', 'check_scalar', 'check_consistent_length', 'check_random_state', 'class_weight.compute_class_weight', 'class_weight.compute_sample_weight', 'deprecated', 'estimator_checks.check_estimator', 'estimator_checks.parametrize_with_checks', 'estimator_html_repr', 'extmath.safe_sparse_dot', 'extmath.randomized_range_finder', 'extmath.randomized_svd', 'extmath.fast_logdet', 'extmath.density', 'extmath.weighted_mode', 'gen_batches', 'gen_even_slices', 'graph.single_source_shortest_path_length', 'indexable', 'metaestimators.if_delegate_has_method', 'metaestimators.available_if', 'multiclass.type_of_target', 'multiclass.is_multilabel', 'multiclass.unique_labels', 'murmurhash3_32', 'resample', '_safe_indexing', 'safe_mask', 'safe_sqr', 'shuffle', 'sparsefuncs.incr_mean_variance_axis', 'sparsefuncs.inplace_column_scale', 'sparsefuncs.inplace_row_scale', 'sparsefuncs.inplace_swap_row', 'sparsefuncs.inplace_swap_column', 'sparsefuncs.mean_variance_axis', 'sparsefuncs.inplace_csr_column_scale', 'sparsefuncs_fast.inplace_csr_row_normalize_l1', 'sparsefuncs_fast.inplace_csr_row_normalize_l2', 'random.sample_without_replacement', 'validation.check_is_fitted', 'validation.check_memory', 'validation.check_symmetric', 'validation.column_or_1d', 'validation.has_fit_parameter', 'all_estimators', 'parallel_backend', 'register_parallel_backend']}\n"
     ]
    }
   ],
   "source": [
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "url = 'https://www.baidu.com/'\n",
    "# 爬取页面\n",
    "resp_body = requests.get('url').text\n",
    "soup = BeautifulSoup(resp_body, 'html.parser')\n",
    "# 提取所有api的a标签\n",
    "all_a = soup.find_all('a', class_='reference internal')\n",
    "# 遍历a标签获取内容\n",
    "result = {}\n",
    "for a in all_a:\n",
    "    title = a.get('title')\n",
    "    if title != None:\n",
    "        packages = title.replace('sklearn.', '').split('.')\n",
    "        package = packages[0]\n",
    "        if len(packages) != 1:\n",
    "            api = '.'.join(packages[1:])\n",
    "            if package in result.keys():\n",
    "                result[package].append(api)\n",
    "            else:\n",
    "                result[package] = [api]\n",
    "print('最后结果:', result)           \n",
    "            \n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c02d34d3",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# bs4高阶使用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "id": "d3a8bcbb",
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "所有名词的分类: {'general-concepts': 'General Concepts', 'class-apis-and-estimator-types': 'Class APIs and Estimator Types', 'target-types': 'Target Types', 'methods': 'Methods', 'parameters': 'Parameters', 'attributes': 'Attributes', 'data-and-sample-properties': 'Data and sample properties'}\n",
      "所有名词: ['1d', '1d array', '2d', '2d array', 'API', 'array-like', 'attribute', 'attributes', 'backwards compatibility', 'Public API only', 'As documented', 'Deprecation', 'Keyword arguments', 'Bug fixes and enhancements', 'Serialization', 'callable', 'categorical feature', 'clone', 'cloned', 'common tests', 'deprecation', 'dimensionality', 'docstring', 'double underscore', 'double underscore notation', 'dtype', 'data type', 'duck typing', 'early stopping', 'estimator instance', 'examples', 'experimental', 'evaluation metric', 'evaluation metrics', 'estimator tags', 'feature', 'features', 'feature vector', 'fitting', 'fitted', 'function', 'gallery', 'hyperparameter', 'hyper-parameter', 'impute', 'imputation', 'indexable', 'induction', 'inductive', 'joblib', 'label indicator matrix', 'multilabel indicator matrix', 'multilabel indicator matrices', 'leakage', 'data leakage', 'memmapping', 'memory map', 'memory mapping', 'missing values', 'narrative docs', 'narrative documentation', 'np', 'online learning', 'out-of-core', 'outputs', 'pair', 'parameter', 'parameters', 'param', 'params', 'pairwise metric', 'pairwise metrics', 'pd', 'precomputed', 'rectangular', 'sample', 'samples', 'sample property', 'sample properties', 'scikit-learn-contrib', 'scikit-learn enhancement proposals', 'SLEP', 'SLEPs', 'semi-supervised', 'semi-supervised learning', 'semisupervised', 'sparse matrix', 'sparse graph', 'matrix semantics', 'graph semantics', 'supervised', 'supervised learning', 'target', 'targets', 'transduction', 'transductive', 'unlabeled', 'unlabeled data', 'unsupervised', 'unsupervised learning', 'classifier', 'classifiers', 'clusterer', 'clusterers', 'density estimator', 'estimator', 'estimators', 'feature extractor', 'feature extractors', 'meta-estimator', 'meta-estimators', 'metaestimator', 'metaestimators', 'outlier detector', 'outlier detectors', 'predictor', 'predictors', 'regressor', 'regressors', 'transformer', 'transformers', 'vectorizer', 'vectorizers', 
'cross-validation splitter', 'CV splitter', 'cross-validation generator', 'cross-validation estimator', 'scorer', 'binary', 'continuous', 'continuous multioutput', 'continuous multi-output', 'multioutput continuous', 'multi-output continuous', 'multiclass', 'multi-class', 'multiclass multioutput', 'multi-class multi-output', 'multioutput multiclass', 'multi-output multi-class', 'multilabel', 'multi-label', 'multioutput', 'multi-output', 'binary classification', 'multiclass classification', 'multilabel classification', 'multioutput classification', 'outlier detection', 'classifier', 'clusterer', 'outlier detector', 'regressor', 'None (default)', 'An integer', 'A ', ' instance']\n"
     ]
    }
   ],
   "source": [
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "import re\n",
    "\n",
    "# 获取类别\n",
    "def get_groups(soup):\n",
    "    group_result = {}\n",
    "    div = soup.find_all('div', class_='sk-sidebar-toc')\n",
    "    div = BeautifulSoup(str(div[0]), 'html.parser')\n",
    "    a = div.find_all('a', class_='reference internal')\n",
    "    for i in a:\n",
    "        if i.get('href') != '#':\n",
    "            group_result[str(i.get('href')).replace('#','')] = i.contents[0]\n",
    "    return group_result\n",
    "# 获取词汇\n",
    "def get_words(soup, group_result):\n",
    "    word_list = []\n",
    "    word_href = {}\n",
    "    for k,v in group_result.items():\n",
    "        div = soup.find_all('section', id=k)\n",
    "        div = BeautifulSoup(str(div[0]), 'html.parser')\n",
    "\n",
    "        dt_list = div.find_all('dt')\n",
    "        for dt in dt_list:\n",
    "            word = ''\n",
    "            herf_list = []\n",
    "            contents = dt.contents\n",
    "\n",
    "            for i in contents:\n",
    "                if isinstance(i,str):\n",
    "                    word_list.append(i)\n",
    "                    word = i\n",
    "                else:\n",
    "                    try:\n",
    "                        herf_list.append(i.get('href'))\n",
    "                    except Exception as ex:\n",
    "                        continue\n",
    "\n",
    "            if len(herf_list) != 0:\n",
    "                word_href[word] = herf_list\n",
    "\n",
    "    return word_list, word_href\n",
    "\n",
    "# 爬取页面\n",
    "resp_body = requests.get('https://scikit-learn.org/stable/glossary.html').text\n",
    "soup = BeautifulSoup(resp_body, 'html.parser')\n",
    "\n",
    "# 获取名字分类\n",
    "group_result = get_groups(soup)\n",
    "print('所有名词的分类:',group_result)\n",
    "word_list, word_href = get_words(soup, group_result)\n",
    "print('所有名词:',word_list)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b4f706d9",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# 正则表达式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 172,
   "id": "83092a63",
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "全英文的词： ['API', 'array-like', 'attribute', 'attributes', 'backwards compatibility', 'Public API only', 'As documented', 'Deprecation', 'Keyword arguments', 'Bug fixes and enhancements', 'Serialization', 'callable', 'categorical feature', 'clone', 'cloned', 'common tests', 'deprecation', 'dimensionality', 'docstring', 'double underscore', 'double underscore notation', 'dtype', 'data type', 'duck typing', 'early stopping', 'estimator instance', 'examples', 'experimental', 'evaluation metric', 'evaluation metrics', 'estimator tags', 'feature', 'features', 'feature vector', 'fitting', 'fitted', 'function', 'gallery', 'hyperparameter', 'hyper-parameter', 'impute', 'imputation', 'indexable', 'induction', 'inductive', 'joblib', 'label indicator matrix', 'multilabel indicator matrix', 'multilabel indicator matrices', 'leakage', 'data leakage', 'memmapping', 'memory map', 'memory mapping', 'missing values', 'narrative docs', 'narrative documentation', 'np', 'online learning', 'out-of-core', 'outputs', 'pair', 'parameter', 'parameters', 'param', 'params', 'pairwise metric', 'pairwise metrics', 'pd', 'precomputed', 'rectangular', 'sample', 'samples', 'sample property', 'sample properties', 'scikit-learn-contrib', 'scikit-learn enhancement proposals', 'SLEP', 'SLEPs', 'semi-supervised', 'semi-supervised learning', 'semisupervised', 'sparse matrix', 'sparse graph', 'matrix semantics', 'graph semantics', 'supervised', 'supervised learning', 'target', 'targets', 'transduction', 'transductive', 'unlabeled', 'unlabeled data', 'unsupervised', 'unsupervised learning', 'classifier', 'classifiers', 'clusterer', 'clusterers', 'density estimator', 'estimator', 'estimators', 'feature extractor', 'feature extractors', 'meta-estimator', 'meta-estimators', 'metaestimator', 'metaestimators', 'outlier detector', 'outlier detectors', 'predictor', 'predictors', 'regressor', 'regressors', 'transformer', 'transformers', 'vectorizer', 'vectorizers', 'cross-validation splitter', 'CV splitter', 
'cross-validation generator', 'cross-validation estimator', 'scorer', 'binary', 'continuous', 'continuous multioutput', 'continuous multi-output', 'multioutput continuous', 'multi-output continuous', 'multiclass', 'multi-class', 'multiclass multioutput', 'multi-class multi-output', 'multioutput multiclass', 'multi-output multi-class', 'multilabel', 'multi-label', 'multioutput', 'multi-output', 'binary classification', 'multiclass classification', 'multilabel classification', 'multioutput classification', 'outlier detection', 'classifier', 'clusterer', 'outlier detector', 'regressor', 'An integer', 'A ', ' instance']\n",
      "A B 开头的词： ['API', 'array-like', 'attribute', 'attributes', 'backwards compatibility', 'As documented', 'Bug fixes and enhancements', 'binary', 'binary classification', 'An integer', 'A ']\n"
     ]
    }
   ],
   "source": [
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "import re\n",
    "\n",
    "\n",
    "# 爬取页面\n",
    "resp_body = requests.get('https://scikit-learn.org/stable/glossary.html').text\n",
    "soup = BeautifulSoup(resp_body, 'html.parser')\n",
    "\n",
    "# 获取名字分类\n",
    "group_result = get_groups(soup)\n",
    "word_list, word_href = get_words(soup, group_result)\n",
    "\n",
    "\n",
    "list1 = []\n",
    "list2 = []\n",
    "reg1 = r'^[a-zA-Z_]+$';\n",
    "reg2 = r'^[AaBb].*'\n",
    "for word in word_list:\n",
    "    if re.match(reg1, word.replace(' ','').replace('-','')):  \n",
    "        list1.append(word)\n",
    "    if re.match(reg2, word):\n",
    "        list2.append(word)\n",
    "        \n",
    "print('全英文的词：', list1)\n",
    "print('A B 开头的词：', list2)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 160,
   "id": "653d18d3",
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Please reset your right Email address!\n"
     ]
    }
   ],
   "source": [
    "import re  \n",
    "text = '2aasfdasdfg222'\n",
    "if re.match(r'^[a-zA-Z_]+$',text):  \n",
    "    print('Email address is Right!')  \n",
    "else:  \n",
    "    print('Please reset your right Email address!')  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 170,
   "id": "cd5e484b",
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "API\n",
      "array-like\n",
      "attribute\n",
      "attributes\n",
      "backwards compatibility\n",
      "As documented\n",
      "Bug fixes and enhancements\n",
      "binary\n",
      "binary classification\n",
      "An integer\n",
      "A \n"
     ]
    }
   ],
   "source": [
    "for word in word_list:\n",
    "    if re.match(r'^[AaBb].*',word):  \n",
    "        print(word)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "908f658c",
   "metadata": {},
   "source": [
    "# Python网络编程基础"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d6f5013c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 服务端\n",
    "import socket\n",
    "# 建立一个服务端\n",
    "server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n",
    "server.bind(('localhost',9090)) #绑定要监听的端口\n",
    "server.listen(5) #开始监听 表示可以使用五个链接排队\n",
    "while True:# conn就是客户端链接过来而在服务端为期生成的一个链接实例\n",
    "    conn,addr = server.accept() #等待链接,多个链接的时候就会出现问题,其实返回了两个值\n",
    "    print(conn,addr)\n",
    "    while True:\n",
    "        data = conn.recv(1024)  #接收数据\n",
    "        print('recive:',data.decode()) #打印接收到的数据\n",
    "        conn.send(data.upper()) #然后再发送数据\n",
    "    conn.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c55caad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 客户端\n",
    "import socket# 客户端 发送一个数据，再接收一个数据\n",
    "client = socket.socket(socket.AF_INET,socket.SOCK_STREAM) #声明socket类型，同时生成链接对象\n",
    "client.connect(('localhost',9090)) #建立一个链接，连接到本地的6969端口\n",
    "while True:\n",
    "    msg = '你好呀！'  #strip默认取出字符串的头尾空格\n",
    "    client.send(msg.encode('utf-8'))  #发送一条信息 python3 只接收btye流\n",
    "    data = client.recv(1024) #接收一个信息，并指定接收的大小 为1024字节\n",
    "    print('recv:',data.decode()) #输出我接收的信息\n",
    "client.close() #关闭这个链接"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4e788c96",
   "metadata": {},
   "source": [
    "# tornado基础使用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee52d82c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tornado.ioloop\n",
    "import tornado.web\n",
    "    \n",
    "class MainHandler(tornado.web.RequestHandler):\n",
    "    def get(self):\n",
    "        a = self.get_query_argument(\"a\")  # 获取的为列表格式\n",
    "        b = self.get_query_argument(\"b\")  # 只会获取传来的最后一个值\n",
    "        self.write(self._sum(a,b))\n",
    "\n",
    "    def post(self):\n",
    "        a = self.get_query_argument(\"a\")   # 获取的为列表格式\n",
    "        b = self.get_query_argument(\"b\")    # 只会获取传来的最后一个值\n",
    "        self.write(self._sum(a,b))\n",
    "\n",
    "    def _sum(self, a,b):\n",
    "        return str(int(a)+int(b))\n",
    "\n",
    "\n",
    "def make_app():\n",
    "    return tornado.web.Application([\n",
    "        (r\"/\", MainHandler),\n",
    "    ])\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    app = make_app()\n",
    "    app.listen(5000)\n",
    "    tornado.ioloop.IOLoop.current().start()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ce220e3c",
   "metadata": {},
   "source": [
    "# tornado用户注册/登录"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bea10687",
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python\n",
    "# -*- coding: utf-8 -*-\n",
    "# @File  : main.py\n",
    "# @Author: Richard Chiming Xu\n",
    "# @Date  : 2022/3/17\n",
    "# @Desc  : service entry point — user register/login backed by a CSV \"database\"\n",
    "\n",
    "import tornado.ioloop\n",
    "import tornado.web\n",
    "import pandas as pd\n",
    "import json\n",
    "import uuid\n",
    "\n",
    "\n",
    "# Uniform response envelope returned by every endpoint.\n",
    "class Response():\n",
    "    def __init__(self, code, data, msg):\n",
    "        self.code = code  # 0 = success, -1 = failure\n",
    "        self.data = data  # payload echoed back to the caller\n",
    "        self.msg = msg    # human-readable message\n",
    "\n",
    "# json.dumps ``default=`` hook: serialize a Response as a plain dict.\n",
    "# (Parameter renamed — the original shadowed the Response class itself.)\n",
    "def convert2json(resp):\n",
    "    return {\n",
    "        'code': resp.code,\n",
    "        'data': resp.data,\n",
    "        'msg': resp.msg\n",
    "    }\n",
    "\n",
    "class RegisterHandler(tornado.web.RequestHandler):\n",
    "\n",
    "    def post(self, *args, **kwargs):\n",
    "        request_body = json.loads(self.request.body.decode())\n",
    "        user = request_body['username']\n",
    "        pwd = request_body['password']\n",
    "        # Reject a value that contains a space OR is longer than 18 chars.\n",
    "        # The original used `and`, which only rejected when BOTH held.\n",
    "        if ' ' in user or len(user) > 18:\n",
    "            return self.write(json.dumps(Response(-1, None, '非法用户名!'),default=convert2json))\n",
    "        if ' ' in pwd or len(pwd) > 18:\n",
    "            return self.write(json.dumps(Response(-1, None, '非法密码!'),default=convert2json))\n",
    "        # Simulated registration: the CSV file stands in for a real DB.\n",
    "        db_data = pd.read_csv('user.csv')\n",
    "\n",
    "        # DataFrame.append was removed in pandas 2.0; concat a one-row frame instead.\n",
    "        new_row = pd.DataFrame([{\n",
    "            'uid': str(uuid.uuid4()),\n",
    "            'user': user,\n",
    "            'pwd': pwd  # NOTE(review): stored in plain text — hash passwords in real code\n",
    "        }])\n",
    "        db_data = pd.concat([db_data, new_row], ignore_index=True)\n",
    "        db_data.to_csv('user.csv', index = False)  # simulated DB write\n",
    "\n",
    "        self.write(json.dumps(Response(0, request_body,'注册成功!'),default=convert2json))\n",
    "\n",
    "\n",
    "class LoginHandler(tornado.web.RequestHandler):\n",
    "\n",
    "    def post(self, *args, **kwargs):\n",
    "        request_body = json.loads(self.request.body.decode())\n",
    "        user = request_body['username']\n",
    "        pwd = request_body['password']\n",
    "\n",
    "        db_data = pd.read_csv('user.csv')  # simulated DB read\n",
    "        # Renamed from `user` — the original reused the username variable for the match result.\n",
    "        matched = db_data[(db_data['user'] == user) & (db_data['pwd'] == pwd)]\n",
    "        if len(matched) > 0:\n",
    "            self.write(json.dumps(Response(0, request_body,'登录成功!'),default=convert2json))\n",
    "        else:\n",
    "            # Failure must carry the failure code — the original returned 0 (success) here too.\n",
    "            self.write(json.dumps(Response(-1, request_body, '登录失败!'), default=convert2json))\n",
    "\n",
    "def make_app():\n",
    "    return tornado.web.Application([\n",
    "        (r\"/register\", RegisterHandler, ),\n",
    "        (r\"/login\", LoginHandler,)\n",
    "    ])\n",
    "\n",
    "'''\n",
    "    注册接口：http://localhost:5000/register\n",
    "    {\n",
    "        \"username\": \"ls\",\n",
    "        \"password\": \"root\"\n",
    "    }\n",
    "    \n",
    "    登录接口：http://localhost:5000/login\n",
    "    {\n",
    "        \"username\": \"ls\",\n",
    "        \"password\": \"root\"\n",
    "    }\n",
    "'''\n",
    "if __name__ == \"__main__\":\n",
    "    app = make_app()\n",
    "    app.listen(5000)\n",
    "    tornado.ioloop.IOLoop.current().start()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1d885164-93ac-46e4-871a-8e0eb5ff53eb",
   "metadata": {},
   "source": [
    "# tornado部署机器学习模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "e417ea40",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>label</th>\n",
       "      <th>review</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>很快，好吃，味道足，量大</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>没有送水没有送水没有送水</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>非常快，态度好。</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>方便，快捷，味道可口，快递给力</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>菜味道很棒！送餐很及时！</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11982</th>\n",
       "      <td>0</td>\n",
       "      <td>以前几乎天天吃，现在调料什么都不放，</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11983</th>\n",
       "      <td>0</td>\n",
       "      <td>昨天订凉皮两份，什么调料都没有放，就放了点麻油，特别难吃，丢了一份，再也不想吃了</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11984</th>\n",
       "      <td>0</td>\n",
       "      <td>凉皮太辣,吃不下都</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11985</th>\n",
       "      <td>0</td>\n",
       "      <td>本来迟到了还自己点！！！</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11986</th>\n",
       "      <td>0</td>\n",
       "      <td>肉夹馍不错，羊肉泡馍酱肉包很一般。凉面没想象中好吃。送餐倒是很快。</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>11987 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       label                                    review\n",
       "0          1                              很快，好吃，味道足，量大\n",
       "1          1                              没有送水没有送水没有送水\n",
       "2          1                                  非常快，态度好。\n",
       "3          1                           方便，快捷，味道可口，快递给力\n",
       "4          1                              菜味道很棒！送餐很及时！\n",
       "...      ...                                       ...\n",
       "11982      0                        以前几乎天天吃，现在调料什么都不放，\n",
       "11983      0  昨天订凉皮两份，什么调料都没有放，就放了点麻油，特别难吃，丢了一份，再也不想吃了\n",
       "11984      0                                 凉皮太辣,吃不下都\n",
       "11985      0                              本来迟到了还自己点！！！\n",
       "11986      0         肉夹馍不错，羊肉泡馍酱肉包很一般。凉面没想象中好吃。送餐倒是很快。\n",
       "\n",
       "[11987 rows x 2 columns]"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "import pandas as pd\n",
    "import jieba\n",
    "import re\n",
    "import numpy as np\n",
    "import joblib\n",
    "\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "# Load the stopword list (one word per line).\n",
    "def get_stopwords():\n",
    "    \"\"\"Return the Baidu stopword file as a list of strings.\"\"\"\n",
    "    stop_words = []\n",
    "    with open('baidu_stopwords.txt', 'r', encoding='utf-8') as f:\n",
    "        for line in f.readlines():\n",
    "            stop_words.append(line.replace('\\n', ''))  # drop the trailing newline\n",
    "    return stop_words\n",
    "\n",
    "# Tokenize with jieba\n",
    "def cut(content, stop_words):\n",
    "    \"\"\"Strip punctuation, segment ``content`` with jieba (cut_all mode),\n",
    "    drop stopwords, and return the tokens joined by single spaces.\"\"\"\n",
    "    # Remove ASCII and full-width punctuation before segmenting.\n",
    "    content = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——！，。？、~@#￥%……&*（）]\", \"\",content)\n",
    "    \n",
    "    result = []\n",
    "    try:\n",
    "        seg_list = jieba.lcut(content, cut_all=True)  # full mode: emit all candidate words\n",
    "        for i in seg_list:\n",
    "            if i not in stop_words:\n",
    "                result.append(i)\n",
    "        \n",
    "    except AttributeError as ex:\n",
    "        # Non-string input (presumably NaN from the CSV — TODO confirm):\n",
    "        # show the offending value, then re-raise.\n",
    "        print(content)\n",
    "        raise ex\n",
    "    return ' '.join(result)\n",
    "\n",
    "# Load the labelled review data (per the output below: label 1 = positive, 0 = negative)\n",
    "train_data = pd.read_csv('data/waimai_10k.csv')\n",
    "\n",
    "# Stopword list\n",
    "stop_words = get_stopwords()\n",
    "# Tokenize every review into a space-separated string\n",
    "train_data['words'] = train_data['review'].apply(lambda x: cut(x, stop_words))\n",
    "\n",
    "# TF-IDF features over the tokenized reviews\n",
    "vectorizer = TfidfVectorizer()\n",
    "X = vectorizer.fit_transform(train_data['words'].tolist())\n",
    "\n",
    "# Train the sentiment classifier\n",
    "model = LogisticRegression(random_state=0).fit(X, train_data['label'])\n",
    "\n",
    "# Persist both the vectorizer and the classifier for the serving process\n",
    "joblib.dump(vectorizer,'tfidf_model.pkl')\n",
    "joblib.dump(model,'classify_model.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "c9b7caf6-f0e0-43b5-b513-bc6633c7faf6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tornado.ioloop\n",
    "import tornado.web\n",
    "import json\n",
    "import joblib\n",
    "import re\n",
    "import jieba\n",
    "\n",
    "# Load the stopword list (one word per line).\n",
    "def get_stopwords():\n",
    "    stop_words = []\n",
    "    with open('../baidu_stopwords.txt', 'r', encoding='utf-8') as f:\n",
    "        for line in f.readlines():\n",
    "            stop_words.append(line.replace('\\n', ''))  # drop the trailing newline\n",
    "    return stop_words\n",
    "\n",
    "\n",
    "# Tokenize with jieba — must match the preprocessing used at training time.\n",
    "def cut(content, stop_words):\n",
    "    # Remove ASCII and full-width punctuation before segmenting.\n",
    "    content = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——！，。？、~@#￥%……&*（）]\", \"\", content)\n",
    "\n",
    "    result = []\n",
    "    try:\n",
    "        seg_list = jieba.lcut(content, cut_all=True)\n",
    "        for i in seg_list:\n",
    "            if i not in stop_words:\n",
    "                result.append(i)\n",
    "\n",
    "    except AttributeError as ex:\n",
    "        print(content)\n",
    "        raise ex\n",
    "    return ' '.join(result)\n",
    "\n",
    "\n",
    "# Load the models and stopwords ONCE at startup — the original re-read\n",
    "# both .pkl files and the stopword file from disk on every request.\n",
    "VECTORIZER = joblib.load('../model/tfidf_model.pkl')\n",
    "MODEL = joblib.load('../model/classify_model.pkl')\n",
    "STOP_WORDS = get_stopwords()\n",
    "\n",
    "\n",
    "class MainHandler(tornado.web.RequestHandler):\n",
    "\n",
    "    def post(self):\n",
    "        # Expected request body: {\"text\": \"...\"}\n",
    "        request_body = json.loads(self.request.body.decode())\n",
    "        text = request_body['text']\n",
    "        # Tokenize with the same pipeline as training.\n",
    "        words = cut(text, STOP_WORDS)\n",
    "        # Vectorize and predict; the response is the repr of the label array, e.g. \"[1]\".\n",
    "        X = VECTORIZER.transform([words])\n",
    "        self.write(str(MODEL.predict(X)))\n",
    "\n",
    "\n",
    "def make_app():\n",
    "    return tornado.web.Application([\n",
    "        (r\"/\", MainHandler),\n",
    "    ])\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    app = make_app()\n",
    "    app.listen(5000)\n",
    "    tornado.ioloop.IOLoop.current().start()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5cffc01d-57f3-47a0-a913-f4abd1e8a921",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
