/**
 * Display-only Python snippet (template literal; lines end in `<br>` for HTML
 * rendering). It automates MS Word via win32com: opens a .docx, uses
 * Find.Execute to locate a start marker and an end marker, and records each
 * marker's character offset (Selection.Start) for later text extraction.
 * NOTE(review): the string content (including Chinese comments) is runtime
 * data rendered to users, so it is kept byte-for-byte verbatim.
 */
export const code1 = `import win32com.client as win32<br>
from win32com.client import constants<br>
import os<br>
# 打开word应用程序<br>
word = win32.gencache.EnsureDispatch('Word.Application')<br>
# 是否可视化<br>
word.Visible = 0<br>
# 源文件路径<br>
file_path = r'D:/源文件/文件名称.docx'  #根据操作的文件名称显示<br>
# 打开<br>
doc = word.Documents.Open(file_path)<br>
# 光标start的查找<br>
# 赋值对象<br>
search_range = doc.Content<br>
# 查找内容<br>
search_range.Find.Execute(FindText="截取文本开始位置")  #截取文本开始位置需学生输入<br>
# 选中查找到的内容<br>
search_range.Select()<br>
# 光标左移<br>
word.Selection.MoveLeft()<br>
# 将光标位置赋予start<br>
start = word.Selection.Start.numerator<br>
print(start)<br>
<br>
# 光标end的查找  同上<br>
search_range = doc.Content<br>
search_range.Find.Execute(FindText="截取文本结束位置")  #截取文本结束位置需学生输入<br>
search_range.Select()<br>
word.Selection.MoveLeft()<br>
end = word.Selection.Start.numerator<br>
print(end)`


/**
 * Display-only Python snippet: walks a font directory, merges the characters
 * of all .json/.txt files, de-duplicates them with set(), and writes the
 * result to a filter file (used to build a trimmed font atlas).
 * NOTE(review): the embedded Python as displayed contains apparent
 * transcription errors — a stray `collections.OrderedDict.fromkeys(context):`
 * statement near the top and a missing indent on `contentList = []` inside
 * readallfiles(). Confirm whether the snippet is intentionally shown as-is;
 * the string is kept verbatim here because it is runtime display data.
 */
export const code2 = `import sys<br>
import os<br>
from collections import OrderedDict<br>
<br>
# 内容顺序很重要，所以使用 <br>
collections.OrderedDict.fromkeys(context):<br>
#print("".join(OrderedDict.fromkeys(context)))<br>
<br>
# 传入相对路径<br>
targetdir = ""<br>
targetName = ""<br>
<br>
if (len(sys.argv) &gt; 1):<br>
    targetdir = sys.argv[1]<br>
    targetName = sys.argv[2]<br>
else:<br>
    targetdir = "..\\FontZip\\Font"<br>
    targetName = "FilterFontZip.txt"<br>
<br>
# print(targetdir, targetName)<br>
<br>
def getfilepaths(suffixNameList):<br>
    paths = []<br>
    l = os.walk(targetdir)<br>
    for root, dirs, files in l:  # os.path.curdir<br>
        for file in files:<br>
            expandName = os.path.splitext(file)[1]<br>
            if expandName in suffixNameList:  # 匹配后缀名称<br>
                paths.append(os.path.join(root, file))<br>
        break<br>
    return paths<br>
<br>
def readfile(path, contentList):<br>
    print(path)<br>
    try:<br>
        f = open(path, mode="r", encoding="utf-8")<br>
    except UnicodeDecodeError as e:<br>
        f = open(path, mode="r")<br>
    lines = f.readlines()<br>
    # contentList.extend("".join(OrderedDict.fromkeys(lines)))<br>
    contentList.extend("".join(set(lines)))<br>
    f.close()<br>
    return contentList<br>
<br>
def readallfiles(filepaths):<br>
contentList = []<br>
    for path in filepaths:<br>
        readfile(path, contentList)<br>
    return contentList<br>
<br>
def writefile(filepath, textlist):<br>
    texts = "".join(textlist).replace("\n", "").replace("\t", "").replace("\r", "")<br>
    f = open(filepath, mode="w+", encoding="utf-8")<br>
    f.seek(0)<br>
    f.truncate()  # 清空文件内容<br>
    # f.write("".join(OrderedDict.fromkeys(texts)))<br>
    f.write("".join(set(texts)))<br>
    f.close()<br>
<br>
def filterFont():<br>
    filePaths = getfilepaths(['.json', '.txt'])<br>
    mergeList = readallfiles(filePaths)<br>
    writePath = os.path.join(targetdir, targetName)<br>
    writefile(writePath, mergeList)<br>
<br>
if __name__ == '__main__':<br>
    filterFont()`

/**
 * Display-only Python snippet: de-duplicates lines of a text file using a
 * set, preserving first-seen order.
 * NOTE(review): as displayed it opens the SAME path for both 'r' and 'w';
 * the 'w' open truncates the file before it is read, so the loop will likely
 * see no data (the snippet's own comment acknowledges the overwrite risk).
 * Confirm the intended output path; string kept verbatim.
 */
export const code3 = `f_read = open(r'./文本数据任务名称.txt', 'r', encoding='utf-8')  # 将需要去除重复值的txt文本输入<br>
f_write = open(r'./文本数据任务名称.txt', 'w',encoding='utf-8')  # 去除重复值之后，生成新的txt文本 --“去除重复值后的文本.txt”,如果写入到原txt会把之前的数据覆盖<br>
data = set()  # 创建一个存放数据的集合<br>
for a in [a.strip('\n') for a in list(f_read)]:  # 把读取进来的数据变成列表，再逐个循环读取列表里面的数据（去除开头和结尾的换行）<br>
    if a not in data:  # 判断循环的数据在不在集合里，不在就添加进集合<br>
        data.add(a)<br>
        f_write.write(a + '\n')<br>
f_read.close()<br>
f_write.close()<br>
print('完成')`

/**
 * Display-only Python snippet: line-level de-duplication that re-reads the
 * entire output file before writing each source line (quadratic in line
 * count, but simple — only appends lines not already present).
 */
export const code4 = `fi = open('test.txt', 'r') # 打开需要处理的test.txt<br>
txt = fi.readlines()<br>
with open('test_OK.txt', 'a') as f:#创建处理去重复后的结果保存文档，防止找不到文件出错<br>
    f.close()<br>
for w in txt:<br>
    fi2 = open('test_OK.txt', 'r')<br>
    txt2 = fi2.readlines()<br>
    with open('test_OK.txt', 'a') as f:  # 打开目标文件开始写入<br>
        if w not in txt2:    #如果从源文档中读取的内容不在目标文档中则写入，否则跳过，实现去除重复功能！<br>
            f.write(w)  <br>
        else:<br>
            print("已去除重复--&gt;"+w)<br>
        f.close()<br>
fi.close()`

/**
 * Display-only Python snippet: finds characters that differ between a correct
 * sentence and an erroneous one (with a ±2-char context window), then pairs
 * candidate correct/error fragments by cosine similarity of their
 * character-count vectors.
 * NOTE(review): the displayed docstrings lack their triple quotes, and the
 * snippet is truncated — it literally ends with the text "undefined".
 * Kept verbatim as runtime display data.
 */
export const code5 = `
import numpy as np<br>
<br>
def correct_error(sentence1,sentence2):<br>
        :param sentence1: 正确文本<br>
        :param sentence2:错误转换的文本<br>
        :return: 正确字周围的词数组，错误字周围的词数组<br>
    correct = []<br>
    for i in range(len(sentence1)):#遍历文本<br>
        if sentence1[i] not in sentence2:<br>
            correct1 = sentence1[i - 2:i + 2]  # 存在错误的正确字，附近的词 （i - 2:i + 2，我取长度为4即一个成语的长度<br>
            correct.append(correct1)<br>
    error = []<br>
    for j in range(len(sentence2)):<br>
        if sentence2[j] not in sentence1:<br>
            error1 = sentence2[j - 2:j + 2]  # 存在错误的错误字，附近的词<br>
            error.append(error1)<br>
    return correct,error<br>
<br>
#余弦距离计算相似度<br>
def cosine_similarity(sentence1: str, sentence2: str) -> float:<br>
    :param sentence1: 正确词<br>
    :param sentence2:错误词<br>
    :return: 两个词 的相似度<br>
    seg1 = [word for word in sentence1]<br>
    seg2 = [word for word in sentence2]<br>
    word_list = list(set([word for word in seg1 + seg2]))#建立词库<br>
    word_count_vec_1 = []<br>
    word_count_vec_2 = []<br>
    for word in word_list:<br>
        word_count_vec_1.append(seg1.count(word))#文本1统计在词典里出现词的次数<br>
        word_count_vec_2.append(seg2.count(word))#文本2统计在词典里出现词的次数<br>
<br>
    vec_1 = np.array(word_count_vec_1)<br>
    vec_2 = np.array(word_count_vec_2)<br>
    #余弦公式<br>
<br>
    num = vec_1.dot(vec_2.T)<br>
    denom = np.linalg.norm(vec_1) * np.linalg.norm(vec_2)<br>
    cos = num / denom<br>
<br>
    sim = cos#<br>
    return sim<br>
<br>
def result(sentence1,sentence2):<br>
    correct,error=correct_error(sentence1,sentence2)<br>
<br>
    finally_result=[]#存放[[正确词1，错误词1],[正确词2，错误词2]]<br>
    similarity = []<br>
    for str1 in correct:#遍历每一个候选正确词<br>
        for str2 in error:#遍历每一个候选错误词<br>
            similarity1 = cosine_similarity(str1, str2)#计算候选正确词 与候选错误词之间的相似度<br>
<br>
            similarity.append(similarity1)#相似度存放进数组<br>
        print('相似度',similarity)<br>
        if max(similarity)==0.0:<br>
            break<br>
        else:<br>
            max_index = similarity.index(max(similarity))  # 相似度最大的错误词所在索undefined`


/**
 * Display-only Python snippet: noisy-channel spelling correction — edit-based
 * candidate generation against a vocabulary file, a channel model
 * P(wrong|correct) parsed from a confusion file, and a bigram language model
 * built from the NLTK Brown corpus, combined with additive smoothing in log
 * space to pick the best correction for each out-of-vocabulary test word.
 * NOTE(review): as displayed, recognition() does `cw, c = generate1` without
 * calling the function — an apparent transcription error; the string is kept
 * verbatim because it is runtime display data.
 */
export const code6 = `from nltk import *<br>
from nltk.corpus import brown<br>
#每次访问数据需要添加数据至路径当中<br>
corpus = brown.sents()<br>
#.sent()整个语料库中的句子,sents(fileids=[f1,f2..],categories=[c1,c2...])<br>
import numpy as np<br>
<br>
# 读入字典<br>
#set() 函数创建一个无序不重复元素集，可进行关系测试，删除重复数据，还可以计算交集、差集、并集等<br>
vocabs = set([lines.rstrip() for lines in open('vocab.txt')])<br>
<br>
# 生成最短编辑距离的正确单词<br>
# 1.生成候选集合和候选项<br>
def generate1(wrong_word):<br>
    letters = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
               'v', 'w', 'x', 'y', 'z'}<br>
    right_word_split = [[wrong_word[:i], wrong_word[i:]] for i in range(len(wrong_word) + 1)]  # 集合没有切片操作<br>
    insert = {R + M + L for R, L in right_word_split for M in letters}  # 使用}为集合，使用[为list<br>
    replace = {R + M + L[1:] for R, L in right_word_split for M in letters}<br>
    # 集合{'aauaa', 'aafa', 'aaaza', 'uaaaa', 'faaa', 'afaa', 'aaada', 'aaa', 'afaaa', 'aaaua', 'alaa', 'amaaa', 'aaaax', 'xaaa', 'daaaa', 'asaaa'<br>
    delete = {R + L[1:] for R, L in right_word_split for M in letters}  # 集合<br>
    # 因为是集合，不支持相加操作<br>
    candidates = insert | replace | delete  # 集合形式相与<br>
    candidate_word = set([c for c in candidates if c in vocabs])  # 集合形式<br>
    return candidate_word, candidates<br>
<br>
#2.为多次迭代做准备<br>
def generate2(candidates):<br>
    # 定义一个空字典c = {}<br>
    c = set()  # 定义一个空集合<br>
    d = set()<br>
    for candidate in candidates:<br>
        a, b = generate1(candidate)<br>
        c.update(a)  # 只有列表才能使用append，字典，集合使用update.<br>
        d.update(b)<br>
    return c, d<br>
<br>
def recognition(wrongword):<br>
    wrong_word = wrongword<br>
    cw, c = generate1<br>
    if cw:<br>
        return cw<br>
    else:<br>
        for k in range(len(wrong_word)):<br>
            a, c = generate2(c)<br>
            cw.update(a)<br>
            if cw:<br>
                return cw<br>
<br>
#判别模型P(错误|正确)<br>
spell_err = open('文件名称.txt').readlines()<br>
print(spell_err)<br>
prob = {}             #初始大索引<br>
for lines in spell_err:                  #lines为字符串<br>
    line = lines.rstrip('\\n').split(':') #rstrip删除结尾指定字符，line为列表<br>
    correct = line[0]                    #correct为字符串<br>
    wrong =[x.strip() for x in line[1].split(',')]    #wrong为列表,为了消除wrong中字符的前面空格<br>
    prob[correct] = {} #初始小索引<br>
    for wro in wrong:<br>
     prob[correct][wro] = 1.0/len(wrong)<br>
<br>
#语言模型<br>
unigram_freqency = {} #字典类型<br>
bigram_freqency = {}<br>
for lines in corpus:<br>
  lines = ['']+lines<br>
  for i in range(len(lines)-1):<br>
    a = lines[i]<br>
    b = lines[i:i + 2]<br>
    c = "".join(b)  # 将列表b转换成字符串c，其中""里输入的是分别字符串的符号<br>
    #lines[i]为str类型<br>
    #lines[i:i+2]为list类型<br>
    if a in unigram_freqency:<br>
     unigram_freqency[a] += 1<br>
    else:<br>
     unigram_freqency[a] = 1<br>
    if c in bigram_freqency:<br>
     bigram_freqency[c] += 1<br>
    else:<br>
     bigram_freqency[c] = 1<br>
<br>
def combination_prob(wrong_word,corrected_candidate,left_word,right_word):<br>
 p1 = {}<br>
 for candidate in corrected_candidate:<br>
     #求P(正确)<br>
    forward_combination = left_word + ' ' + candidate<br>
    backward_combination = candidate + ' ' + right_word<br>
    p1[candidate] = 0<br>
    if candidate in unigram_freqency and forward_combination in bigram_freqency:<br>
        p1[candidate] += np.log(bigram_freqency[forward_combination] + 1) / (unigram_freqency[candidate] + len(unigram_freqency))<br>
    else:<br>
        if candidate in unigram_freqency:<br>
           p1[candidate] += np.log(1 / (unigram_freqency[candidate] + len(unigram_freqency)))<br>
        else:<br>
           p1[candidate] += np.log(1 / len(unigram_freqency))<br>
<br>
    if candidate in unigram_freqency and backward_combination in bigram_freqency:<br>
        p1[candidate] += np.log((bigram_freqency[backward_combination]+1)/(unigram_freqency[candidate]+len(unigram_freqency)))<br>
    else:<br>
        if candidate in unigram_freqency:<br>
            p1[candidate] += np.log(1/(unigram_freqency[candidate]+len(unigram_freqency)))<br>
        else:<br>
            p1[candidate] += np.log(1/(len(unigram_freqency)))<br>
<br>
    #求P(错误|正确)<br>
    if candidate in prob and wrong_word in prob[candidate]:<br>
        p1[candidate] += np.log(prob[candidate][wrong_word])<br>
    else:<br>
        p1[candidate] += np.log(0.01)<br>
 return p1<br>
<br>
# 判断字是否出错<br>
# s = np.loadtxt('fanren.txt', dtype=str, delimiter='0' )<br>
# 因为np.load只能处理长度相同的字符串<br>
lines = open('testdata.txt').readlines()<br>
# 逐行遍历<br>
for l in lines:<br>
    # 将每一行去空格再分割成3部分<br>
    part = l.rstrip().split('\\t')<br>
    # 取其中的第三部分也即是句子中的每一个单词<br>
    full_part = part[2]<br>
    testword = [''] + full_part.split()<br>
    corrected_word = {}<br>
    for i in range(1,len(testword)-1):<br>
        if testword[i] not in vocabs:# 如果测试词不在词库内，该词为错误的词，也即是需要修改的词<br>
            corrected_candidate = recognition(testword[i])<br>
            p = combination_prob(testword[i],corrected_candidate,testword[i-1],testword[i+1])<br>
            word = list(p.keys())[list(p.values()).index(max(list(p.values())))]<br>
            corrected_word[testword[i]] = word<br>
            print(corrected_word[testword[i]])`
/**
 * Display-only Python snippet: uses the PaddleHub `auto_punc` module to add
 * punctuation to plain text read line-by-line from a source file and appends
 * the punctuated result to an output file.
 */
export const code7 = `import paddlehub as hub<br>
<br>
model = hub.Module(name='auto_punc', version='1.0.0')<br>
<br>
# 定义方法<br>
def addpunc(txtpath, savetxt):<br>
    f = open(txtpath, encoding = "utf-8")<br>
    # 输出读取到的数据<br>
    txtstr = f.read().split("\\n")<br>
    punc_texts = model.add_puncs(txtstr)<br>
    f.close()<br>
    str1 = "\\n".join(punc_texts)<br>
    print('转换成功:', str1)<br>
    with open(savetxt, "a", encoding='utf-8') as fc:<br>
        fc.write(str1)  # 写入文件<br>
        fc.write("\\n\\n")<br>
        fc.close()<br>
<br>
if __name__ == '__main__':<br>
    # 存放要加标点符号的文字<br>
    txtpath = r'D:\\A\\Project_1\\source.txt'<br>
    # 保存加号标点符号的文字<br>
    savetxt = r'D:\\A\\Project_1\\punc.txt'<br>
    # 调用方法<br>
    addpunc(txtpath, savetxt)`
/**
 * Display-only Python snippet: Norvig-style spell corrector — train word
 * frequencies from a corpus, generate edit-distance-1/2 candidates, and pick
 * the most frequent known candidate.
 * NOTE(review): whitespace was lost in transcription (`forf`, `returnmodel`,
 * `returnset`, `returnmax`, and missing body indentation), so the snippet is
 * not runnable as shown; kept verbatim as runtime display data.
 */
export const code8 = `def words (text): return re.findall('[a-z]+', text.lower())#正则表达式<br>

def train (features):   #训练模型<br>
model = collections.defaultdict( lambda : 1)<br>
forf in features:<br>
model[f] += 1<br>
returnmodel<br>
NWORDS = train(words(file('big.txt').read()))<br>

def edits1 (word):  #第一次编辑<br>
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]<br>
deletes = [a + b[1:] for a, b in splits if b]<br>
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]<br>
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]<br>
inserts = [a + c + b for a, b in splits for c in alphabet]<br>
returnset(deletes + transposes + replaces + inserts)<br>

def known_edits2 (word):  #第二次编辑<br>
returnset(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)<br>

def known (words):  <br>
returnset(w for w in words if w in NWORDS)<br>

def correct (word):  #填入建议词<br>
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]<br>
returnmax(candidates, key=NWORDS.get)`
/**
 * Display-only Python snippet: hot-deck style imputation of missing values
 * with sklearn's KNNImputer (n_neighbors=2), demonstrated on a synthetic
 * classification dataset where 20% of one column is blanked out and the true
 * values are retained for comparison.
 * NOTE(review): get_dataset()'s docstring lacks its triple quotes as
 * displayed; kept verbatim as runtime display data.
 */
export const code9 = `import pandas as pd<br>
import numpy as np<br>
def hot_deck_imputation(dataframe: pd.DataFrame):<br>
    from sklearn.impute import KNNImputer<br>
    hot_deck_imputer = KNNImputer(n_neighbors=2, weights="uniform")  # 参数固定：n_neighbors=2<br>
    new_df = hot_deck_imputer.fit_transform(dataframe)<br>
    return new_df<br>
def get_dataset():<br>
    得到数据<br>
    :return data_x：有缺失值的数据<br>
    :return true_value：缺失数据的原始真实值<br>
    :return data_y：原问题中待预测的label<br>
    import copy<br>
    from sklearn.datasets import make_classification<br>
    data_x, data_y = make_classification(n_samples=100, n_classes=4, n_features=6, n_informative=4,<br>
                                         random_state=0)  # 6个特征<br>
    data_x = pd.DataFrame(data_x)<br>
    data_x.columns = ['x1', 'x2', 'x3', 'x4', 'x5', 'miss_line']<br>
    true_data = copy.deepcopy(data_x)<br>
    # 在miss_line这一列删除20%的数据<br>
    drop_index = data_x.sample(frac=0.2).index<br>
    data_x.loc[drop_index, "miss_line"] = np.nan<br>
    true_value = true_data.loc[drop_index, 'miss_line']  # 空值的真实值<br>
    return data_x, true_value, data_y<br>
if __name__ == '__main__':<br>
    value_x, true_value_x, value_y = get_dataset()<br>
    fill_df = hot_deck_imputation(value_x)`
/**
 * Display-only Python snippet: reads a heart-disease CSV into pandas, then
 * screens one column for outliers via the 2-sigma rule and the 1.5*IQR
 * box-plot rule, clamping detected outliers to the nearest in-range extreme.
 * NOTE(review): multiplication operators are missing as displayed
 * (`2 xstd`, `1.5 (q3 - q1)`) and the function bodies lost their
 * indentation; kept verbatim as runtime display data.
 */
export const code10 = `# 导入需要的库<br>
import numpy as np<br>
import pandas as pd<br>

def Read_data(file):<br>
dt = pd.read_csv(file)<br>
dt.columns = ['age', 'sex', 'chest_pain_type', 'resting_blood_pressure', 'cholesterol',<br>
'fasting_blood_sugar', 'rest_ecg', 'max_heart_rate_achieved','exercise_induced_angina',<br>
'st_depression', 'st_slope', 'num_major_vessels', 'thalassemia', 'target']<br>
data =dt<br>
pd.set_option('display.max_rows', None)<br>
pd.set_option('display.max_columns', None)<br>
pd.set_option('display.width', None)<br>
pd.set_option('display.unicode.ambiguous_as_wide', True)<br>
pd.set_option('display.unicode.east_asian_width', True)<br>
print(data.head())<br>
return data<br>

def data_clean(data):<br>
# 数据清洗<br>
# 异常值处理<br>
data1 = data['resting_blood_pressure']<br>
# 标准差监测<br>
xmean = data1.mean()<br>
xstd = data1.std()<br>
print('存在' if any(data1 > xmean + 2 xstd) else '不存在', '上限异常值')<br>
print('存在' if any(data1 < xmean - 2 xstd) else '不存在', '下限异常值')<br>
# 箱线图监测<br>
q1 = data1.quantile(0.25)<br>
q3 = data1.quantile(0.75)<br>
up = q3 + 1.5 (q3 - q1)<br>
dw = q1 - 1.5 (q3 - q1)<br>
print('存在' if any(data1 > up) else '不存在', '上限异常值')<br>
print('存在' if any(data1 < dw) else '不存在', '下限异常值')<br>
data1[data1 > up] = data1[data1 < up].max()<br>
data1[data1 < dw] = data1[data1 > dw].min()<br>
# print(data1)<br>
`
/**
 * Display-only Python snippet: DFA (trie) based sensitive-word filter —
 * builds a nested-dict trie from a word list and scans text with minimum- or
 * maximum-length match rules, skipping a fixed set of punctuation characters.
 * NOTE(review): the docstring quotes are missing as displayed, the region
 * around `new_node` / `"> new_node['is_end'] = True` appears garbled by
 * extraction, and the final get_match_word() is cut off mid-definition.
 * Kept verbatim as runtime display data.
 */
export const code11 = `import json<br>
<br>
MinMatchType = 1 # 最小匹配规则<br>
MaxMatchType = 2 # 最大匹配规则<br>
<br>
class DFAUtils(object):<br>
DFA算法<br>
<br>
def init(self, word_warehouse):<br>
算法初始化<br>
:param word_warehouse:词库<br>
# 词库<br>
self.root = dict()<br>
# 无意义词库,在检测中需要跳过的（这种无意义的词最后有个专门的地方维护，保存到数据库或者其他存储介质中）<br>
self.skip_root = [' ', '&', '!', '！', '@', '#', '$', '￥', '', '^', '%', '?', '？', '<', '>', "《", '》']<br>
# 初始化词库<br>
for word in word_warehouse:<br>
self.add_word(word)<br>
<br>
def add_word(self, word):<br>
添加词库<br>
:param word:<br>
:return:<br>
now_node = self.root<br>
word_count = len(word)<br>
for i in range(word_count):<br>
char_str = word[i]<br>
if char_str in now_node.keys():<br>
# 如果存在该key，直接赋值，用于下一个循环获取<br>
now_node = now_node.get(word[i])<br>
now_node['is_end'] = False<br>
else:<br>
# 不存在则构建一个dict<br>
new_node = dict()<br>
<br>
if i == word_count - 1: # 最后一个<br>
new_node
"> new_node['is_end'] = True<br>
else: # 不是最后一个<br>
new_node['is_end'] = False<br>
<br>
now_node[char_str] = new_node<br>
now_node = new_node<br>
<br>
def check_match_word(self, txt, begin_index, match_type=MinMatchType):<br>
检查文字中是否包含匹配的字符<br>
:param txt:待检测的文本<br>
:param begin_index: 调用getSensitiveWord时输入的参数，获取词语的上边界index<br>
:param match_type:匹配规则 1：最小匹配规则，2：最大匹配规则<br>
:return:如果存在，则返回匹配字符的长度，不存在返回0<br>
"""<br>
flag = False<br>
match_flag_length = 0 # 匹配字符的长度<br>
now_map = self.root<br>
tmp_flag = 0 # 包括特殊字符的敏感词的长度<br>
<br>
for i in range(begin_index, len(txt)):<br>
word = txt[i]<br>
<br>
# 检测是否是特殊字符"<br>
if word in self.skip_root and len(now_map) < 100:<br>
# len(nowMap)<100 保证已经找到这个词的开头之后出现的特殊字符<br>
tmp_flag += 1<br>
continue<br>
# 获取指定key<br>
now_map = now_map.get(word)<br>
if now_map: # 存在，则判断是否为最后一个<br>
# 找到相应key，匹配标识+1<br>
match_flag_length += 1<br>
tmp_flag += 1<br>
# 如果为最后一个匹配规则，结束循环，返回匹配标识数<br>
if now_map.get("is_end"):<br>
# 结束标志位为true<br>
flag = True<br>
# 最小规则，直接返回,最大规则还需继续查找<br>
if match_type == MinMatchType:<br>
break<br>
else: # 不存在，直接返回<br>
break<br>
if tmp_flag < 2 or not flag: # 长度必须大于等于1，为词<br>
tmp_flag = 0<br>
return tmp_flag<br>
def get_match_word(self, txt, match_type=MinMatchType):<br>
`

/**
 * Display-only Python snippet: Grubbs' test for a single outlier (t-based
 * critical value vs. max deviation / std), followed by box-plot (IQR)
 * outlier replacement with the column mean on a CSV column.
 * NOTE(review): as displayed it has several transcription errors —
 * `y =.array(...)`, undefined `t` in `np.square(t)`, `m np.mean(...)` missing
 * `=`, and an unterminated string in the first plt.title call; `sns`/`plt`
 * are never imported. Kept verbatim as runtime display data.
 */
export const code12 = `
import numpy as np<br>
import pandas as pd<br>
import scipy.stats as stats<br>
x = np.array([12,13,14,19,21,23])<br>
y =.array([12,13,14,19,21,23,45])<br>
def grubbs_test(x):<br>
    n = len(x)<br>
    mean_x = np.mean(x)<br>
    sd_x = np.std(x)<br>
    numerator = max(abs(x-mean_x))<br>
    g_calculated = numerator/sd_x<br>
    print("Grub Calculated Value:",g_calculated)<br>
    t_value = stats.t.ppf(1 - 0.05 / (2 * n), n - 2)<br>
    g_critical = ((n - 1) * np.sqrt(np.square(t_value))) / (np.sqrt(n) * np.sqrt(n - 2 + np.square(t)))<br>
    print("Grubbs Critical Value:",g_critical)<br>
    if g_critical > g_calculated:<br>
        print("从Grubbs_test中我们观察到计算值小于临界值，接受零假设，得出结论：不存在错误值")<br>
    else:<br>
        print("从Grub_test中我们观察到计算值大于临界值，拒绝零假设，得出结论：存在一个错误值")<br>
grubbs_test(x)<br>
grubbs_test(y)<br>

train = pd.read_csv('../.csv')<br>
sns.boxplot(train['A'])<br>
plt.title("Box Plot before mean imputationplt.show()<br>
q1 = train['A'].quantile(0.25)<br>
q3 = train['A'].quantile(0.75)<br>
iqr = q3-q1<br>
Lower_tail = q1 - 1.5 * iqr<br>
Upper_tail = q3 + 1.5 * iqr<br>
m np.mean(train['A'])<br>
for i in train['A']:<br>
    if i > Upper_tail or i < Lower_tail:<br>
            train['A'] = train['A'].replace(i, m)<br>
sns.boxplot(train['A'])<br>
plt.title("Box Plot after mean imputation")<br>
plt.show()<br>
`
