#!/usr/bin/env python
# coding: utf-8

# In[33]:


import pandas as pd
pd.set_option('display.unicode.east_asian_width', True)  # align East-Asian column headers
scores = [[110, 105, 99], [105, 88, 115], [109, 120, 130]]
students = ['Oliver', 'Eva', 'Nico']
subjects = ['语文', '数学', '英语']
df = pd.DataFrame(data=scores, index=students, columns=subjects)
print(df, '\n')
# Row selection cheat-sheet: .loc takes labels, .iloc takes integer positions.
#   df.loc['Oliver']               one row by label
#   df.loc[['Oliver', 'Nico']]     several rows by label (note the list)
#   df.iloc[[0, 2]]                several rows by position
#   df.loc['Oliver':'Nico']        label slice, INCLUSIVE of both ends
#   df.iloc[0:3], df.iloc[1:]      position slice, stop excluded
# Column selection:
#   df[['语文', '数学']], df.loc[:, '数学':], df.iloc[:, :2]
# Mixed row/column selection:
#   df.loc['Oliver', '英语'], df.loc[['Oliver'], ['数学', '英语']],
#   df.iloc[[1], [2]], df.iloc[1:, [0, 2]], df.iloc[:, 2]

# Boolean filtering: keep only the rows that clear all three score thresholds.
mask = (df['语文'] > 105) & (df['数学'] > 88) & (df['英语'] > 100)
print(df.loc[mask])


# In[37]:


import pandas as pd
pd.set_option('display.unicode.east_asian_width', True)  # align East-Asian column headers
records = [[22, "辽宁省大连市"], [23, "辽宁省葫芦岛市"], [21, "湖北省武汉市"]]
people = ['Oliver', 'Eva', 'Nico']
df = pd.DataFrame(data=records, index=people, columns=['年龄', '籍贯'])
print(df)
# Substring filter: keep rows whose hometown mentions 大连市.
print(df.loc[df['籍贯'].str.contains('大连市')])


# In[40]:


import pandas as pd
pd.set_option('display.unicode.east_asian_width', True)  # align East-Asian column headers
marks = [[110, 105, 99], [105, 88, 115], [109, 120, 130]]
roster = ['Oliver', 'Eva', 'Nico']
df = pd.DataFrame(data=marks, index=roster, columns=['语文', '数学', '英语'])
print(df, '\n')

# Insert a column at position 1. (Alternatives that append at the END instead:
# df['物理'] = [...] or df.loc[:, '物理'] = [...].)
physics = [100, 120, 130]
df.insert(1, '物理', physics)
print(df, '\n')

# Append a row simply by assigning to a new label.
df.loc['Taryan'] = [100, 110, 120, 130]
print(df, '\n')



# In[61]:


import pandas as pd
pd.set_option('display.unicode.east_asian_width', True)  # align East-Asian column headers
grades = [[110, 105, 99], [105, 88, 115], [109, 120, 130]]
pupils = ['Oliver', 'Eva', 'Nico']
df = pd.DataFrame(data=grades, index=pupils, columns=['语文', '数学', '英语'])
print(df, '\n')
# Modification cheat-sheet (not executed here):
#   df.columns = [...]                        replace every column header in place
#   df.rename(columns={old: new, ...})        returns a renamed COPY
#   df.index = [...]                          relabel the rows
#   df.loc[row] = [...]                       overwrite one row's values
#   df.loc[:] = df.loc[:] + 10                shift every cell
#   df.loc[:, col] = [...]                    overwrite one column
#   df.loc[row, col] = value                  edit a single cell

# Deleting columns — every drop below returns a NEW frame; df is untouched.
print(df.drop('数学', axis=1))          # labels keyword may be omitted
print(df.drop(columns='数学'))
# Deleting rows.
print(df.drop(['Oliver', 'Eva']))       # axis defaults to 0 (rows)
print(df.drop(index='Oliver'))

# Conditional row removal.
print(df.drop(index=df[df['数学'].isin([88])].index[0]))  # first row scoring 88 in 数学
print(df.drop(index=df[df['语文'] < 110].index))          # every row with 语文 below 110


# In[12]:


import pandas as pd

# Load the order sheet; some cells are expected to be empty (NaN).
df = pd.read_excel('TB2018.xls')
print(df)
print(type(df.isnull()))  # isnull() returns a boolean DataFrame
# dropna returns a NEW frame. The original called it without binding the
# result, so these two statements had no effect at all — bind the results
# so the examples actually demonstrate something.
df_any_dropped = df.dropna()           # drop rows containing ANY missing value
df_all_dropped = df.dropna(how='all')  # drop rows that are ENTIRELY missing
# Conditional removal: keep only rows where 宝贝总数量 is present.
df1 = df[df['宝贝总数量'].notnull()]
print(df1)
# Replace remaining missing quantities with 0.
df['宝贝总数量'] = df['宝贝总数量'].fillna(0)
print(df)


# In[16]:


import pandas as pd

# Load January's order sheet.
aa = r'1月.xlsx'
df = pd.DataFrame(pd.read_excel(aa))
print(df.duplicated())       # boolean Series flagging fully duplicated rows
print(df.drop_duplicates())  # copy with fully duplicated rows removed
# De-duplicate on one column. BUG FIX: the original wrote
# print(df.drop_duplicates(['宝贝标题'], inplace=True)), but with
# inplace=True the method mutates df and returns None, so it printed
# "None". Mutate first, then print the frame.
df.drop_duplicates(['宝贝标题'], inplace=True)
print(df)


# In[32]:


#综合实例答案
import pandas as pd
df = pd.read_excel('mrbooks.xls')
df1 = df.sort_values(['买家实际支付金额'],ascending=False)
df2 = df1.head(5)
df2.to_excel('vipbuyer.xlsx', columns=['买家会员名','买家实际支付金额'], index = 0) #index=0把行索引去掉

#需求2
df3 = df[df['收货地址'].str.contains('广东省')]
df3 = df3.sort_values(by=['宝贝总数量'],ascending=False)
df3 = df3[['买家会员名','宝贝总数量','收货地址','宝贝标题']]
df3.columns = ['会员名','宝贝数量','地址','宝贝标题']

df3.to_excel('guangdong.xlsx',sheet_name='buyer',index=0 )


# In[33]:


import pandas as pd

# Revenue per book: list price times units sold.
df = pd.read_excel('mrbook.xlsx')
df['收益'] = df['定价'] * df['销量']
# Total revenue per category — a Series indexed by 类别.
df1 = df.groupby('类别')['收益'].sum()
# Best-earning category and its total.
print(df1.idxmax(), df1.max())
# Mean revenue across all books.
print(df['收益'].mean())


# In[10]:


#折线图
import pandas as pd
import matplotlib.pyplot as plt
df1 = pd.read_excel('data.xls')
x1 = df1['姓名']
y1 = df1['语文']
y2 = df1['数学']
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'in'
plt.title('成绩大比拼',fontsize = '18')
plt.plot(x1,y1,label='语文',color='r',marker='p',mfc='b',ms=8,alpha=0.9)
plt.plot(x1,y2,label='数学',color='b',marker='*',mfc='r',ms=8,alpha=0.9)
plt.grid(axis='y')
plt.ylabel('分数')
plt.xlabel('语文')
plt.yticks(range(50,150,10))
plt.legend(['语文','数学'])
plt.show()


# In[14]:


#柱形图
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_excel('data.xls')
plt.rcParams['font.sans-serif']=['SimHei']
x=df['姓名']
height = df['语文']
plt.grid(axis='y',which='major')
plt.xlabel('姓名')
plt.ylabel('语文成绩')
plt.title('xx班语文成绩柱状图')
plt.bar(x,height,width= 0.5,align='center',color='b',alpha=0.9)
for a, b in zip(x, height):
    plt.text(a,b,format(b,','), ha='center', va='bottom', fontsize=9,color='b')
plt.legend(['语文'])
plt.show()


# In[15]:


from bs4 import BeautifulSoup
from datetime import datetime
import os
import traceback
import sqlite3
import pandas as pd

#读取指定目录中的每个html文件
#解析，提取：标题、时间、作者、来源、正文
#保存在本地文件中

def get_news(htmlfile):
    """Parse one saved news HTML file and return its fields as a dict.

    Extracted keys: 'title', 'time' (normalized to 'YYYY-MM-DD HH:MM'),
    'source', 'author', 'article' (body paragraphs concatenated).
    Returns None when any field is missing or malformed — scraping is
    best-effort, so a bad file is reported and skipped rather than fatal.
    """
    news_dict = {}
    try:
        # FIX: use a context manager so the file handle is closed even when
        # parsing raises (the original leaked the handle on every call).
        with open(htmlfile, encoding='utf-8') as f:
            fcontent = f.read()
        soup = BeautifulSoup(fcontent, 'lxml')
        # 'h1.main-title' is assumed unique on the page, so [0] is the title;
        # .text drops the tag markup, .strip() trims whitespace.
        news_dict['title'] = soup.select('h1.main-title')[0].text.strip()
        # Normalize the Chinese date format to 'YYYY-MM-DD HH:MM'.
        nt = datetime.strptime(soup.select('span.date')[0].text.strip(),
                               '%Y年%m月%d日 %H:%M')
        news_dict['time'] = datetime.strftime(nt, '%Y-%m-%d %H:%M')
        news_dict['source'] = soup.select('.source')[0].text.strip()
        news_dict['author'] = soup.select('.show_author')[0].text.strip()
        # Body text: every direct <p> child of <div id="article">, joined.
        news_article = soup.select('div#article > p')
        news_dict['article'] = ''.join(p.text.strip() for p in news_article)
    except Exception as e:
        # Best-effort scraping: report the failure and skip this article.
        print('抓取出错，此条新闻略过')
        print(e)
        traceback.print_exc()
        return None
    return news_dict

# 保存所有新闻内容 （保存正文）,将newslist中的正文，保存到filename
def save_text(filename, newslist):
    newstext = ''
    #neslist是一个字典列表，每个元素n都是一个新闻字典
    for n in newslist:
         newstext += n['article'] + '\r\n\n\n\n'
    #保存文件
    f = open(filename, 'wb')
    f.write(newstext.encode('utf-8'))
    f.close()

#创建数据库newsdb(格式：new.db), 创建表newstable(格式：news)
def createNewsDB(newsdb, newstable):
    #链接数据库，如果不存在，直接创建
    conn = sqlite3.connect(newsdb)
        # 建立游标
    cursor = conn.cursor()
        # 建立一个表newstable,按照新闻的各个属性建立字段。
    cursor.execute('create table if not exists "%s" (id int(10) primary key,title varchar(300), time datetime,source varchar(300),author varchar(200), article text)' % newstable)
    #关闭游标
    cursor.close()
    #提交操作、
    conn.commit()
        #关闭链接
    conn.close()
        
#保存新闻字典列表newslist到数据库newsdb的表newstable中。
def saveNewsToSqlite (newslist,newsdb,newstable):
    conn = sqlite3.connect(newsdb)
    cursor = conn.cursor()
    #利用select语句获取数据库表中的记录数
    cursor.execute('select count(*) from "%s"'%newstable)
    i = cursor.fetchone()[0]
    #设定了新加入的纪录id的起始位置。避免id重复的异常
    i += 1

    #遍历news list字典列表，针对每一个新闻，写入数据库表newstable中
    for sn in newslist:
        #打印出新闻到屏幕
        #print（sn）
        
        #当重复运行代码时，解析的新闻和原新闻重复，可以以title作为区分。
        sql = 'select * from "%s" where title = ?'%newstable
        
        cursor.execute(sql,(sn['title'],))
        if cursor.fetchone() == None:
            sql = 'insert into "%s"(id, title, time, source, author, article) values(?,?,?,?,?,?)' %newstable
            cursor.execute(sql,(i,sn['title'],sn['time'],sn['source'],sn['author'],sn['article']))
            i+=1
    cursor.close()
    conn.commit()
    conn.close()
 
#allnews是一个新闻字典列表
#allnews格式：[{:::},{:::},{:::}]

def saveToExcel(newsdictlist,filename):
    """Save a list of news dicts ([{...}, {...}, ...]) to Excel file *filename*.

    Columns: ID, 标题, 发表时间, 来源, 作者, 正文. Ids are 1-based row numbers.
    """
    rows = []
    # BUG FIX: the original iterated the GLOBAL `allnews` and wrote to the
    # hard-coded path 'news.xlsx', silently ignoring both parameters —
    # use `newsdictlist` and `filename` as the signature promises.
    for news_id, news in enumerate(newsdictlist, start=1):
        rows.append([news_id, news['title'], news['time'], news['source'],
                     news['author'], news['article']])
    sn = pd.DataFrame(rows, columns=['ID','标题','发表时间','来源','作者','正文'])
    # index expects a bool; suppress the DataFrame's own row index.
    sn.to_excel(filename, index=False)



if __name__ == '__main__':
    # Folder containing the saved news HTML files.
    htmldir = "E:\\news"
    # Walk the news folder (recursively).
    fs = os.walk(htmldir)
    # allnews collects one dict per successfully parsed article.
    allnews = []
    # d: current directory (str), s: subdirectory names (list), fns: file names (list)
    for d, s, fns in fs:
        for fn in fns:
            # NOTE(review): the path is always joined against the ROOT dir,
            # so files found inside subdirectories would get a wrong path —
            # confirm the news folder is flat, or join against d instead.
            singlenews = get_news(htmldir + os.sep + fn)
            #singlenews = get_news(os.path.abspath(fn))
            # get_news returns None for unparseable files; keep the rest.
            if singlenews != None:
                allnews.append(singlenews)
#     # Save every article body to E:\news\news.txt
#     save_text(r'E:\news\news.txt',allnews)
    
#     # Write allnews into an sqlite3 database
#     createNewsDB('news.db', 'news')
#     saveNewsToSqlite(allnews, 'news.db', 'news')
    
    # Export everything to an Excel file.
    saveToExcel(allnews, 'new.xlsx')
    

# In[20]:


# Scrape JD.com product reviews page by page and export them to Excel.
import requests
import json
import pandas as pd
import re

# JSONP endpoint for paginated product comments; {pno} is filled in per page.
url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=13248209&score=0&sortType=5&page={pno}&pageSize=10&isShadowSku=0&rid=0&fold=1'
p = 0                 # current page number
allcomments = []      # accumulated [creationTime, content] pairs
while True:
    u = url.format(pno = p)
    # Browser-like User-Agent and a captured session Cookie are required,
    # or the endpoint refuses/blocks the request.
    res = requests.get(u,headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.71","Cookie": "unpl=JF8EAK1nNSttWhlWDU8AExMSGwldW1UISkRUaGcHVghcGFEAHAUYEUR7XlVdXhRKER9sYRRUWlNKVA4YBSsSEXteXVdZDEsWC2tXVgQFDQ8VXURJQlZAFDNVCV9dSRZRZjJWBFtdT1xWSAYYRRMfDlAKDlhCR1FpMjVkXlh7VAQrBxsVFUJcXV1fOHsQM19XA11YWENVNRoyGiJSHwFQV1sPTx9OamcCUVRZQlcHKwMrEQ; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_3c39e3903ae84910bf6133d5b446622e|1658908889396; __jdu=416480823; areaId=8; PCSYCityID=CN_210000_210200_0; shshshfpa=8125168a-1958-c33d-69ae-83161c4ee784-1658908890; shshshfpb=uZOqvDzgbDSK6x2okbKYvKQ; __jdc=122270672; __jda=122270672.416480823.1658908888.1658908888.1658908889.1; shshshfp=3d8e4c663816c79116aece7eb686717f; jsavif=0; token=c463a4813062c490afaeeee906d18da2,2,921616; __tk=furWNvbyxRx3zy1AfxNWevTP2CgpyyqCOEH3MYTYxAyp1DTCqtyRNvTENBq4hvt1OEIwN0Tr,2,921616; ip_cityCode=573; ipLoc-djd=8-573-577-46902; jwotest_product=99; __jdb=122270672.6.416480823|1.1658908889; shshshsID=8e9f037bcac7876a4403b514cd4ab121_6_1658909069000; 3AB9D23F7A4B3C9B=ZQSWFPEIQ25HBFL3TDDEOSB6KAUHMWAU6SCK6IIBEGNMNPB3TEQXPX2JW653NBLW3NC5XDLG7ZH2JQUSQ2AXGLYJE4; JSESSIONID=77FAD884E430C2A9F74EFC943B8944F3.s1"})
    # The response is JSONP: strip the "fetchJSON_comment98(" prefix and the
    # trailing ");" to obtain plain JSON, then take the 'comments' array.
    comments = json.loads(res.text[len("fetchJSON_comment98("):-2])
    comments = comments['comments']
    
    # pc counts the comments on this page.
    pc = 0
    for c in comments:
        allcomments.append([c["creationTime"],c["content"]])
        pc += 1
    # A page with fewer than pageSize (10) comments is the last page.
    if pc < 10:
        break
    p += 1
# Export all collected (time, content) pairs to an Excel file.
df = pd.DataFrame(allcomments, columns=['时间','评论内容'])
df.to_excel('京东评论.xlsx',index=True)


    


# In[ ]:




