#!/usr/bin/env python
# coding: utf-8

# In[1]:


import sqlite3

# Create the demo database file and the `user` table.
# "if not exists" makes the cell safe to re-run without raising an error.
conn = sqlite3.connect('students.db')
try:
    cursor = conn.cursor()
    cursor.execute('create table if not exists user (id int(10) primary key, name varchar(20))')
    cursor.close()
    conn.commit()
finally:
    # Release the connection even if the DDL statement fails
    # (the original leaked it on error).
    conn.close()


# In[2]:


# Insert a few rows into `user`; each statement is executed individually.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
statements = [
    'insert into user (id, name) values ("1", "Eva")',
    'insert into user (id, name) values ("2", "Oliver")',
    'insert into user (id, name) values("3", "Nico")',
]
for stmt in statements:
    cursor.execute(stmt)
cursor.close()
conn.commit()
conn.close()


# In[3]:


# Insert one row, building the SQL text with %-interpolation
# (shown for comparison; placeholder binding is the safer approach).
import sqlite3

values = ('4', 'Taryna')
sql = "insert into user (id, name) values ('%s', '%s')" % values
conn = sqlite3.connect('students.db')
cursor = conn.cursor()
cursor.execute(sql)
cursor.close()
conn.commit()
conn.close()


# In[4]:


# Query the user table and fetch rows one at a time through the cursor.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
cursor.execute('select * from user')
# fetchone() advances the cursor: each call returns the next row.
for _ in range(2):
    print(cursor.fetchone())
cursor.close()
conn.commit()
conn.close()


# In[7]:


import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
cursor.execute('select * from user')
dataset = cursor.fetchall()  # grab every remaining row at once
cursor.close()
conn.commit()
conn.close()

# Pretty-print the (id, name) rows after the connection is closed.
for row_id, row_name in dataset:
    print('id:', row_id, 'name:', row_name)


# In[10]:


# SELECT whose WHERE clause is built from interactive user input.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
stuName = input('please enter a name:')
sql = 'select * from user where name = "%s"' % stuName
cursor.execute(sql)
print(cursor.fetchall())
cursor.close()
conn.commit()
conn.close()


# In[ ]:


# SQL-injection demo: the interpolated input is trusted verbatim, so
# entering   Nico" or 1=1 or "   turns the query into
#   select * from user where name = "Nico" or 1=1 or ""
# which matches every row in the table.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
stuName = input('please enter a name:')
cursor.execute('select * from user where name = "%s"' % stuName)
print(cursor.fetchall())
cursor.close()
conn.commit()
conn.close()


# In[13]:


# Injection mitigation #1: reject input containing a double quote
# before interpolating it into the SQL text.
import sqlite3
import re

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
try:
    stuName = input('please enter a name:')
    # Raw string for the regex; `is not None` instead of `!= None`.
    if re.search(r'"', stuName) is not None:
        print('输出错误')
    else:
        stuName = stuName.strip()
        sql = 'select * from user where name = "%s"' % stuName
        # select * from user where name = "Nico" or 1=1 or ""
        cursor.execute(sql)
        dataset = cursor.fetchall()
        print(dataset)
finally:
    # The original only closed the cursor/connection in the success
    # branch, leaking both when the input was rejected; close them
    # on every path.
    cursor.close()
    conn.commit()
    conn.close()


# In[15]:


# Injection mitigation #2 (preferred): let sqlite bind the value via the
# "?" placeholder, so the input is always treated as data, never as SQL.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
stuName = input('please enter a name:')
sql = 'select * from user where name = ?'
params = (stuName,)  # execute() expects a sequence, hence the 1-tuple
cursor.execute(sql, params)
print(cursor.fetchall())
cursor.close()
conn.commit()
conn.close()


# In[16]:


# Insert one row using "?" placeholders — the recommended pattern.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
cursor.execute("insert into user (id, name) values (?, ?)", ("5", "Eve"))
cursor.close()
conn.commit()
conn.close()


# In[17]:


# Bulk insert: executemany() runs the statement once per parameter tuple.
import sqlite3

rows = [('6', 'Frank'), ('7', 'Alex'), ('8', 'Vivi')]
conn = sqlite3.connect('students.db')
cursor = conn.cursor()
cursor.executemany("insert into user (id, name) values (?, ?)", rows)
cursor.close()
conn.commit()
conn.close()


# In[18]:


# Update: rename the student whose id is 4.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
cursor.execute('update user set name = ? where id = ?', ('Sunny', '4'))
cursor.close()
conn.commit()
conn.close()


# In[22]:


# Delete rows from `user`.
import sqlite3

conn = sqlite3.connect('students.db')
cursor = conn.cursor()
# To delete a single record, use a parameterized statement instead:
#   cursor.execute('delete from user where id = ?', (some_id,))
# (The original assigned that SQL to `sql` and immediately overwrote it,
# so only the delete-all statement below was ever executed.)
sql = 'delete from user'  # removes every record
cursor.execute(sql)
cursor.close()
conn.commit()
conn.close()


# In[3]:


# Connect to MySQL and print the server version.
import pymysql

db = pymysql.connect(host="localhost", user='root', password="root", database="firstdb")
cursor = db.cursor()
cursor.execute('SELECT VERSION()')
print("Database version:", cursor.fetchone())
cursor.close()
db.close()


# In[8]:


# (Re)create the `books` table in MySQL.
import pymysql

db = pymysql.connect(host="localhost", user='root', password="root", database="firstdb")
cursor = db.cursor()
# Drop first so re-running the cell never hits a "table exists" error.
cursor.execute("drop table if exists books")
ddl = '''
create table books(
    id int(8) not null auto_increment,
    name varchar(50) not null,
    category varchar(50) not null,
    price decimal(10,2) default null,
    publish_time date default null,
    primary key(id)
) engine = myisam auto_increment=1 default charset=utf8;
'''
cursor.execute(ddl)
cursor.close()
db.close()


# In[9]:


# Bulk-insert two books; roll back the transaction if anything fails.
import pymysql

db = pymysql.connect(host="localhost", user='root', password="root", database="firstdb")
cursor = db.cursor()

data = [('第一本书', '鸡汤', '80', '2018-1-25'),
        ('第二本书', '毒鸡汤', '100.54', '2022-7-25')]
try:
    cursor.executemany("insert into books (name, category, price, publish_time) values (%s, %s, %s, %s)", data)
    db.commit()
except Exception as exc:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit
    # and hide the cause entirely; catch Exception, undo the partial
    # insert, and report what went wrong.
    db.rollback()
    print('insert failed:', exc)
db.close()


# In[2]:


from bs4 import BeautifulSoup
from datetime import datetime
import os
import traceback
import sqlite3

#Read every html file in the given directory,
#parse out: title, time, author, source, and body text,
#then save the results to a local file (and a sqlite database).

def get_news(htmlfile):
    """Parse one saved news HTML file and return a dict with the keys
    title / time / source / author / article, or None when any required
    element is missing or the date cannot be parsed.
    """
    news_dict = {}
    try:
        # `with` guarantees the file handle is closed — the original
        # opened the file and never closed it.
        with open(htmlfile, encoding='utf-8') as f:
            fcontent = f.read()
        soup = BeautifulSoup(fcontent, 'lxml')
        # The page is assumed to contain exactly one h1.main-title, so
        # select(...)[0] picks it; .text drops the markup.
        news_dict['title'] = soup.select('h1.main-title')[0].text.strip()
        # Normalize the Chinese date text to "YYYY-MM-DD HH:MM".
        nt = datetime.strptime(soup.select('span.date')[0].text.strip(), '%Y年%m月%d日 %H:%M')
        news_dict['time'] = datetime.strftime(nt, '%Y-%m-%d %H:%M')
        news_dict['source'] = soup.select('.source')[0].text.strip()
        news_dict['author'] = soup.select('.show_author')[0].text.strip()
        # Body text: join every <p> that is a direct child of div#article.
        news_dict['article'] = ''.join(
            p.text.strip() for p in soup.select('div#article > p'))
    except Exception as e:
        # A missing element raises IndexError, a bad date ValueError;
        # skip this file rather than aborting the whole crawl.
        print('抓取出错，此条新闻略过')
        print(e)
        traceback.print_exc()
        return None
    return news_dict

# Save all article bodies from newslist into one text file.
def save_text(filename, newslist):
    """Write the 'article' text of every news dict in *newslist* to
    *filename* (UTF-8 bytes), each followed by a blank-line separator.
    """
    # str.join replaces the original quadratic `+=` concatenation loop.
    newstext = ''.join(n['article'] + '\r\n\n\n\n' for n in newslist)
    # `with` closes the file even if write() raises (original leaked
    # the handle on error).
    with open(filename, 'wb') as f:
        f.write(newstext.encode('utf-8'))

# Create database newsdb (e.g. 'news.db') with table newstable (e.g. 'news').
def createNewsDB(newsdb, newstable):
    """Create sqlite file *newsdb* (if absent) containing table
    *newstable* with one column per news attribute.

    NOTE(review): *newstable* is %-interpolated into the SQL text, so it
    must come from trusted code, never from user input.
    """
    # connect() creates the database file when it does not exist yet.
    conn = sqlite3.connect(newsdb)
    try:
        cursor = conn.cursor()
        cursor.execute('create table if not exists "%s" (id int(10) primary key,title varchar(300), time datetime,source varchar(300),author varchar(200), article text)' % newstable)
        cursor.close()
        conn.commit()
    finally:
        # Close the connection even when the CREATE fails (the original
        # leaked it on error).
        conn.close()
        
# Save the news dict list `newslist` into table `newstable` of database `newsdb`.
def saveNewsToSqlite (newslist,newsdb,newstable):
    """Append the news dicts in *newslist* to *newstable*, skipping any
    title already present, and assign sequential integer ids.

    Ids continue from the current row count so repeated runs do not
    collide on the primary key.
    """
    conn = sqlite3.connect(newsdb)
    cursor = conn.cursor()
    # Next free id = current number of rows + 1.
    cursor.execute('select count(*) from "%s"' % newstable)
    i = cursor.fetchone()[0] + 1

    for sn in newslist:
        # Deduplicate on title: re-running the crawl must not insert
        # the same story twice.
        sql = 'select * from "%s" where title = ?' % newstable
        cursor.execute(sql, (sn['title'],))
        # `is None` replaces the original `== None` comparison.
        if cursor.fetchone() is None:
            sql = 'insert into "%s"(id, title, time, source, author, article) values(?,?,?,?,?,?)' % newstable
            cursor.execute(sql, (i, sn['title'], sn['time'], sn['source'], sn['author'], sn['article']))
            i += 1
    cursor.close()
    conn.commit()
    conn.close()
    
if __name__ == '__main__':
    htmldir = "E:\\news"
    # Walk the news folder and collect one dict per parsable HTML file.
    allnews = []
    for dirpath, subdirs, filenames in os.walk(htmldir):
        for fname in filenames:
            parsed = get_news(htmldir + os.sep + fname)
            if parsed is not None:
                allnews.append(parsed)
    # Persist the article bodies as plain text...
    save_text(r'E:\news\news.txt', allnews)
    # ...and the full records into a sqlite database.
    createNewsDB('news.db', 'news')
    saveNewsToSqlite(allnews, 'news.db', 'news')


# In[1]:


import pandas as pd

# Widen pandas' display limits so the whole sheet prints,
# with CJK column names aligned.
pd.set_option('display.unicode.east_asian_width', True)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_columns', 1000)
df = pd.read_excel('data.xlsx')
print(df)


# In[11]:


import pandas as pd

# Two Series with the same data but differently-typed indexes.
s1 = pd.Series([88, 60, 75], index=[1, 2, 3])
s2 = pd.Series([88, 60, 75], index=['AAA', 'BBB', 'CCC'])
# Label-based slicing includes both endpoints.
print(s2['AAA':'CCC'])
print(s1.index)
print(s2.values)


# In[12]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)  # align CJK column names
scores = [[110, 105, 99], [105, 88, 115], [109, 120, 130]]
df = pd.DataFrame(data=scores, index=[0, 1, 2], columns=['语文', '数学', '英语'])
print(df)


# In[16]:


# Print each column of df as its own Series (df comes from the cell above).
for column in df.columns:
    print(df[column])


# In[20]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
# Build the frame from a dict; the scalar '班级' is broadcast to every row.
df = pd.DataFrame({'语文': [1, 2, 3], '数学': [4, 5, 6], '英语': [7, 8, 9], '班级': '高一2班'})
print(df)
# numeric_only=True: pandas >= 2.0 raises TypeError when mean() meets the
# string column; older versions silently dropped it. This pins the old,
# intended result (column-wise means of the score columns).
print(df.mean(numeric_only=True))


# In[22]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
# Load a single worksheet selected by its sheet name.
df = pd.read_excel('1月.xlsx', sheet_name='莫寒')
print(df.head())


# In[29]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
# index_col=0: use the first column of the sheet as the row index.
df = pd.read_excel('1月.xlsx', index_col=0)
print(df.head())


# In[30]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
# Default: the first sheet row becomes the column header.
df = pd.read_excel('1月.xlsx')
print(df.head())
# header=1: promote the second row to the header instead.
df = pd.read_excel('1月.xlsx', header=1)
print(df.head())
# header=None: the sheet has no header row; columns get integer names.
df = pd.read_excel('1月.xlsx', header=None)
print(df.head())


# In[34]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('1月.xlsx')
print(df.head())
# usecols=[1, 3] keeps the 2nd and 4th columns (0-based positions).
df = pd.read_excel('1月.xlsx', usecols=[1, 3])
print(df.head())
# Columns can equally be selected by name.
df = pd.read_excel('1月.xlsx', usecols=['买家会员名', '收货人姓名'])
print(df.head())


# In[39]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
df = pd.read_excel('1月.xlsx')
print(df.head())
df = pd.read_excel('1月.xlsx', header=0, usecols=[0, 3])
print(df.head())
# Column access by label; row access by .loc.
print(df['买家会员名'].head())
print(df.loc[3])


# In[40]:


import pandas as pd

pd.set_option('display.unicode.east_asian_width', True)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_columns', 1000)
# The CSV was exported with the GBK (Chinese Windows) encoding.
df = pd.read_csv('1月.csv', encoding='gbk')
print(df.head())


# In[41]:


import pandas as pd

# Tab-separated text file, GBK encoded.
df = pd.read_csv('1月.txt', sep='\t', encoding='gbk')
print(df)


# In[44]:


import pandas as pd

# Scrape 12 pages of ESPN NBA salary tables into one DataFrame.
url_list = ['http://www.espn.com/nba/salaries/_/seasontype/4']
for page in range(2, 13):
    url_list.append('http://www.espn.com/nba/salaries/_/page/%s/seasontype/4' % page)

frames = []
for url in url_list:
    # read_html returns a list of every <table> found on the page.
    frames.extend(pd.read_html(url))
# DataFrame.append was removed in pandas 2.0; pd.concat replaces it.
df = pd.concat(frames, ignore_index=True)

# Keep only real data rows: the salary column (position 3) starts with
# '$' for players, while the repeated in-table header rows do not.
df = df[[x.startswith('$') for x in df[3]]]
print(df)
df.to_csv('NBAplayer.csv', header=['RK', 'name', 'team', 'salary'], index=False)


# In[ ]:




