
#coding=utf-8
import sqlite3
import os.path
from datetime import datetime
# Number of rows pulled per cursor.fetchmany() call (batch query size).
batch_size = 1000
# Resolve the SQLite database path relative to this script's own directory,
# so the script works regardless of the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "allclean.db")

# Accumulator for rows that pass the filter; each row is stored as a list.
newAll = []

def changeTuple(c):
    """Convert one result-row tuple to a list, filter it, and collect it.

    Prints the row's second column as a progress trace (original behavior;
    assumes every row has at least two columns — TODO confirm schema).

    :param c: a row tuple as returned by the sqlite3 cursor
    :return: the row converted to a list (also appended to ``newAll``
             when it passes the filter), so ``map()`` callers get a value
    """
    row = list(c)  # convert once instead of twice
    print(row[1])

    # Placeholder filter: currently every row is kept.
    # TODO: replace with the real predicate deciding which rows go into newAll.
    if True:
        # Appending mutates the module-level list in place, so no
        # ``global`` statement is needed (the name is never rebound).
        newAll.append(row)
    return row
def cursor_query():
    """Stream every row of table ``lyc`` in batches and collect them.

    Uses ``cursor.fetchmany(batch_size)`` so the full result set is never
    materialized at once (the reason the author moved away from a plain
    fetchall — see the original pooling notes). Each batch is pushed
    through ``changeTuple``, which appends matching rows to ``newAll``.
    Prints per-batch progress and the total elapsed time in seconds.
    """
    # NOTE(review): the original comments mention a psycopg2 connection
    # pool; this version uses a plain local sqlite3 connection instead.
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        # Issue the full-table query; rows are pulled lazily below.
        cursor.execute('select * from lyc')
        start_time = datetime.now()

        while True:
            # Each call advances the cursor and returns up to batch_size rows.
            data = cursor.fetchmany(batch_size)
            # An empty batch means the result set is exhausted.
            if not data:
                break
            # Materialize the map so changeTuple's side effects actually
            # run (map is lazy in Python 3).
            processed = list(map(changeTuple, data))
            print(len(processed))
            print('newAll[] has %s' % len(newAll))

        print('fetchmany获取全量数据所用时间:', (datetime.now() - start_time).seconds)
    finally:
        # Always release the connection, even if the query fails
        # (the original leaked it).
        conn.close()
# Run the query only when executed as a script, not when imported.
if __name__ == "__main__":
    cursor_query()
