#-*- coding:utf-8 -*-
import demjson
from fastapi import FastAPI
from pybloom_live import ScalableBloomFilter, BloomFilter
from datetime import datetime
# Deduplication filter for URLs already seen/crawled; grows as needed.
bloom = ScalableBloomFilter(initial_capacity=1000)
from multiprocessing import Process #use multiprocessing
# NOTE(review): Process appears unused in this file — confirm before removing.

import psycopg2

app = FastAPI(debug=True)
# Pending-crawl queue: newest entries are inserted at index 0, consumers pop
# from the end (FIFO overall).
cylinder = []
# NOTE(review): appears unused in this file — presumably a Baidu-specific queue.
baidu_cylinder = []
# Template for queue entries. NOTE(review): queue code actually uses the key
# 'typ', not 'type' — confirm which is intended.
templation = {'type':'','content':''}
# In-memory cache of query results: {name: {amount: content}}.
query_record = {}
#'type' has two kinds: 'normal' and 'search'
# Maximum number of entries allowed in the cylinder queue.
limitation = 200
# Process start time; used to expire query_record after a day.
timestamp_start = datetime.now()


# -- read config --
# Load DB connection settings from the Spider section of ../config/config.json.
# demjson.decode tolerates lenient JSON; NOTE(review): if the file is strict
# JSON the stdlib json module would suffice.
with open("./../config/config.json", "r") as f:  # config.json one level up
    js = demjson.decode(f.read())
host = js["Spider"]["host"]
port = js["Spider"]["port"]
root = js["Spider"]["root"]
password = js["Spider"]["password"]
database = js["Spider"]["db"]
# -- end of read config --
# -- postgres --
def postgresql_initation():  # PostgreSQL flavour of the initializer
    """Open the module-level PostgreSQL connection and cursor.

    Binds the globals ``mysql`` and ``cursor`` used by the rest of the
    module.  The original code contained a dead ``while False`` retry loop
    (which also referenced an un-imported ``time`` module and used a bare
    ``except:``); the retry intent is implemented here for real: transient
    connection failures are retried once per second, up to 30 attempts.

    Raises:
        ConnectionError: when the database is still unreachable after all
            attempts (chained to the last psycopg2 error).
    """
    import time  # local import: `time` is not imported at module level

    global mysql, cursor
    last_error = None
    for _ in range(30):
        try:
            mysql = psycopg2.connect(
                host=host,
                port=int(port),
                user=root,
                password=password,
                database=database,
            )
            break
        except psycopg2.OperationalError as exc:  # transient: DB not up yet
            last_error = exc
            time.sleep(1)
    else:
        raise ConnectionError("could not connect to PostgreSQL") from last_error
    cursor = mysql.cursor()

# -- end postgres -- 

#baidu-cds idea: use keywords to fetch candidate URLs from Baidu, push them
#into the cds pending-crawl queue for crawling, mark them as Baidu-sourced,
#and add 20 points to each entry's weight.
@app.post('/get')
async def get():
    """Pop up to four queued entries (from the consumer end of the queue).

    Four are handed out by default so a crawler never runs dry; if memory is
    tight, raise this batch size and lower the queue limit instead.
    """
    global cylinder
    batch = []
    while cylinder and len(batch) < 4:
        batch.append(cylinder.pop())
    return batch


@app.post('/set')
async def setting(*, url, typ):
    """Queue *url* for crawling unless the queue is full or the URL was seen.

    New entries go in at the head (index 0); consumers pop from the tail.
    """
    global cylinder
    # Guard: drop the request when the queue has reached its cap.
    if len(cylinder) >= limitation:
        return
    # Guard: the bloom filter deduplicates previously seen URLs.
    if url in bloom:
        print('bloom filter responsed!')
        return
    cylinder.insert(0, {'typ': typ, 'content': url})
    bloom.add(url)

@app.post('/del')
async def delete():
    """Discard every queued entry."""
    global cylinder
    cylinder = []
@app.post('/just_get')
async def justget():
    """Peek at up to four queued entries without removing them.

    Returns the first four queue items in reverse order, matching the
    insert-at-front construction of the original implementation.
    """
    global cylinder
    return list(reversed(cylinder[:4]))
@app.post('/filter_set')
async def baiduset(*, url):
    """Record *url* in the bloom filter without queueing it."""
    bloom.add(url)


@app.post('/filter_contain')
async def baidudel(*, url):
    """Return '1' when *url* is already in the bloom filter, else '0'."""
    return '1' if url in bloom else '0'

@app.get('/save')
async def save():
    """Serialize the bloom filter to bloom_temp.bin.

    Uses a ``with`` block so the file handle is closed even if ``tofile``
    raises (the original leaked the handle on error).
    """
    with open('bloom_temp.bin', 'wb') as f:
        bloom.tofile(f)
    return '{}'
@app.get('/read')
async def read():
    """Restore the bloom filter from bloom_temp.bin.

    Bug fix: the original did ``bloom = bloom.fromfile(f)`` which makes
    ``bloom`` a function-local name, so reading it raised UnboundLocalError
    and the module-level filter was never replaced.  Declare it global and
    load via the classmethod on ScalableBloomFilter instead.
    """
    global bloom
    with open('bloom_temp.bin', 'rb') as f:
        bloom = ScalableBloomFilter.fromfile(f)
    return '{}'

#port = 1278

#The latest version implements a distributed lock, still kept in memory; a
#save timer periodically backs up the lock's in-memory state to guard against
#crashes, which raises the memory requirements further.
@app.get('/sqlget')
async def sqlget():
    """Placeholder endpoint — not implemented.

    NOTE(review): presumably meant to serve reads backed by the distributed
    lock mentioned above — confirm intent before implementing.
    """
    pass

@app.post('/set_record')
async def setrecord(*, name, amount, content):
    """Cache *content* under (name, amount); flush the cache daily.

    Bug fix: the original never reset ``timestamp_start`` after flushing, so
    once a day had elapsed *every* subsequent call wiped the whole cache.
    The timestamp is now rebased whenever the flush happens.
    """
    global query_record, timestamp_start
    if (datetime.now() - timestamp_start).days > 1:
        query_record = {}
        timestamp_start = datetime.now()  # restart the expiry window
    # NOTE(review): this keeps only the latest amount per name (it replaces
    # any earlier {amount: content} mapping) — confirm that is intended.
    query_record[name] = {amount: content}
    return 'YES'

@app.post('/get_record')
async def getrecord(*, name, amount):
    """Look up a cached record; return an empty dict when absent."""
    return query_record.get(name, {}).get(amount, {})

# initialization
# Seed the bloom filter with every URL already stored in the database so
# previously crawled pages are never queued again.
print('inputting the data from database into bloomfilter')
postgresql_initation()
cursor.execute('select url from content')
# Iterate the cursor directly instead of fetchall() so a large table is not
# materialized in memory all at once; each row is a 1-tuple (url,).
for row in cursor:
    print(row)
    bloom.add(row[0])
print('done')