from urllib.parse import quote_plus
from urllib.request import urlopen
from urllib.error import URLError
from urllib.error import HTTPError
from bs4 import BeautifulSoup
from http.client import HTTPException

import requests
import time
import sys
import init
#import json

def initFile(fileName):
    """Read a keyword file (one keyword per line) from the current directory.

    Prints a short summary of the keywords that were read, then returns them.

    Args:
        fileName: name of the keyword file, looked up relative to ``./``.

    Returns:
        list[str]: the keywords, one per input line.

    Exits the process with status 0 if the file does not exist
    (preserves the original error-handling behavior).
    """
    try:
        # Context manager guarantees the handle is closed
        # (the original leaked the open file object).
        with open('./' + fileName, 'r') as f:
            kwList = f.read().splitlines()
    except FileNotFoundError:
        print("No such file \"%s\""% fileName + ",please check it and retry.")
        sys.exit(0)
    # BUGFIX: this summary output was unreachable in the original because it
    # followed the `return` statement; moved the return to the end.
    print("Read file completed,%3d"% len(kwList)+" key words:")
    for i, kw in enumerate(kwList):
        print("%14s"% kw, end="")
        if i == len(kwList) - 1:
            print("\n")
    return kwList

def search_keyword(keyword):
    """Query the Baidu Baike open API for *keyword* and print the JSON result.

    Args:
        keyword: lemma to look up; URL-quoted before being sent.

    Returns:
        False if the API returned an empty payload, otherwise None
        (the decoded JSON payload is printed to stdout).
    """
    surl = 'http://baike.baidu.com/api/openapi/BaikeLemmaCardApi?scope=103&format=json' \
           '&appid=379020&bk_key={keyword}&bk_length=600'.format(keyword=quote_plus(keyword))
    # BUGFIX: requests has no default timeout, so the original call could
    # hang forever if the API stalled; bound it explicitly.
    re_data = requests.get(surl, timeout=10).json()
    if not re_data:
        return False
    print(re_data)

# NOTE(review): the triple-quoted string below is an old, disabled `crawl`
# implementation kept as reference. It depends on helpers not visible in this
# file (varibles, saveCrawl, searchUrlCreator, getRecordAmount, getUrl,
# bloomCheck, saveRecord, ...), so it cannot run as-is. Consider moving it to
# version control history and deleting it from the module. Kept byte-identical
# here because it is a runtime string expression, not a comment.
'''
def crawl(kw):
    varibles.kw = kw
    #保存记录条数
    savedRecords = 0
    spendTime = 0
    saveCrawl(kw, spendTime , savedRecords)
    #此次爬取的开始时间
    startTime = (int)(time.time())
    quoted_kw = quote_plus(kw)
    searchUrlCreator(quoted_kw,1)
    getRecordAmount()
    print(time.strftime("[%Y/%m/%d %H:%M:%S] "), end="")
    print("Keyword is \"%s\""%kw + ",found %d Rcds ," % varibles.recordAmount, end="")
    #每页有10条记录，爬取100页
    for i in range(1,100):
        #爬取数量大于记录总数时退出循环
        if savedRecords > varibles.recordAmount:
            break
        searchUrlCreator(quoted_kw,i)
        getUrl()
        for j in range(0,10):
            id = varibles.recordUrlList[j][varibles.recordUrlList[j].rfind("/") + 1:]
            if bloomCheck(id) == False:
                addIdToFilter(id)                
                url = varibles.recordUrlList[j]
                #获取内容的关键词、领域和相关文献的标题、Url
                getContent_Relative_IndexTerm_Field(url,j)
                #获取作者、来源、发表日期和类别
                getTitle_Author_Source_Date_Category(j)
                #获取下载次数和引用次数
                getDownload_CitedTimes(j)
                #保存每条记录信息
                saveRecord(j, kw)
                #计数
                savedRecords = savedRecords + 1
                #爬取数量大于记录总数时退出循环
                if savedRecords == varibles.recordAmount:
                    break
            else:
                continue
    #此次爬取的结束时间
    endTime = (int)(time.time())
    spendTime = endTime - startTime
    #保存此次爬取的信息
    updateCrawl(kw , spendTime , savedRecords)
    print("cost %ds,"% spendTime + "%d Rcds saved."% savedRecords)'''
    
