import re 
import urllib.request
from bs4 import BeautifulSoup
import pymongo
import unittest
import re
import json
import datetime
from time import strftime
import pandas as pd
#from unittestbasic1.unittest_operation import *
#后续加入可视化点击，自动生成配置文件
#每个网站的收集配置
#定义规则类及其属性
# Load the scraper configuration workbook at import time.
# db_all names the MongoDB database that holds the crawl rules
# (consumed later by Dbmanager.getrules()).
# NOTE(review): hard-coded absolute path; fails at import when missing.
df = pd.read_excel('/home/scrapy_config/jianbao.xlsx')
db_all=df['db_all'][0]

class Rule:
    """Dynamic attribute bag holding one site's crawl configuration.

    Attributes are attached at runtime by Rulemanager.manager(), so no
    fields are declared here.
    """
    def test(self):
        # Bug fix: was `def test():` with no `self`, so calling it on an
        # instance (Rule().test()) raised TypeError.
        print("init Rule")
class Rulemanager:
    """Builds a Rule object out of a JSON configuration string."""

    def __init__(self):
        # Start with an empty rule; manager() populates it.
        self.rule = Rule()

    def manager(self, string):
        # Parse the JSON document and mirror every key/value pair onto
        # the rule object as an attribute.
        self.json_str = json.loads(string)
        for key, value in self.json_str.items():
            setattr(self.rule, key, value)
#后续扩展
# Example target URL and browser-like request headers.
# NOTE(review): crawl() takes its URL/headers from each rule document
# (rule.url / rule.header), so these module-level values appear unused
# except by the commented-out example call in __main__.
url = "http://www.eastmountyxz.com/"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
#定义数据库对象
class DB:
    """Thin wrapper around a pymongo client for one MongoDB server.

    NOTE(review): the attribute name `myclinet` is a typo for `myclient`,
    but it is kept because external code (e.g. DB_test.test_db) reads it.
    """
    def __init__(self, ip, port):
        # pymongo connects lazily, so construction succeeds even when the
        # server is unreachable; errors surface on first operation.
        self.myclinet = pymongo.MongoClient('mongodb://'+str(ip)+":"+str(port)+"/")

    def createdb(self, string):
        # Select (and lazily create) the named database.
        self.mydb = self.myclinet[string]

    def setcollection(self, coltring):
        # Select (and lazily create) the named collection.
        self.collection = self.mydb[coltring]

    def insert(self, string):
        # Insert a single document (a dict, despite the parameter name).
        self.collection.insert_one(string)

    def find(self, string):
        # Bug fix: the cursor was computed but never returned, so find()
        # always handed None back to callers.
        return self.collection.find(string)

    def delete(self, string):
        # Remove every document matching the filter.
        self.collection.delete_many(string)

class DB_test(unittest.TestCase):
    """Integration tests for DB.

    Requires a live MongoDB server on localhost:27017; these are not
    isolated unit tests.
    """
    @classmethod
    def setUpClass(cls):
        # Fix: the first parameter of a classmethod is the class, so it is
        # conventionally (and correctly) named `cls`, not `self`.
        cls.db = DB("localhost","27017")

    def test_db(self):
        # Create db/collection, insert one doc, then verify the db exists.
        self.db.createdb('dhs')
        self.db.setcollection('dhs')
        self.db.insert({"ddd":"dddd"})
        self.assertIn("dhs",self.db.myclinet.list_database_names())

    def test_sql(self):
        # NOTE(review): relies on test_db having run first (alphabetical
        # method ordering) to create the 'dhs' collection.
        self.assertIn("dhs",self.db.mydb.list_collection_names())

    def test_find(self):
        # Prints whatever DB.find returns; no assertion is made.
        print(self.db.find({"ddd":"dddd"}))

    def test_delte(self):
        # Method-name typo ('delte') kept so existing test selection by
        # name keeps working.
        self.db.delete({"ddd":"dddd"})
        asss = self.db.collection.find()
        for a in asss:
            print("a")
            print(a)
        # NOTE(review): a pymongo cursor is never None, so this assertion
        # cannot fail — it does not actually verify the deletion.
        self.assertIsNotNone(asss)
#编写find_all参数匹配函数findAll(tag, attributes, recursive, text, limit, keywords)
def find_all(soup,search_fule):
    """Dispatch to soup.find_all() according to a rule list.

    search_fule[0] selects the match mode:
      - 'text': match by node text; pattern in search_fule[1].
      - 'name': match by tag name (search_fule[1]); search_fule[2] is an
        attribute dict, search_fule[3]/[4] map to recursive/limit. When
        the 'class' entry is a float it is presumably a pandas NaN from
        the Excel/Mongo config (TODO confirm), meaning "no attribute
        filter", so the attribute dict is omitted from the call.
      - 'id': match by element id (search_fule[1]).

    Returns the result of soup.find_all(), or None for an unknown mode.
    """
    istext = search_fule[0]
    if istext == 'text':
        return soup.find_all(text=search_fule[1])
    if istext == 'name':
        # isinstance() instead of `type(...) == float`: idiomatic, and it
        # also accepts float subclasses such as numpy.float64 NaN.
        if isinstance(search_fule[2]['class'], float):
            return soup.find_all(search_fule[1],recursive=search_fule[3],limit=search_fule[4])
        return soup.find_all(search_fule[1],search_fule[2],recursive=search_fule[3],limit=search_fule[4])
    if istext == 'id':
        return soup.find_all(id=search_fule[1])
    return None
#单元信息查找匹配代码,通过二维re表达式实现
def lookcup(tag,pattern):
    """Apply one regex per element of `tag` and collect all matches.

    `pattern` is assumed to be a sequence of regex strings parallel to
    `tag` — TODO confirm against the rule format; this helper is currently
    unused (its call in crawl() is commented out).

    Bug fixes versus the original:
      - it referenced `string` (undefined here, shadowing the builtin)
        instead of the loop variable `strring`;
      - `matches` started as "" and was concatenated with the list that
        re.findall() returns, raising TypeError on the first hit;
      - `pattern[:, index]` is numpy/pandas 2-D indexing that fails on
        plain sequences; plain `pattern[index]` is used instead.

    Returns a flat list of every match found.
    """
    matches = []
    for index, strring in enumerate(tag):
        matches.extend(re.findall(pattern[index], strring))
    return matches

#数据库操作类
class Dbmanager:
    """High-level database access: fetches crawl rules and stores results."""

    #获取数据库配置
    def __init__(self):
        # Hard-coded MongoDB server that holds both rules and results.
        self.dbclient = DB("192.168.50.101","27017")

    def getrules(self):
        """Return a cursor over all documents in the 'rule' collection."""
        self.dbclient.createdb(db_all)
        # setcollection() returns None; the original bound that None to an
        # unused local, which has been dropped.
        self.dbclient.setcollection('rule')
        return self.dbclient.collection.find()

    #入库代码
    def storage(self,tag,pattern,r_url):
        """Store one crawl result.

        pattern[0] is the database name; pattern[1] is the collection
        name, or None to use a per-day default collection.
        """
        print(pattern[0])
        print(pattern[1])
        now = datetime.datetime.now()
        self.dbclient.createdb(pattern[0])
        # Single if/else instead of two independent `if`s testing the same
        # condition with the non-idiomatic `not ... is None`.
        if pattern[1] is None:
            self.dbclient.setcollection("co"+now.strftime("%Y%m%d")+"co")
        else:
            self.dbclient.setcollection(pattern[1])
        self.dbclient.insert({'time':now.strftime("%Y%m%d%H%M%S"),'str':tag,'url':r_url})
        print('后续补充')
# 爬虫函数，函数一次浏览，传入地址，浏览器头，规则对象
def crawl(dbmanager):
    """Run one crawl pass.

    Fetches every rule document via dbmanager.getrules(), downloads each
    rule's page, extracts the configured elements with find_all(), and
    stores the stringified result via dbmanager.storage(). Rules whose
    download fails are logged to /home/scrapy_config/error.txt and skipped.
    """
    rule_data = dbmanager.getrules()
    rulemanager = Rulemanager()
    for data in rule_data:
        # Drop the Mongo ObjectId so the document round-trips through JSON.
        data.pop('_id', None)
        print(data)
        rulemanager.manager(json.dumps(data))
        rule = rulemanager.rule
        page = urllib.request.Request(rule.url, headers=rule.header)
        try:
            page = urllib.request.urlopen(page)
        except Exception as err:
            # Bug fix: the log previously hard-coded "HTTP Error 404: Not
            # Found" for every failure; record the actual error instead.
            # `with` guarantees the log file is closed even if write() fails.
            with open("/home/scrapy_config/error.txt", "a") as logfile:
                logfile.write(str(rule.url)+"：："+str(err)+"\n")
            print(str(rule.url))
            print(str(err))
            continue
        contents = page.read().decode(rule.character)
        if(rule.page_type=="html"):
            soup = BeautifulSoup(contents, "html.parser")
            print("爬取:"+str(rule.url))
        # NOTE(review): when page_type != "html", `soup` is undefined here
        # and the next call raises NameError — confirm every rule in the
        # DB uses page_type == "html".
        print("search_file"+str(type(rule.search_fule[2]['class'])))
        essay0 = find_all(soup,rule.search_fule)
        #改变入库匹配，改为先入库，出库再进行数据整理
        #content = lookup(essay0,rule.find_rule)
        content = str(essay0)
        dbmanager.storage(content,rule.save_rule,rule.url)
    
#定义收集类
class Crawl():
    """Convenience wrapper that drives crawl passes through a Dbmanager."""

    def __init__(self,dbmanager):
        # Keep the manager so start() can be invoked repeatedly.
        self.dbmanager = dbmanager

    def start(self):
        """Kick off one crawl run using the stored manager."""
        crawl(self.dbmanager)
              
# 主函数
# Script entry point: connects to the hard-coded MongoDB server, reads
# the crawl rules and runs a single crawl pass. Requires the server at
# 192.168.50.101:27017 to be reachable.
if __name__ == '__main__':
    #unittest.main(argv=['first-arg-is-ignored'], exit=False)
    #crawl(url, headers)
    #example
    dbmanager = Dbmanager()
    cr = Crawl(dbmanager)
    cr.start()