import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
import pymongo
import unittest
import re
import json
import datetime
from time import strftime
import requests
# Load the scraping rule configuration from the Excel sheet.
# Each row configures one site: its listing URL ('ad'), the CSS selector
# for one list item ('single'), the title/content/link selectors
# (p4/p5/p6), and a 'trans' flag carried through to the stored record.
df = pd.read_excel('/home/scrapy_config/jianbao.xlsx')
urls = df['ad']
db_all_string = df['db_all'][0]  # database holding the raw crawled pages
db_string = df['db'][0]          # database receiving the parsed items
print(db_all_string)
print(db_string)
single = df['single']
title = df['p4']
content = df['p5']
tourl = df['p6']
trans = df['trans']
url_name = df['name']

# Build one rule dict per configured site. Rows with an empty title
# selector are skipped: pandas reads empty cells as float NaN, so a
# float-typed title marks an unconfigured row (isinstance also catches
# numpy float subclasses, unlike the exact type() comparison).
get_rules = []
for index, ti in enumerate(title):
    if isinstance(ti, float):
        continue
    get_rules.append({
        'name': url_name[index],
        'url': urls[index],
        'title': ti,
        'content': content[index],
        'tourl': tourl[index],
        'single': single[index],
        'trans': trans[index],
    })
print(get_rules)

def find_get_rule(get_rules, url):
    """Return the first rule dict whose 'url' equals *url*, or None if absent."""
    return next((rule for rule in get_rules if rule['url'] == url), None)

# MongoDB setup. Raw pages crawled today live in a collection named
# "coYYYYMMDDco" inside the db_all_string database; parsed items are
# written to a collection of the same name inside the db_string database.
now = datetime.datetime.now()
collectionname = "co" + now.strftime("%Y%m%d") + "co"
ip = '192.168.50.101'
port = '27017'
client = pymongo.MongoClient(f'mongodb://{ip}:{port}/')
db = client[db_all_string]
collection = db[collectionname]
allstring = collection.find()  # cursor over today's raw crawled pages
# Destination database/collection for the parsed article records.
db_dhs = client[db_string]
collection_dhs = db_dhs[collectionname]

def testprint(url_a,string_s,onestring):
    if url_a==string_s:
        print(onestring)    
# Walk every raw page crawled today, apply that site's CSS-selector rule,
# and upsert each extracted article into the destination collection.
# A per-page summary line is always appended to error.txt, even for pages
# with no matching rule.
for onestring in allstring:
    url_a = onestring['url']
    url_count = 0      # items successfully stored for this page
    url_name_dd = " "  # site name for logging; single space if no rule matched
    get_rule_a = find_get_rule(get_rules, url_a)
    if get_rule_a is not None:
        url_name_dd = get_rule_a['name']
        soup = BeautifulSoup(onestring['str'], "html.parser")
        # Each element matched by the 'single' selector is one list item.
        single_all = soup.select(get_rule_a['single'])
        for single_a in single_all:
            if single_a is None or len(single_a) == 0:
                continue
            tile_s = ''
            content_s = ''
            tourl_s = ''
            try:
                # Re-parse the fragment so the title/content/link selectors
                # are evaluated relative to this one item only.
                soup_s = BeautifulSoup(str(single_a), "html.parser")
                tile_a = soup_s.select(get_rule_a['title'])
                if tile_a:
                    tile_s = tile_a[0].text
                content_a = soup_s.select(get_rule_a['content'])
                if content_a:
                    content_s = content_a[0].text
                tourl_a = soup_s.select(get_rule_a['tourl'])
                if tourl_a:
                    tourl_s = tourl_a[0].get('href')
                # Only store items that actually yielded a title.
                if tile_s:
                    # NOTE(review): the 'content ' key has a trailing space;
                    # kept as-is because existing documents and queries may
                    # depend on it — confirm before renaming.
                    stringone = {'title': str(tile_s),
                                 'content ': str(content_s),
                                 'url': str(tourl_s),
                                 'trans': get_rule_a['trans']}
                    # Upsert keyed on title so re-runs do not duplicate items.
                    collection_dhs.update_one({'title': str(tile_s)},
                                              {'$set': stringone}, True)
                    url_count = url_count + 1
            except Exception as e:
                # Log the failure but keep processing the remaining items.
                with open("/home/scrapy_config/error.txt", "a") as file:
                    file.write(url_name_dd + "    " + "：：" + str(e) + "\n")
                print(str(e))
                print(str(single_a))
                print(get_rule_a['title'])
    # Per-page summary line: "<site name>    <url>：：<items stored>".
    with open("/home/scrapy_config/error.txt", "a") as file:
        file.write(url_name_dd + "    " + url_a + "：：" + str(url_count) + "\n")