import requests
from bs4 import BeautifulSoup
from lxml import etree
import re
import time
import csv
import pandas as pd
import json

  # bd_bj = etree.HTML(r.text)
  # bd_html = etree.tostring(bd_bj).decode("utf-8")

def getrequest(url):
  """Fetch *url* and return the requests.Response with encoding auto-detected.

  A 10-second timeout is applied and HTTP 4xx/5xx statuses are treated as
  failures. Returns None when the request fails for any reason; callers
  must be prepared for a None result.
  """
  try:
    r = requests.get(url, timeout=10)
    r.raise_for_status()  # raise on HTTP 4xx/5xx
    # Site serves Chinese text; let requests guess the real charset.
    r.encoding = r.apparent_encoding
    return r
  except requests.RequestException as e:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate; log the cause instead of failing silently.
    print("爬取失败", e)
    return None

def save(data, filename):
  """Append the rows in *data* to ./sdataset/<filename>.csv.

  data: iterable of dicts; only the dict VALUES are written (no header row),
        in each dict's insertion order.
  filename: base name of the CSV file, without extension.
  """
  import os
  # The original crashed with FileNotFoundError when ./sdataset/ was missing.
  os.makedirs('./sdataset', exist_ok=True)
  path = './sdataset/' + filename + '.csv'
  # utf_8_sig writes a BOM so Excel opens the Chinese text correctly;
  # newline='' is required by the csv module to avoid blank lines on Windows.
  with open(path, 'a', encoding='utf_8_sig', newline='') as file:
    writer = csv.writer(file)
    writer.writerows(item.values() for item in data)
          



if __name__ == '__main__':
  # Scrape replied patient consultations for one department from
  # gxhospital.com, page by page, saving each page's entries to a CSV
  # named after the department category.
  url_pre = 'http://www.gxhospital.com'

  typeid = '738'  # department id
  tmpid = 28      # first page to fetch (resume point after an interruption)
  href = '/advice_medical/' + str(tmpid) + '/dp/' + typeid + '/ty/1/'
  eve = getrequest(url_pre + href)
  soup1 = BeautifulSoup(eve.text, 'html.parser')
  div = soup1.find("div", class_="pager mt25")
  page = div.find_all('a', class_='cur')
  if len(page) > 0:
    # Total page count is exposed via the pager's attr-pages attribute.
    size = page[0].get('attr-pages')
    print(size)
  else:
    size = 0
  count = tmpid
  try:
    for p in range(tmpid, int(size) + 1):
      count = count + 1
      col_li_list = soup1.find_all('li', class_='column-tit')
      res_arr = []
      # BUG FIX: default so save() is never called with an unbound name
      # when a page contains no entries.
      col_cat_text = ''
      for col_li in col_li_list:
        col_time = col_li.find('span', class_='col-time completed')
        col_time_text = col_time.get_text()

        col_cat = col_li.find('a', class_='col-cat')
        col_cat_text = col_cat.get_text()  # department name, e.g. cardiology
        col_cat_text = col_cat_text.replace('[', '').replace(']', '')
        pattern = re.compile('/dynamic/guestbook_show/*')
        col_a = col_li.find('a', href=pattern)
        col_a_href = col_a.get('href')
        col_a_text = col_a.get_text()
        col_state_yes = col_li.find('span', class_='col-state completed')
        # BUG FIX: the original compared against the STRING 'None', which
        # is always true; a missing tag is the None object.
        if col_state_yes is not None:
          # Answered entry: open the detail page to read question + reply.
          col_eve = getrequest(url_pre + col_a_href)
          soup2 = BeautifulSoup(col_eve.text, 'html.parser')
          content = soup2.select('.ask-content p')
          replay = soup2.select('.replay-info p')

          # BUG FIX: defaults, so res_data never reuses stale values left
          # over from a previous loop iteration (or raises NameError).
          text = ''
          rept = ''
          if len(content) > 0:
            text = content[0].get_text()
            if len(text) > 30:
              continue  # skip over-long questions
          if len(replay) > 0:
            rept = replay[0].get_text()
          res_data = {"time": col_time_text, "type": col_cat_text,
                      "content": text, "reply": rept}
          print(res_data)
          res_arr.append(res_data)
      # Persist once per page; skip the write when nothing was collected.
      print(count)
      if res_arr:
        save(res_arr, col_cat_text)
      # Move on to the next page.
      next_url = '/advice_medical/' + str(p + 1) + '/dp/' + typeid + '/ty/1/'
      print(next_url)
      next_html = getrequest(url_pre + next_url)
      soup1 = BeautifulSoup(next_html.text, 'html.parser')
      time.sleep(1)  # be polite to the server
  except Exception as e:
    print("失败", e)

  

    

      
      

