Python Web Scraping in Practice (11): Two Simple Ways to Scrape Dynamic Pages

Method 1: Scraping via a POST request

Analysing the requests the page sends while paging shows that the data can be fetched with a plain POST; only two form fields, page.pageNo (the page number) and tempPageSize (rows per page), need to be submitted.

# coding=utf-8

import requests
from bs4 import BeautifulSoup
from prettytable import PrettyTable


def getHtml(url, pageNo):
    data = {                     # repeated analysis shows only these two parameters need to be submitted
        'page.pageNo': pageNo,   # page number
        'tempPageSize': 10,      # number of rows per page
    }
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}  # pose as a normal browser
    page = requests.post(url, headers=headers, data=data)  # submit the form via POST
    html = page.text
    return html

def getData(html):
    global row
    soup = BeautifulSoup(html, 'html.parser')
    for i in range(1, 11):       # ten rows per page
        info = []
        texts = soup.find_all('td', rowid=str(i))  # inspecting the returned HTML shows each data cell carries a rowid attribute
        for text in texts:
            info.append(text.get_text())
        row.add_row(info)

if __name__ == '__main__':
    url = 'http://datacenter.mep.gov.cn:8099/ths-report/report!list.action?xmlname=1465594312346'
    row = PrettyTable()
    row.field_names = ['No.', 'Province', 'City (district/prefecture)', 'Farm (community) name',
                       'Livestock type', 'Quantity (head/birds)', 'COD emissions (mg/h)',
                       'NH3 emissions (mg/h)', 'Year']
    for i in range(1, 6):  # scrape only the first 5 pages
        html = getHtml(url, i)
        getData(html)
    print(row)

Method 2: Scraping with selenium
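
The selenium approach drives a real browser, so the page's JavaScript runs and the rendered results table can be read directly instead of reverse-engineering the POST request. The original post's selenium code is not reproduced here; the following is a minimal sketch of the idea. It assumes chromedriver is available on the PATH and that the rendered results appear as ordinary <tr>/<td> elements with nine columns per data row; those selectors are illustrative guesses, not taken from the original article.

# coding=utf-8

from prettytable import PrettyTable
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

URL = 'http://datacenter.mep.gov.cn:8099/ths-report/report!list.action?xmlname=1465594312346'


def getRows(driver):
    # wait until the rendered table has at least one data cell, then collect the rows
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'td')))
    rows = []
    for tr in driver.find_elements(By.TAG_NAME, 'tr'):
        cells = [td.text for td in tr.find_elements(By.TAG_NAME, 'td')]
        if len(cells) == 9:  # assumed: complete data rows have nine columns
            rows.append(cells)
    return rows


if __name__ == '__main__':
    row = PrettyTable()
    row.field_names = ['No.', 'Province', 'City (district/prefecture)', 'Farm (community) name',
                       'Livestock type', 'Quantity (head/birds)', 'COD emissions (mg/h)',
                       'NH3 emissions (mg/h)', 'Year']
    driver = webdriver.Chrome()  # requires chromedriver on the PATH
    try:
        driver.get(URL)          # the page renders its first page of results after loading
        for cells in getRows(driver):
            row.add_row(cells)
    finally:
        driver.quit()
    print(row)

Compared with Method 1, nothing about the request parameters has to be worked out, but launching a browser makes each fetch noticeably slower and adds a driver dependency.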

    Original author: 悦来客栈的老板
    Original article: https://blog.csdn.net/qq523176585/article/details/78471297
    This article is reposted from the web and shared for learning purposes only; if it infringes your rights, please contact the blogger for removal.