在 Scrapy 中通过请求有效负载(request payload)发送 POST 请求

我该如何抓取这个网站?如何使用有效负载(payload)发送 POST 请求并从中获取数据?

如果我使用这段代码,我可以抓取第一页,但我该怎么抓取第二页?我需要使用 Selenium 吗,还是用 Scrapy 就足够了?

import scrapy
from scrapy import log
from scrapy.http import *
import urllib2
class myntra_spider(scrapy.Spider):
    """Spider for myntra.com men's footwear listings.

    Crawls the listing page, extracts product links, then POSTs to the
    site's search-service endpoint to fetch further result pages.
    """
    name = "myntra"
    allowed_domain = []
    start_urls = ["http://www.myntra.com/men-footwear"]
    # NOTE(review): opening the log file at class-definition (import) time is a
    # side effect; consider Scrapy's LOG_FILE setting instead.
    logfile = open('testlog.log', 'w')
    log_observer = log.ScrapyFileLogObserver(logfile, level=log.ERROR)
    log_observer.start()

    def parse(self, response):
        """Parse the listing page and request the next batch of results.

        :param response: the listing-page response
        :yields: a POST Request to the filtered-search service
        """
        print("response url %s" % response.url)

        links = response.xpath("//ul[@class='results small']/li/a/@href").extract()
        # Fixed: the original bound `link` but printed the undefined name `links`.
        print(links)
        # Fixed: the search service expects a POST (the original omitted
        # method="POST", so the body was sent with a GET request).
        yield Request(
            'http://www.myntra.com/search-service/searchservice/search/filteredSearch',
            callback=self.nextpages,
            method="POST",
            body="",
        )

    def nextpages(self, response):
        """Print up to the first 10 product links from a result page."""
        links = response.xpath("//ul[@class='results small']/li/a/@href").extract()
        # Slicing avoids an IndexError when fewer than 10 links are present
        # (the original indexed link[i] for a fixed range(10)).
        for href in links[:10]:
            print("link %s" % href)

最佳答案 你不需要 Selenium。在浏览器的开发者工具中检查需要随请求一起发送的有效负载(payload),并将其附加到请求中。

我尝试使用您的网站,以下代码段有效 –

def start_requests(self):
    """Kick off the crawl with a POST to the filtered-search endpoint.

    The JSON payload mirrors what the site's own front end sends; the
    server replies with the filtered search results as JSON.
    """
    endpoint = "http://www.myntra.com/search-service/searchservice/search/filteredSearch"
    # Every sort criterion is applied in descending order.
    sort_fields = [
        "count_options_availbale",
        "score",
        "style_store1_female_sort_field",
        "potential_revenue_female_sort_field",
        "global_attr_catalog_add_date",
    ]
    search = {
        "query": "(global_attr_age_group:(\"Adults-Unisex\" OR \"Adults-Women\") AND global_attr_master_category:(\"Footwear\"))",
        "start": 0,
        "rows": 96,
        "facetField": [],
        "pivotFacets": [],
        "fq": ["count_options_availbale:[1 TO *]"],
        "sort": [{"sort_field": field, "order_by": "desc"} for field in sort_fields],
        "return_docs": True,
        "colour_grouping": True,
        "useCache": True,
        "flatshot": False,
        "outOfStock": False,
        "showInactiveStyles": False,
        "facet": True,
    }
    # The service expects a JSON array containing the search object.
    yield Request(endpoint, self.parse, method="POST", body=json.dumps([search]))

def parse(self, response):
    """Decode the JSON body of the search-service response and dump it.

    :param response: response whose body is the service's JSON payload
    """
    data = json.loads(response.body)
    # Fixed: `print data` is Python-2-only syntax; print(data) behaves
    # identically for a single argument on both Python 2 and 3.
    print(data)
点赞