目的是要爬取书的书名,以及书的数目和一些简介
(1)配置 items.py 文件
class DuyuanItem(scrapy.Item):
    """Item holding the fields scraped from readcolor.com book-list pages."""

    # Fields filled in from the book-list index page
    book_list_title = scrapy.Field()    # title of the book list
    book_number = scrapy.Field()        # number of books in the list
    book_list_author = scrapy.Field()
    book_list_date = scrapy.Field()
    book_list_summary = scrapy.Field()  # filled later on the detail page
    # Fields for an individual book (declared for later use by the spider)
    book_url = scrapy.Field()
    book_name = scrapy.Field()
    book_author = scrapy.Field()
    book_summary = scrapy.Field()
(2)配置 settings.py 文件
ROBOTSTXT_OBEY = False #disable robots.txt compliance — required here, as covered in the basics section
ITEM_PIPELINES = { 'duyuan.pipelines.DuyuanPipeline': 300, } #register the pipeline entry point (lower number = earlier in the chain)
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'duyuan'
MONGODB_DOCNAME = 'bookitem' #MongoDB connection parameters read by the pipeline; DOCNAME is the collection name
(3)配置 pipelines.py 文件
import pymongo
# FIX: the original pasted line fused "settings" with the following "class"
# keyword ("import settingsclass"), which is a syntax error.
# NOTE(review): scrapy.conf was removed in modern Scrapy — prefer
# scrapy.utils.project.get_project_settings (or crawler.settings) there.
from scrapy.conf import settings


class DuyuanPipeline(object):
    """Persist every scraped item into the MongoDB collection from settings.py."""

    def __init__(self):
        # Read connection parameters from the project settings (see settings.py).
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        db_name = settings['MONGODB_DBNAME']
        client = pymongo.MongoClient(host=host, port=port)
        db = client[db_name]
        # Collection handle used by process_item.
        self.post = db[settings['MONGODB_DOCNAME']]

    def process_item(self, item, spider):
        """Store *item* in MongoDB and return it to the next pipeline stage."""
        book_info = dict(item)
        # Collection.insert() is deprecated in PyMongo 3+; insert_one is the
        # supported single-document API.
        self.post.insert_one(book_info)
        return item
(4)配置爬虫文件
import scrapy
# FIX: the original pasted line fused "DuyuanItem" with the following "class"
# keyword ("import DuyuanItemclass"), which is a syntax error.
from duyuan.items import DuyuanItem


class ReadcolorSpider(scrapy.Spider):
    """Crawl readcolor.com list pages, then follow each list to its detail page."""

    name = "readcolor"
    allowed_domains = ["readcolor.com"]
    start_urls = ['http://readcolor.com/lists']
    url = 'http://readcolor.com'  # site root, used to absolutize relative hrefs

    def parse(self, response):
        """Extract each book list on the index page and follow its detail link."""
        book_list_group = response.xpath('//article[@style="margin:10px 0 20px;"]')
        for each in book_list_group:
            item = DuyuanItem()
            item['book_list_title'] = each.xpath('header/h3/a/text()').extract()[0]
            item['book_number'] = each.xpath('p/a/text()').extract()[0]
            book_list_url = each.xpath('header/h3/a/@href').extract()[0]
            # Carry the partially-filled item to the detail callback via meta;
            # dont_filter stops the dupe filter from dropping the request.
            yield scrapy.Request(self.url + book_list_url,
                                 callback=self.parse_book_list_detail,
                                 dont_filter=True,
                                 meta={'item': item})

    def parse_book_list_detail(self, response):
        """Fill in the list summary from the detail page and emit the item."""
        item = response.meta['item']
        summary = response.xpath('//div[@id="list-description"]/p/text()').extract()
        item['book_list_summary'] = '\n'.join(summary)
        yield item