Packaging a Scrapy project into an exe with PyInstaller

Note: on a Mac you can install the pyinstaller package, but it cannot produce a Windows exe; the packaging must be done on a Windows machine.

1. Create a new start.py that runs Scrapy from code

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
 
process = CrawlerProcess(get_project_settings())
 
# 'followall' is the name of one of the spiders of the project.
process.crawl('followall', domain='scrapinghub.com')
process.start() # the script will block here until the crawling is finished
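
If the name-based spider lookup ever fails inside the frozen exe, you can also import the spider class and hand it to process.crawl directly, which avoids the spider-name lookup. This is only a sketch: the module path myproject.spiders.followall and the class name FollowAllSpider are placeholders for whatever your project actually defines.

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# hypothetical import path; replace with your project's real spider module and class
from myproject.spiders.followall import FollowAllSpider

process = CrawlerProcess(get_project_settings())
process.crawl(FollowAllSpider, domain='scrapinghub.com')  # pass the class instead of the name string
process.start()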

2. Add imports of the Scrapy modules to start.py so PyInstaller bundles them and the exe does not fail with "module not found" errors

import urllib.robotparser  # on Python 2 this module is just "robotparser"
 
import scrapy.spiderloader
import scrapy.statscollectors
import scrapy.logformatter
import scrapy.dupefilters
import scrapy.squeues
 
import scrapy.extensions.spiderstate
import scrapy.extensions.corestats
import scrapy.extensions.telnet
import scrapy.extensions.logstats
import scrapy.extensions.memusage
import scrapy.extensions.memdebug
import scrapy.extensions.feedexport
import scrapy.extensions.closespider
import scrapy.extensions.debug
import scrapy.extensions.httpcache
import scrapy.extensions.statsmailer
import scrapy.extensions.throttle
 
import scrapy.core.scheduler
import scrapy.core.engine
import scrapy.core.scraper
import scrapy.core.spidermw
import scrapy.core.downloader
 
import scrapy.downloadermiddlewares.stats
import scrapy.downloadermiddlewares.httpcache
import scrapy.downloadermiddlewares.cookies
import scrapy.downloadermiddlewares.useragent
import scrapy.downloadermiddlewares.httpproxy
import scrapy.downloadermiddlewares.ajaxcrawl
import scrapy.downloadermiddlewares.chunked
import scrapy.downloadermiddlewares.decompression
import scrapy.downloadermiddlewares.defaultheaders
import scrapy.downloadermiddlewares.downloadtimeout
import scrapy.downloadermiddlewares.httpauth
import scrapy.downloadermiddlewares.httpcompression
import scrapy.downloadermiddlewares.redirect
import scrapy.downloadermiddlewares.retry
import scrapy.downloadermiddlewares.robotstxt
 
import scrapy.spidermiddlewares.depth
import scrapy.spidermiddlewares.httperror
import scrapy.spidermiddlewares.offsite
import scrapy.spidermiddlewares.referer
import scrapy.spidermiddlewares.urllength
 
import scrapy.pipelines
 
import scrapy.core.downloader.handlers.http
import scrapy.core.downloader.contextfactory
 
import scrapy.pipelines.images  # needed if the images pipeline is used
import openpyxl  # needed if the openpyxl library is used



from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
 
process = CrawlerProcess(get_project_settings())
 
# 'followall' is the name of one of the spiders of the project.
process.crawl('followall', domain='scrapinghub.com')
process.start() # the script will block here until the crawling is finished
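
The long import list above exists only so that PyInstaller's dependency analysis sees these modules and bundles them into the exe. If you would rather keep start.py short, the same modules can instead be declared as hidden imports, either with repeated --hidden-import options on the pyinstaller command line or via the hiddenimports argument in the spec file used in step 3. A minimal sketch of the spec form (only a few names shown; fill in the rest of the list above):

a = Analysis(
    ['start.py'],
    hiddenimports=[
        'scrapy.spiderloader',
        'scrapy.statscollectors',
        'scrapy.downloadermiddlewares.retry',
        'scrapy.pipelines.images',
        'openpyxl',
        # ...plus the remaining modules from the import list above
    ],
)

Either way, the plain one-folder build (without -F, which step 4 below relies on) is started with: pyinstaller start.py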

3. How to fix the "maximum recursion depth exceeded" error

1) Generate the spec file

pyi-makespec start.py

2) At the top of the generated start.spec, add the following code to raise the recursion limit:

# -*- mode: python -*-
import sys
sys.setrecursionlimit(5000)

3) Build from the modified start.spec with the following command:

pyinstaller start.spec
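
For orientation, the lines added in sub-step 2 go at the very top of start.spec, before the Analysis(...) call that pyi-makespec generated; everything below them stays exactly as generated. A sketch:

# -*- mode: python -*-
import sys
sys.setrecursionlimit(5000)  # raised limit, added by hand to avoid the recursion error

# ...the rest of the file is left exactly as pyi-makespec generated it,
# starting with the Analysis(...) call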

4. If running the exe produces the following error

(error screenshot from the original article omitted)

Create a scrapy folder in the same directory as the exe, then go to the directory where your installed scrapy package lives, find the VERSION and mime.types files, and copy them into the scrapy folder you just created.

Note: this applies to an exe built without -F (one-folder mode); for a -F (one-file) build the author does not yet know how to handle it.
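
To avoid doing this copy by hand after every build, a small helper script can locate the two files inside the installed scrapy package and copy them next to the exe. This is a sketch: the dist/start output folder is an assumption based on a one-folder (no -F) build of start.py; adjust it to your actual output directory.

import os
import shutil

import scrapy

# the installed scrapy package directory contains the VERSION and mime.types files
scrapy_dir = os.path.dirname(scrapy.__file__)

# assumed output folder of a one-folder (no -F) build of start.py; change if yours differs
dest = os.path.join('dist', 'start', 'scrapy')
os.makedirs(dest, exist_ok=True)

for name in ('VERSION', 'mime.types'):
    shutil.copy(os.path.join(scrapy_dir, name), os.path.join(dest, name))
    print('copied', name, '->', dest)

Alternatively, the two files can be bundled at build time by listing them in the spec's datas argument (or with --add-data), which makes the manual copy unnecessary.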

    Original author: python_django
    Original post: https://www.jianshu.com/p/908af794d154
    This article is reposted from the web to share knowledge; if it infringes any rights, please contact the blogger to have it removed.