[pytorch source read][1-1]setup.py

First, a quick primer on setup.py.

Basic introduction

A setup.py file is required in every package project we want to publish.

Commonly used fields in setup.py (a minimal example follows this list):
* name: the package name, i.e. the name used in import name
* packages: the packages to install, e.g. ["canbeAny", "canbeAny.packages"]
* version: the version number, usually defined in name/__init__.py as __version__ = "0.0.1"
* description: the one-line summary shown on the PyPI page
* long_description: the full description shown on the PyPI page
* url: the project home page, usually its GitHub page
* download_url: the download link for the project's source code
* license: the name of the license
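
Putting these fields together, a minimal setup.py might look like the sketch below (the package name, URLs and license are placeholders, not from any real project):

from setuptools import setup

setup(
    name='canbeAny',
    version='0.0.1',
    description='One-line summary shown on PyPI',
    long_description=open('README.md').read(),
    url='https://github.com/someuser/canbeAny',
    download_url='https://github.com/someuser/canbeAny/archive/0.0.1.tar.gz',
    license='MIT',
    packages=['canbeAny', 'canbeAny.packages'],
)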

When running python setup.py build or python setup.py install, two things need extra attention:
* including sub-packages (and the sub-packages of those sub-packages)
* including data files

include sub packages

By default, setup.py only installs the packages explicitly listed in the packages argument; sub-packages are not installed automatically.

The setuptools.find_packages function recursively discovers sub-packages:

from setuptools import setup, find_packages

NAME = "canbeAny"
# find_packages(NAME) returns sub-package names relative to the NAME directory,
# so each of them is prefixed with the top-level package name
PACKAGES = [NAME] + ["%s.%s" % (NAME, i) for i in find_packages(NAME)]
setup(
    ...
    packages=PACKAGES,
    ...
)
# PACKAGES == ['canbeAny', 'canbeAny.packages', 'canbeAny.packages.subpackage1', 'canbeAny.packages.subpackage2']

include package data

canbeAny
        |--- dataset
                |--- CnetNews.txt
        |--- packages
        |--- __init__.py
        ...

Suppose our project is laid out like this and CnetNews.txt needs to be packaged along with it. When building the package, only .py files are picked up; if other files such as CnetNews.txt are needed, they have to be declared via package_data in the setup() call:

setup(
    ...
    package_data={
        # patterns are relative to the canbeAny package directory,
        # so "*.txt" alone would not match dataset/CnetNews.txt
        "canbeAny": ["dataset/*.txt"],
    },
    ...
)

install_requires

setup(install_requires=["requests"]) # example1
setup(install_requires=["numpy >= 1.8.1", "pandas >= 0.14.1"]) # example2

These requirements take effect during python setup.py install and pip install xxx: if a listed dependency is not satisfied, it is installed automatically.

setup_requires

Packages listed in the setup_requires keyword are not installed automatically; they are only fetched for the build itself.

setup(setup_requires=["requests"]) # example

Now let's look at PyTorch's setup.py.

First, the imports:

from setuptools import setup, Extension, distutils, Command, find_packages
import setuptools.command.build_ext
import setuptools.command.install
import setuptools.command.develop
import setuptools.command.build_py
import distutils.unixccompiler
import distutils.command.build
import distutils.command.clean
import platform
import subprocess
import shutil
import multiprocessing
import sys
import os
import json
import glob
import importlib

from tools.setup_helpers.env import check_env_flag
from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME, CUDA_VERSION
from tools.setup_helpers.cudnn import (WITH_CUDNN, CUDNN_LIBRARY,
                                       CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR)
from tools.setup_helpers.nccl import WITH_NCCL, WITH_SYSTEM_NCCL, NCCL_LIB_DIR, \
    NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB
from tools.setup_helpers.mkldnn import (WITH_MKLDNN, MKLDNN_LIBRARY,
                                        MKLDNN_LIB_DIR, MKLDNN_INCLUDE_DIR)
from tools.setup_helpers.nnpack import WITH_NNPACK
from tools.setup_helpers.nvtoolext import NVTOOLEXT_HOME
from tools.setup_helpers.generate_code import generate_code
from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext
from tools.setup_helpers.dist_check import WITH_DISTRIBUTED, \
    WITH_DISTRIBUTED_MW, WITH_GLOO_IBVERBS

setuptools is a library for creating and distributing Python packages.

tools.setup_helpers is the tools/setup_helpers folder under the PyTorch root; it holds helpers that configure the setup process, which I will cover in detail in [pytorch source read][2].

DEBUG = check_env_flag('DEBUG')

IS_WINDOWS = (platform.system() == 'Windows')
IS_DARWIN = (platform.system() == 'Darwin')
IS_LINUX = (platform.system() == 'Linux')

NUM_JOBS = multiprocessing.cpu_count()
max_jobs = os.getenv("MAX_JOBS")
if max_jobs is not None:
    NUM_JOBS = min(NUM_JOBS, int(max_jobs))

try:
    import ninja
    WITH_NINJA = True
except ImportError:
    WITH_NINJA = False

check_env_flag here is a function from tools/setup_helpers (introduced a bit later).

PyTorch 0.4.0 fully supports Windows, Darwin, and Linux.

Darwin is what platform.system() reports on macOS.

multiprocessing.cpu_count() determines how many parallel build jobs to run (capped by the MAX_JOBS environment variable).

os.getenv() reads the value of an environment variable and returns None when it is not set.
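
Conceptually, check_env_flag just tests whether an environment variable is set to a truthy value. A rough sketch of such a helper (not necessarily the verbatim PyTorch implementation):

import os

def check_env_flag(name):
    # treat the common "on" spellings as true; unset or anything else is false
    return os.getenv(name, '').upper() in ['ON', '1', 'YES', 'TRUE', 'Y']

# e.g. running `DEBUG=1 python setup.py build` makes check_env_flag('DEBUG') True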

try:
    import ninja
    WITH_NINJA = True
except ImportError:
    WITH_NINJA = False

if not WITH_NINJA:
    ################################################################################
    # Monkey-patch setuptools to compile in parallel
    ################################################################################

    def parallelCCompile(self, sources, output_dir=None, macros=None,
                         include_dirs=None, debug=0, extra_preargs=None,
                         extra_postargs=None, depends=None):
        # those lines are copied from distutils.ccompiler.CCompiler directly
        macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
            output_dir, macros, include_dirs, sources, depends, extra_postargs)
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

        # compile using a thread pool
        import multiprocessing.pool

        def _single_compile(obj):
            src, ext = build[obj]
            self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
        multiprocessing.pool.ThreadPool(NUM_JOBS).map(_single_compile, objects)

        return objects
    distutils.ccompiler.CCompiler.compile = parallelCCompile

original_link = distutils.unixccompiler.UnixCCompiler.link

First, a word about ninja:

Ninja is a small build system with a focus on speed.

If the ninja package is available, the build goes through ninja; otherwise distutils' CCompiler.compile is monkey-patched (parallelCCompile above) so that compilation runs in parallel on a thread pool.

distutils is the library Python uses to build and distribute packages.

So the code above configures the compiler machinery used during the build.

################################################################################
# Workaround setuptools -Wstrict-prototypes warnings
# I lifted this code from https://stackoverflow.com/a/29634231/23845
################################################################################
import distutils.sysconfig
cfg_vars = distutils.sysconfig.get_config_vars()
for key, value in cfg_vars.items():
    if type(value) == str:
        cfg_vars[key] = value.replace("-Wstrict-prototypes", "")

################################################################################
# Custom build commands
################################################################################

dep_libs = [
    'nccl', 'ATen',
    'libshm', 'libshm_windows', 'gloo', 'THD', 'nanopb',
]


# global ninja file for building generated code stuff
ninja_global = None
if WITH_NINJA:
    ninja_global = NinjaBuilder('global')

def build_libs(libs):
    for lib in libs:
        assert lib in dep_libs, 'invalid lib: {}'.format(lib)
    if IS_WINDOWS:
        build_libs_cmd = ['tools\\build_pytorch_libs.bat']
    else:
        build_libs_cmd = ['bash', 'tools/build_pytorch_libs.sh']
    my_env = os.environ.copy()
    my_env["PYTORCH_PYTHON"] = sys.executable
    my_env["NUM_JOBS"] = str(NUM_JOBS)
    if not IS_WINDOWS:
        if WITH_NINJA:
            my_env["CMAKE_GENERATOR"] = '-GNinja'
            my_env["CMAKE_INSTALL"] = 'ninja install'
        else:
            my_env['CMAKE_GENERATOR'] = ''
            my_env['CMAKE_INSTALL'] = 'make install'
    if WITH_SYSTEM_NCCL:
        my_env["NCCL_ROOT_DIR"] = NCCL_ROOT_DIR
    if WITH_CUDA:
        my_env["CUDA_BIN_PATH"] = CUDA_HOME
        build_libs_cmd += ['--with-cuda']
    if WITH_NNPACK:
        build_libs_cmd += ['--with-nnpack']
    if WITH_CUDNN:
        my_env["CUDNN_LIB_DIR"] = CUDNN_LIB_DIR
        my_env["CUDNN_LIBRARY"] = CUDNN_LIBRARY
        my_env["CUDNN_INCLUDE_DIR"] = CUDNN_INCLUDE_DIR
    if WITH_MKLDNN:
        my_env["MKLDNN_LIB_DIR"] = MKLDNN_LIB_DIR
        my_env["MKLDNN_LIBRARY"] = MKLDNN_LIBRARY
        my_env["MKLDNN_INCLUDE_DIR"] = MKLDNN_INCLUDE_DIR
        build_libs_cmd += ['--with-mkldnn']

    if WITH_GLOO_IBVERBS:
        build_libs_cmd += ['--with-gloo-ibverbs']

    if WITH_DISTRIBUTED_MW:
        build_libs_cmd += ['--with-distributed-mw']

    if subprocess.call(build_libs_cmd + libs, env=my_env) != 0:
        sys.exit(1)

build_libs assembles the build command plus a copy of the current environment (with the CUDA/cuDNN/NCCL/MKL-DNN paths and the job count added) and hands both to the tools/build_pytorch_libs script.
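
In isolation, the pattern of copying the environment and passing it to a child process looks like this (the variable values and the child command here are just for illustration):

import os
import subprocess
import sys

my_env = os.environ.copy()               # start from the current environment
my_env['PYTORCH_PYTHON'] = sys.executable
my_env['NUM_JOBS'] = '8'

# the child process sees the extra variables; the parent environment is untouched
ret = subprocess.call(
    [sys.executable, '-c', 'import os; print(os.environ["NUM_JOBS"])'],
    env=my_env)
if ret != 0:
    sys.exit(1)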

missing_pydep = '''
Missing build dependency: Unable to `import {importname}`.
Please install it via `conda install {module}` or `pip install {module}`
'''.strip()


def check_pydep(importname, module):
    try:
        importlib.import_module(importname)
    except ImportError:
        raise RuntimeError(missing_pydep.format(importname=importname, module=module))

If a required Python build dependency cannot be imported, check_pydep raises a RuntimeError telling you how to install it.

importlib.import_module is a way to import a module dynamically by name.
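
A tiny example using a standard-library module:

import importlib

# equivalent to `import json`, except the module name can be a runtime string,
# which is exactly what check_pydep needs
json_mod = importlib.import_module('json')
print(json_mod.dumps({'ok': True}))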

class build_deps(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Check if you remembered to check out submodules
        def check_file(f):
            if not os.path.exists(f):
                print("Could not find {}".format(f))
                print("Did you run 'git submodule update --init'?")
                sys.exit(1)
        check_file(os.path.join(third_party_path, "gloo", "CMakeLists.txt"))
        check_file(os.path.join(third_party_path, "nanopb", "CMakeLists.txt"))
        check_file(os.path.join(third_party_path, "pybind11", "CMakeLists.txt"))
        check_file(os.path.join(third_party_path, 'cpuinfo', 'CMakeLists.txt'))
        check_file(os.path.join(third_party_path, 'tbb', 'Makefile'))
        check_file(os.path.join(third_party_path, 'catch', 'CMakeLists.txt'))

        check_pydep('yaml', 'pyyaml')
        check_pydep('typing', 'typing')

        libs = []
        if WITH_NCCL and not WITH_SYSTEM_NCCL:
            libs += ['nccl']
        libs += ['ATen', 'nanopb']
        if IS_WINDOWS:
            libs += ['libshm_windows']
        else:
            libs += ['libshm']
        if WITH_DISTRIBUTED:
            if sys.platform.startswith('linux'):
                libs += ['gloo']
            libs += ['THD']
        build_libs(libs)

        # Use copies instead of symbolic files.
        # Windows has very poor support for them.
        sym_files = ['tools/shared/cwrap_common.py']
        orig_files = ['aten/src/ATen/common_with_cwrap.py']
        for sym_file, orig_file in zip(sym_files, orig_files):
            if os.path.exists(sym_file):
                os.remove(sym_file)
            shutil.copyfile(orig_file, sym_file)

        # Copy headers necessary to compile C++ extensions.
        #
        # This is not perfect solution as build does not depend on any of
        # the auto-generated code and auto-generated files will not be
        # included in this copy. If we want to use auto-generated files,
        # we need to find a better way to do this.
        # More information can be found in conversation thread of PR #5772

        self.copy_tree('torch/csrc', 'torch/lib/include/torch/csrc/')
        self.copy_tree('third_party/pybind11/include/pybind11/',
                       'torch/lib/include/pybind11')
        self.copy_file('torch/torch.h', 'torch/lib/include/torch/torch.h')

The build_deps class above inherits from setuptools' Command.

Its purpose is to customize the setup process.

For a more detailed look at how this works, see the article "How To Add Custom Build Steps and Commands To setup.py".

The template looks like this:

import distutils.cmd
import distutils.log
import os
import setuptools
import subprocess


class PylintCommand(distutils.cmd.Command):
  """A custom command to run Pylint on all Python source files."""

  description = 'run Pylint on Python source files'
  user_options = [
      # The format is (long option, short option, description).
      ('pylint-rcfile=', None, 'path to Pylint config file'),
  ]

  def initialize_options(self):
    """Set default values for options."""
    # Each user option must be listed here with their default value.
    self.pylint_rcfile = ''

  def finalize_options(self):
    """Post-process options."""
    if self.pylint_rcfile:
      assert os.path.exists(self.pylint_rcfile), (
          'Pylint config file %s does not exist.' % self.pylint_rcfile)

  def run(self):
    """Run command."""
    command = ['/usr/bin/pylint']
    if self.pylint_rcfile:
      command.append('--rcfile=%s' % self.pylint_rcfile)
    command.append(os.getcwd())
    self.announce(
        'Running command: %s' % str(command),
        level=distutils.log.INFO)
    subprocess.check_call(command)


setuptools.setup(
    cmdclass={
        'pylint': PylintCommand,
    },
    # Usual setup() args.
    # ...
)

The distutils Command class provides two helper methods:

copy_file(infile, outfile[, preserve_mode, ...]): copy a file, respecting the verbose, dry-run and force flags.

copy_tree(infile, outfile[, preserve_mode, ...]): copy an entire directory tree, respecting the verbose, dry-run and force flags.

See http://code.nabla.net/doc/setuptools/api/setuptools/setuptools.Command.html for details.
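
A compact sketch of a command that uses these helpers, mirroring what build_deps.run() does at the end (the command name and paths are made up for illustration):

import distutils.cmd


class copy_headers(distutils.cmd.Command):
    """Hypothetical command that stages header files for packaging."""

    description = 'copy headers into the build tree'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # both helpers honor --verbose / --dry-run automatically
        self.copy_tree('include', 'build/include')
        self.copy_file('VERSION.txt', 'build/VERSION.txt')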

build_dep_cmds = {}

for lib in dep_libs:
    # wrap in function to capture lib
    class build_dep(build_deps):
        description = 'Build {} external library'.format(lib)

        def run(self):
            build_libs([self.lib])
    build_dep.lib = lib
    build_dep_cmds['build_' + lib.lower()] = build_dep

This loop creates a class build_dep for every lib. It inherits from build_deps, and its run(self) (the hook that Command invokes) calls build_libs to build just that library. Note that lib is attached as a class attribute after the class body, so each generated command remembers its own library instead of late-binding to the loop variable.
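
A minimal sketch of the same pattern (the library names below are placeholders, not PyTorch's real dependency list):

dep_libs = ['foo', 'bar']
build_dep_cmds = {}

for lib in dep_libs:
    class build_dep(object):
        def run(self):
            # self.lib resolves to the class attribute attached below, so each
            # generated class keeps its own library name
            print('building', self.lib)

    build_dep.lib = lib
    build_dep_cmds['build_' + lib] = build_dep

build_dep_cmds['build_foo']().run()   # prints: building foo
build_dep_cmds['build_bar']().run()   # prints: building bar

If run() printed the loop variable lib directly, every command would see its final value ('bar'); storing it on the class avoids that.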

class build_module(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        self.run_command('build_py')
        self.run_command('build_ext')

run_command(command) is inherited from Command; here build_module uses it to chain the build_py and build_ext commands.

class build_py(setuptools.command.build_py.build_py):

    def run(self):
        self.create_version_file()
        setuptools.command.build_py.build_py.run(self)

    @staticmethod
    def create_version_file():
        global version, cwd
        print('-- Building version ' + version)
        version_path = os.path.join(cwd, 'torch', 'version.py')
        with open(version_path, 'w') as f:
            f.write("__version__ = '{}'\n".format(version))
            # NB: This is not 100% accurate, because you could have built the
            # library code with DEBUG, but csrc without DEBUG (in which case
            # this would claim to be a release build when it's not.)
            f.write("debug = {}\n".format(repr(DEBUG)))
            f.write("cuda = {}\n".format(repr(CUDA_VERSION)))

For the build_py class, see https://github.com/pypa/setuptools/blob/master/setuptools/command/build_py.py

A quick primer on @staticmethod and @classmethod:

@staticmethod behaves like a plain function: it receives no self parameter.

@classmethod also receives no self, but it does receive cls, so it can access class attributes, call class methods, and construct instances without hard-coding the class name.
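
A tiny illustration (not taken from PyTorch's code):

class Version(object):
    default = '0.0.1'

    def __init__(self, value):
        self.value = value

    @staticmethod
    def is_valid(value):
        # no self/cls: just a plain function namespaced inside the class
        return value.count('.') == 2

    @classmethod
    def from_default(cls):
        # cls lets us build an instance without hard-coding the class name
        return cls(cls.default)


print(Version.is_valid('1.2.3'))      # True
print(Version.from_default().value)   # 0.0.1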

class develop(setuptools.command.develop.develop):

    def run(self):
        build_py.create_version_file()
        setuptools.command.develop.develop.run(self)
        self.create_compile_commands()

    def create_compile_commands(self):
        def load(filename):
            with open(filename) as f:
                return json.load(f)
        ninja_files = glob.glob('build/*_compile_commands.json')
        cmake_files = glob.glob('torch/lib/build/*/compile_commands.json')
        all_commands = [entry
                        for f in ninja_files + cmake_files
                        for entry in load(f)]
        with open('compile_commands.json', 'w') as f:
            json.dump(all_commands, f, indent=2)
        if not WITH_NINJA:
            print("WARNING: 'develop' is not building C++ code incrementally")
            print("because ninja is not installed. Run this to enable it:")
            print(" > pip install ninja")

This part subclasses setuptools.command.develop.develop.

After the normal develop step it merges the per-build *_compile_commands.json / compile_commands.json files into a single compile_commands.json at the project root.

def monkey_patch_THD_link_flags():
    '''
    THD's dynamic link deps are not determined until after build_deps is run
    So, we need to monkey-patch them in later
    '''
    # read tmp_install_path/THD_deps.txt for THD's dynamic linkage deps
    with open(tmp_install_path + '/THD_deps.txt', 'r') as f:
        thd_deps_ = f.read()
    thd_deps = []
    # remove empty lines
    for l in thd_deps_.split(';'):
        if l != '':
            thd_deps.append(l)

    C.extra_link_args += thd_deps

This is a monkey patch applied to THD's link flags.

A quick note on monkey patching:

A monkey patch is a way of patching code at runtime, most often seen in scripting languages such as Ruby and Python.

There are two stories about the name:

– One says it evolved from "guerrilla patch"; "guerrilla" and "gorilla" sound almost the same, and over time the gorilla turned into the monkey of "monkey patch".

– The other says that this style of patching messes the code up, as mischievously as a monkey would.

Back to the topic.

monkey_patch_THD_link_flags reads THD_deps.txt, splits its contents on ';', drops the empty entries, and appends the resulting libraries to the _C extension's extra_link_args.
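
As a general illustration of the technique (unrelated to PyTorch's actual classes), monkey patching simply means reassigning an attribute of an existing module or class at runtime, exactly like the earlier distutils.ccompiler.CCompiler.compile = parallelCCompile line:

import math

_original_sqrt = math.sqrt

def logging_sqrt(x):
    # wrap the original function, then delegate to it
    print('sqrt called with', x)
    return _original_sqrt(x)

math.sqrt = logging_sqrt        # the "patch": replace the attribute in place

print(math.sqrt(9.0))           # prints the log line, then 3.0
math.sqrt = _original_sqrt      # restore the original when done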

build_ext_parent = ninja_build_ext if WITH_NINJA \
    else setuptools.command.build_ext.build_ext


class build_ext(build_ext_parent):

    def run(self):

        # Print build options
        if WITH_NUMPY:
            print('-- Building with NumPy bindings')
        else:
            print('-- NumPy not found')
        if WITH_CUDNN:
            print('-- Detected cuDNN at ' + CUDNN_LIBRARY + ', ' + CUDNN_INCLUDE_DIR)
        else:
            print('-- Not using cuDNN')
        if WITH_CUDA:
            print('-- Detected CUDA at ' + CUDA_HOME)
        else:
            print('-- Not using CUDA')
        if WITH_MKLDNN:
            print('-- Detected MKLDNN at ' + MKLDNN_LIBRARY + ', ' + MKLDNN_INCLUDE_DIR)
        else:
            print('-- Not using MKLDNN')
        if WITH_NCCL and WITH_SYSTEM_NCCL:
            print('-- Using system provided NCCL library at ' +
                  NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
        elif WITH_NCCL:
            print('-- Building NCCL library')
        else:
            print('-- Not using NCCL')
        if WITH_DISTRIBUTED:
            print('-- Building with distributed package ')
            monkey_patch_THD_link_flags()
        else:
            print('-- Building without distributed package')

        generate_code(ninja_global)

        if WITH_NINJA:
            # before we start the normal build make sure all generated code
            # gets built
            ninja_global.run()

        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)

        # Copy the essential export library to compile C++ extensions.
        if IS_WINDOWS:
            build_temp = self.build_temp

            ext_filename = self.get_ext_filename('_C')
            lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'

            export_lib = os.path.join(
                build_temp, 'torch', 'csrc', lib_filename).replace('\\', '/')

            build_lib = self.build_lib

            target_lib = os.path.join(
                build_lib, 'torch', 'lib', '_C.lib').replace('\\', '/')

            self.copy_file(export_lib, target_lib)

This part is still build preparation: build_ext prints the build options, applies the THD monkey patch when the distributed package is enabled, triggers code generation, and then hands over to setuptools' build_ext.

To clarify the difference between build, compile, and make:

build: recompile the whole project

compile: compile the selected targets

make: compile the selected targets, but only rebuild the files that changed since the last run

Back to the topic.

The generate_code function called above (together with generate_code_ninja) comes from tools/setup_helpers/generate_code.py:

def generate_code_ninja(w):
    all_inputs = all_generator_source() + inputs
    cmd = "{} {}".format(sys.executable, 'tools/setup_helpers/generate_code.py')
    w.writer.build(
        outputs, 'do_cmd', all_inputs,
        variables={
            'cmd': cmd,
            # Note [Unchanging results for ninja]
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # generate_code.py will avoid bumping the timestamp on its
            # output files if the contents of the generated file did not
            # change. To let Ninja take advantage of this, it must stat
            # the output files after the build. See
            # https://groups.google.com/forum/#!topic/ninja-build/rExDmgDL2oc
            # for a more detailed discussion.
            'restat': True,
        })


def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    from tools.nnwrap import generate_wrappers as generate_nn_wrappers

    # Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
    # used by the legacy NN bindings.
    generate_nn_wrappers(nn_path)

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_autograd(declarations_path or DECLARATIONS_PATH, autograd_gen_dir)
    gen_jit_dispatch(declarations_path or DECLARATIONS_PATH, jit_gen_dir)

That is the function's actual source code.

class build(distutils.command.build.build):
    sub_commands = [
        ('build_deps', lambda self: True),
    ] + distutils.command.build.build.sub_commands


class install(setuptools.command.install.install):

    def run(self):
        if not self.skip_build:
            self.run_command('build_deps')

        setuptools.command.install.install.run(self)


class clean(distutils.command.clean.clean):

    def run(self):
        import glob
        with open('.gitignore', 'r') as f:
            ignores = f.read()
            for wildcard in filter(bool, ignores.split('\n')):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)

With the help of distutils, these are the standard build / install / clean trilogy.

A quick primer on that trilogy:

build: the compilation step, producing the binaries

install: putting the binaries, libraries and configuration produced by build into their proper locations

clean: removing the build outputs
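
These custom command classes only take effect once they are registered through setup()'s cmdclass argument; PyTorch does that further down in setup.py (covered in the next post). A rough sketch of what such a registration looks like, with illustrative values rather than PyTorch's exact ones:

cmdclass = {
    'build': build,
    'build_py': build_py,
    'build_ext': build_ext,
    'build_deps': build_deps,
    'build_module': build_module,
    'develop': develop,
    'install': install,
    'clean': clean,
}
cmdclass.update(build_dep_cmds)   # adds build_nccl, build_aten, build_thd, ...

setup(
    name='torch',
    version='0.4.0',              # illustrative; the real value is computed earlier
    cmdclass=cmdclass,
    packages=find_packages(exclude=('tools', 'tools.*')),
)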

This is getting frustrating:

Zhihu has a length limit on articles,

so I was forced to split this one file into two posts…

    Original author: Nick.Zxx
    Original article: https://zhuanlan.zhihu.com/p/36306208