Delete .venv directory
This commit is contained in:
committed by
GitHub
parent
7795984d81
commit
5a2693bd9f
@@ -1,98 +0,0 @@
|
||||
# This file is generated by numpy's setup.py
|
||||
# It contains system_info results at the time of building this package.
|
||||
__all__ = ["get_info","show"]
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
|
||||
|
||||
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
|
||||
if sys.version_info >= (3, 8):
|
||||
os.add_dll_directory(extra_dll_dir)
|
||||
else:
|
||||
os.environ.setdefault('PATH', '')
|
||||
os.environ['PATH'] += os.pathsep + extra_dll_dir
|
||||
|
||||
blas_mkl_info={}
|
||||
blis_info={}
|
||||
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)], 'runtime_library_dirs': ['/usr/local/lib']}
|
||||
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)], 'runtime_library_dirs': ['/usr/local/lib']}
|
||||
lapack_mkl_info={}
|
||||
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)], 'runtime_library_dirs': ['/usr/local/lib']}
|
||||
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)], 'runtime_library_dirs': ['/usr/local/lib']}
|
||||
|
||||
def get_info(name):
|
||||
g = globals()
|
||||
return g.get(name, g.get(name + "_info", {}))
|
||||
|
||||
def show():
|
||||
"""
|
||||
Show libraries in the system on which NumPy was built.
|
||||
|
||||
Print information about various resources (libraries, library
|
||||
directories, include directories, etc.) in the system on which
|
||||
NumPy was built.
|
||||
|
||||
See Also
|
||||
--------
|
||||
get_include : Returns the directory containing NumPy C
|
||||
header files.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Classes specifying the information to be printed are defined
|
||||
in the `numpy.distutils.system_info` module.
|
||||
|
||||
Information may include:
|
||||
|
||||
* ``language``: language used to write the libraries (mostly
|
||||
C or f77)
|
||||
* ``libraries``: names of libraries found in the system
|
||||
* ``library_dirs``: directories containing the libraries
|
||||
* ``include_dirs``: directories containing library header files
|
||||
* ``src_dirs``: directories containing library source files
|
||||
* ``define_macros``: preprocessor macros used by
|
||||
``distutils.setup``
|
||||
* ``baseline``: minimum CPU features required
|
||||
* ``found``: dispatched features supported in the system
|
||||
* ``not found``: dispatched features that are not supported
|
||||
in the system
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.show_config()
|
||||
blas_opt_info:
|
||||
language = c
|
||||
define_macros = [('HAVE_CBLAS', None)]
|
||||
libraries = ['openblas', 'openblas']
|
||||
library_dirs = ['/usr/local/lib']
|
||||
"""
|
||||
from numpy.core._multiarray_umath import (
|
||||
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
|
||||
)
|
||||
for name,info_dict in globals().items():
|
||||
if name[0] == "_" or type(info_dict) is not type({}): continue
|
||||
print(name + ":")
|
||||
if not info_dict:
|
||||
print(" NOT AVAILABLE")
|
||||
for k,v in info_dict.items():
|
||||
v = str(v)
|
||||
if k == "sources" and len(v) > 200:
|
||||
v = v[:60] + " ...\n... " + v[-60:]
|
||||
print(" %s = %s" % (k,v))
|
||||
|
||||
features_found, features_not_found = [], []
|
||||
for feature in __cpu_dispatch__:
|
||||
if __cpu_features__[feature]:
|
||||
features_found.append(feature)
|
||||
else:
|
||||
features_not_found.append(feature)
|
||||
|
||||
print("Supported SIMD extensions in this NumPy install:")
|
||||
print(" baseline = %s" % (','.join(__cpu_baseline__)))
|
||||
print(" found = %s" % (','.join(features_found)))
|
||||
print(" not found = %s" % (','.join(features_not_found)))
|
||||
|
@@ -1,51 +0,0 @@
|
||||
"""
|
||||
An enhanced distutils, providing support for Fortran compilers, for BLAS,
|
||||
LAPACK and other common libraries for numerical computing, and more.
|
||||
|
||||
Public submodules are::
|
||||
|
||||
misc_util
|
||||
system_info
|
||||
cpu_info
|
||||
log
|
||||
exec_command
|
||||
|
||||
For details, please see the *Packaging* and *NumPy Distutils User Guide*
|
||||
sections of the NumPy Reference Guide.
|
||||
|
||||
For configuring the preference for and location of libraries like BLAS and
|
||||
LAPACK, and for setting include paths and similar build options, please see
|
||||
``site.cfg.example`` in the root of the NumPy repository or sdist.
|
||||
|
||||
"""
|
||||
|
||||
# Must import local ccompiler ASAP in order to get
|
||||
# customized CCompiler.spawn effective.
|
||||
from . import ccompiler
|
||||
from . import unixccompiler
|
||||
|
||||
from .npy_pkg_config import *
|
||||
|
||||
# If numpy is installed, add distutils.test()
|
||||
try:
|
||||
from . import __config__
|
||||
# Normally numpy is installed if the above import works, but an interrupted
|
||||
# in-place build could also have left a __config__.py. In that case the
|
||||
# next import may still fail, so keep it inside the try block.
|
||||
from numpy._pytesttester import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def customized_fcompiler(plat=None, compiler=None):
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
c = new_fcompiler(plat=plat, compiler=compiler)
|
||||
c.customize()
|
||||
return c
|
||||
|
||||
def customized_ccompiler(plat=None, compiler=None, verbose=1):
|
||||
c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
|
||||
c.customize('')
|
||||
return c
|
@@ -1,4 +0,0 @@
|
||||
from typing import Any
|
||||
|
||||
# TODO: remove when the full numpy namespace is defined
|
||||
def __getattr__(name: str) -> Any: ...
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,91 +0,0 @@
|
||||
"""
|
||||
Helper functions for interacting with the shell, and consuming shell-style
|
||||
parameters provided in config files.
|
||||
"""
|
||||
import os
|
||||
import shlex
|
||||
import subprocess
|
||||
try:
|
||||
from shlex import quote
|
||||
except ImportError:
|
||||
from pipes import quote
|
||||
|
||||
__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
|
||||
|
||||
|
||||
class CommandLineParser:
|
||||
"""
|
||||
An object that knows how to split and join command-line arguments.
|
||||
|
||||
It must be true that ``argv == split(join(argv))`` for all ``argv``.
|
||||
The reverse neednt be true - `join(split(cmd))` may result in the addition
|
||||
or removal of unnecessary escaping.
|
||||
"""
|
||||
@staticmethod
|
||||
def join(argv):
|
||||
""" Join a list of arguments into a command line string """
|
||||
raise NotImplementedError
|
||||
|
||||
@staticmethod
|
||||
def split(cmd):
|
||||
""" Split a command line string into a list of arguments """
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class WindowsParser:
|
||||
"""
|
||||
The parsing behavior used by `subprocess.call("string")` on Windows, which
|
||||
matches the Microsoft C/C++ runtime.
|
||||
|
||||
Note that this is _not_ the behavior of cmd.
|
||||
"""
|
||||
@staticmethod
|
||||
def join(argv):
|
||||
# note that list2cmdline is specific to the windows syntax
|
||||
return subprocess.list2cmdline(argv)
|
||||
|
||||
@staticmethod
|
||||
def split(cmd):
|
||||
import ctypes # guarded import for systems without ctypes
|
||||
try:
|
||||
ctypes.windll
|
||||
except AttributeError:
|
||||
raise NotImplementedError
|
||||
|
||||
# Windows has special parsing rules for the executable (no quotes),
|
||||
# that we do not care about - insert a dummy element
|
||||
if not cmd:
|
||||
return []
|
||||
cmd = 'dummy ' + cmd
|
||||
|
||||
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
|
||||
CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
|
||||
CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
|
||||
|
||||
nargs = ctypes.c_int()
|
||||
lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
|
||||
args = [lpargs[i] for i in range(nargs.value)]
|
||||
assert not ctypes.windll.kernel32.LocalFree(lpargs)
|
||||
|
||||
# strip the element we inserted
|
||||
assert args[0] == "dummy"
|
||||
return args[1:]
|
||||
|
||||
|
||||
class PosixParser:
|
||||
"""
|
||||
The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
|
||||
"""
|
||||
@staticmethod
|
||||
def join(argv):
|
||||
return ' '.join(quote(arg) for arg in argv)
|
||||
|
||||
@staticmethod
|
||||
def split(cmd):
|
||||
return shlex.split(cmd, posix=True)
|
||||
|
||||
|
||||
if os.name == 'nt':
|
||||
NativeParser = WindowsParser
|
||||
elif os.name == 'posix':
|
||||
NativeParser = PosixParser
|
@@ -1,795 +0,0 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shlex
|
||||
import time
|
||||
import subprocess
|
||||
from copy import copy
|
||||
from distutils import ccompiler
|
||||
from distutils.ccompiler import (
|
||||
compiler_class, gen_lib_options, get_default_compiler, new_compiler,
|
||||
CCompiler
|
||||
)
|
||||
from distutils.errors import (
|
||||
DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
|
||||
CompileError, UnknownFileError
|
||||
)
|
||||
from distutils.sysconfig import customize_compiler
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.exec_command import (
|
||||
filepath_from_subprocess_output, forward_bytes_to_stdout
|
||||
)
|
||||
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
|
||||
get_num_build_jobs, \
|
||||
_commandline_dep_string
|
||||
|
||||
# globals for parallel build management
|
||||
import threading
|
||||
|
||||
_job_semaphore = None
|
||||
_global_lock = threading.Lock()
|
||||
_processing_files = set()
|
||||
|
||||
|
||||
def _needs_build(obj, cc_args, extra_postargs, pp_opts):
|
||||
"""
|
||||
Check if an objects needs to be rebuild based on its dependencies
|
||||
|
||||
Parameters
|
||||
----------
|
||||
obj : str
|
||||
object file
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
# defined in unixcompiler.py
|
||||
dep_file = obj + '.d'
|
||||
if not os.path.exists(dep_file):
|
||||
return True
|
||||
|
||||
# dep_file is a makefile containing 'object: dependencies'
|
||||
# formatted like posix shell (spaces escaped, \ line continuations)
|
||||
# the last line contains the compiler commandline arguments as some
|
||||
# projects may compile an extension multiple times with different
|
||||
# arguments
|
||||
with open(dep_file, "r") as f:
|
||||
lines = f.readlines()
|
||||
|
||||
cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts)
|
||||
last_cmdline = lines[-1]
|
||||
if last_cmdline != cmdline:
|
||||
return True
|
||||
|
||||
contents = ''.join(lines[:-1])
|
||||
deps = [x for x in shlex.split(contents, posix=True)
|
||||
if x != "\n" and not x.endswith(":")]
|
||||
|
||||
try:
|
||||
t_obj = os.stat(obj).st_mtime
|
||||
|
||||
# check if any of the dependencies is newer than the object
|
||||
# the dependencies includes the source used to create the object
|
||||
for f in deps:
|
||||
if os.stat(f).st_mtime > t_obj:
|
||||
return True
|
||||
except OSError:
|
||||
# no object counts as newer (shouldn't happen if dep_file exists)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def replace_method(klass, method_name, func):
|
||||
# Py3k does not have unbound method anymore, MethodType does not work
|
||||
m = lambda self, *args, **kw: func(self, *args, **kw)
|
||||
setattr(klass, method_name, m)
|
||||
|
||||
|
||||
######################################################################
|
||||
## Method that subclasses may redefine. But don't call this method,
|
||||
## it i private to CCompiler class and may return unexpected
|
||||
## results if used elsewhere. So, you have been warned..
|
||||
|
||||
def CCompiler_find_executables(self):
|
||||
"""
|
||||
Does nothing here, but is called by the get_version method and can be
|
||||
overridden by subclasses. In particular it is redefined in the `FCompiler`
|
||||
class where more documentation can be found.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
|
||||
|
||||
|
||||
# Using customized CCompiler.spawn.
|
||||
def CCompiler_spawn(self, cmd, display=None):
|
||||
"""
|
||||
Execute a command in a sub-process.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cmd : str
|
||||
The command to execute.
|
||||
display : str or sequence of str, optional
|
||||
The text to add to the log file kept by `numpy.distutils`.
|
||||
If not given, `display` is equal to `cmd`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
|
||||
Raises
|
||||
------
|
||||
DistutilsExecError
|
||||
If the command failed, i.e. the exit status was not 0.
|
||||
|
||||
"""
|
||||
if display is None:
|
||||
display = cmd
|
||||
if is_sequence(display):
|
||||
display = ' '.join(list(display))
|
||||
log.info(display)
|
||||
try:
|
||||
if self.verbose:
|
||||
subprocess.check_output(cmd)
|
||||
else:
|
||||
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
o = exc.output
|
||||
s = exc.returncode
|
||||
except OSError:
|
||||
# OSError doesn't have the same hooks for the exception
|
||||
# output, but exec_command() historically would use an
|
||||
# empty string for EnvironmentError (base class for
|
||||
# OSError)
|
||||
o = b''
|
||||
# status previously used by exec_command() for parent
|
||||
# of OSError
|
||||
s = 127
|
||||
else:
|
||||
# use a convenience return here so that any kind of
|
||||
# caught exception will execute the default code after the
|
||||
# try / except block, which handles various exceptions
|
||||
return None
|
||||
|
||||
if is_sequence(cmd):
|
||||
cmd = ' '.join(list(cmd))
|
||||
|
||||
if self.verbose:
|
||||
forward_bytes_to_stdout(o)
|
||||
|
||||
if re.search(b'Too many open files', o):
|
||||
msg = '\nTry rerunning setup command until build succeeds.'
|
||||
else:
|
||||
msg = ''
|
||||
raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
|
||||
(cmd, s, msg))
|
||||
|
||||
replace_method(CCompiler, 'spawn', CCompiler_spawn)
|
||||
|
||||
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
|
||||
"""
|
||||
Return the name of the object files for the given source files.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
source_filenames : list of str
|
||||
The list of paths to source files. Paths can be either relative or
|
||||
absolute, this is handled transparently.
|
||||
strip_dir : bool, optional
|
||||
Whether to strip the directory from the returned paths. If True,
|
||||
the file name prepended by `output_dir` is returned. Default is False.
|
||||
output_dir : str, optional
|
||||
If given, this path is prepended to the returned paths to the
|
||||
object files.
|
||||
|
||||
Returns
|
||||
-------
|
||||
obj_names : list of str
|
||||
The list of paths to the object files corresponding to the source
|
||||
files in `source_filenames`.
|
||||
|
||||
"""
|
||||
if output_dir is None:
|
||||
output_dir = ''
|
||||
obj_names = []
|
||||
for src_name in source_filenames:
|
||||
base, ext = os.path.splitext(os.path.normpath(src_name))
|
||||
base = os.path.splitdrive(base)[1] # Chop off the drive
|
||||
base = base[os.path.isabs(base):] # If abs, chop off leading /
|
||||
if base.startswith('..'):
|
||||
# Resolve starting relative path components, middle ones
|
||||
# (if any) have been handled by os.path.normpath above.
|
||||
i = base.rfind('..')+2
|
||||
d = base[:i]
|
||||
d = os.path.basename(os.path.abspath(d))
|
||||
base = d + base[i:]
|
||||
if ext not in self.src_extensions:
|
||||
raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
|
||||
if strip_dir:
|
||||
base = os.path.basename(base)
|
||||
obj_name = os.path.join(output_dir, base + self.obj_extension)
|
||||
obj_names.append(obj_name)
|
||||
return obj_names
|
||||
|
||||
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
|
||||
|
||||
def CCompiler_compile(self, sources, output_dir=None, macros=None,
|
||||
include_dirs=None, debug=0, extra_preargs=None,
|
||||
extra_postargs=None, depends=None):
|
||||
"""
|
||||
Compile one or more source files.
|
||||
|
||||
Please refer to the Python distutils API reference for more details.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
sources : list of str
|
||||
A list of filenames
|
||||
output_dir : str, optional
|
||||
Path to the output directory.
|
||||
macros : list of tuples
|
||||
A list of macro definitions.
|
||||
include_dirs : list of str, optional
|
||||
The directories to add to the default include file search path for
|
||||
this compilation only.
|
||||
debug : bool, optional
|
||||
Whether or not to output debug symbols in or alongside the object
|
||||
file(s).
|
||||
extra_preargs, extra_postargs : ?
|
||||
Extra pre- and post-arguments.
|
||||
depends : list of str, optional
|
||||
A list of file names that all targets depend on.
|
||||
|
||||
Returns
|
||||
-------
|
||||
objects : list of str
|
||||
A list of object file names, one per source file `sources`.
|
||||
|
||||
Raises
|
||||
------
|
||||
CompileError
|
||||
If compilation fails.
|
||||
|
||||
"""
|
||||
# This method is effective only with Python >=2.3 distutils.
|
||||
# Any changes here should be applied also to fcompiler.compile
|
||||
# method to support pre Python 2.3 distutils.
|
||||
global _job_semaphore
|
||||
|
||||
jobs = get_num_build_jobs()
|
||||
|
||||
# setup semaphore to not exceed number of compile jobs when parallelized at
|
||||
# extension level (python >= 3.5)
|
||||
with _global_lock:
|
||||
if _job_semaphore is None:
|
||||
_job_semaphore = threading.Semaphore(jobs)
|
||||
|
||||
if not sources:
|
||||
return []
|
||||
from numpy.distutils.fcompiler import (FCompiler, is_f_file,
|
||||
has_f90_header)
|
||||
if isinstance(self, FCompiler):
|
||||
display = []
|
||||
for fc in ['f77', 'f90', 'fix']:
|
||||
fcomp = getattr(self, 'compiler_'+fc)
|
||||
if fcomp is None:
|
||||
continue
|
||||
display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
|
||||
display = '\n'.join(display)
|
||||
else:
|
||||
ccomp = self.compiler_so
|
||||
display = "C compiler: %s\n" % (' '.join(ccomp),)
|
||||
log.info(display)
|
||||
macros, objects, extra_postargs, pp_opts, build = \
|
||||
self._setup_compile(output_dir, macros, include_dirs, sources,
|
||||
depends, extra_postargs)
|
||||
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
|
||||
display = "compile options: '%s'" % (' '.join(cc_args))
|
||||
if extra_postargs:
|
||||
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
|
||||
log.info(display)
|
||||
|
||||
def single_compile(args):
|
||||
obj, (src, ext) = args
|
||||
if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
|
||||
return
|
||||
|
||||
# check if we are currently already processing the same object
|
||||
# happens when using the same source in multiple extensions
|
||||
while True:
|
||||
# need explicit lock as there is no atomic check and add with GIL
|
||||
with _global_lock:
|
||||
# file not being worked on, start working
|
||||
if obj not in _processing_files:
|
||||
_processing_files.add(obj)
|
||||
break
|
||||
# wait for the processing to end
|
||||
time.sleep(0.1)
|
||||
|
||||
try:
|
||||
# retrieve slot from our #job semaphore and build
|
||||
with _job_semaphore:
|
||||
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
|
||||
finally:
|
||||
# register being done processing
|
||||
with _global_lock:
|
||||
_processing_files.remove(obj)
|
||||
|
||||
|
||||
if isinstance(self, FCompiler):
|
||||
objects_to_build = list(build.keys())
|
||||
f77_objects, other_objects = [], []
|
||||
for obj in objects:
|
||||
if obj in objects_to_build:
|
||||
src, ext = build[obj]
|
||||
if self.compiler_type=='absoft':
|
||||
obj = cyg2win32(obj)
|
||||
src = cyg2win32(src)
|
||||
if is_f_file(src) and not has_f90_header(src):
|
||||
f77_objects.append((obj, (src, ext)))
|
||||
else:
|
||||
other_objects.append((obj, (src, ext)))
|
||||
|
||||
# f77 objects can be built in parallel
|
||||
build_items = f77_objects
|
||||
# build f90 modules serial, module files are generated during
|
||||
# compilation and may be used by files later in the list so the
|
||||
# ordering is important
|
||||
for o in other_objects:
|
||||
single_compile(o)
|
||||
else:
|
||||
build_items = build.items()
|
||||
|
||||
if len(build) > 1 and jobs > 1:
|
||||
# build parallel
|
||||
import multiprocessing.pool
|
||||
pool = multiprocessing.pool.ThreadPool(jobs)
|
||||
pool.map(single_compile, build_items)
|
||||
pool.close()
|
||||
else:
|
||||
# build serial
|
||||
for o in build_items:
|
||||
single_compile(o)
|
||||
|
||||
# Return *all* object filenames, not just the ones we just built.
|
||||
return objects
|
||||
|
||||
replace_method(CCompiler, 'compile', CCompiler_compile)
|
||||
|
||||
def CCompiler_customize_cmd(self, cmd, ignore=()):
|
||||
"""
|
||||
Customize compiler using distutils command.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cmd : class instance
|
||||
An instance inheriting from `distutils.cmd.Command`.
|
||||
ignore : sequence of str, optional
|
||||
List of `CCompiler` commands (without ``'set_'``) that should not be
|
||||
altered. Strings that are checked for are:
|
||||
``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
|
||||
'rpath', 'link_objects')``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
|
||||
"""
|
||||
log.info('customize %s using %s' % (self.__class__.__name__,
|
||||
cmd.__class__.__name__))
|
||||
|
||||
if hasattr(self, 'compiler') and 'clang' in self.compiler[0]:
|
||||
# clang defaults to a non-strict floating error point model.
|
||||
# Since NumPy and most Python libs give warnings for these, override:
|
||||
self.compiler.append('-ffp-exception-behavior=strict')
|
||||
|
||||
def allow(attr):
|
||||
return getattr(cmd, attr, None) is not None and attr not in ignore
|
||||
|
||||
if allow('include_dirs'):
|
||||
self.set_include_dirs(cmd.include_dirs)
|
||||
if allow('define'):
|
||||
for (name, value) in cmd.define:
|
||||
self.define_macro(name, value)
|
||||
if allow('undef'):
|
||||
for macro in cmd.undef:
|
||||
self.undefine_macro(macro)
|
||||
if allow('libraries'):
|
||||
self.set_libraries(self.libraries + cmd.libraries)
|
||||
if allow('library_dirs'):
|
||||
self.set_library_dirs(self.library_dirs + cmd.library_dirs)
|
||||
if allow('rpath'):
|
||||
self.set_runtime_library_dirs(cmd.rpath)
|
||||
if allow('link_objects'):
|
||||
self.set_link_objects(cmd.link_objects)
|
||||
|
||||
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
|
||||
|
||||
def _compiler_to_string(compiler):
|
||||
props = []
|
||||
mx = 0
|
||||
keys = list(compiler.executables.keys())
|
||||
for key in ['version', 'libraries', 'library_dirs',
|
||||
'object_switch', 'compile_switch',
|
||||
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
|
||||
if key not in keys:
|
||||
keys.append(key)
|
||||
for key in keys:
|
||||
if hasattr(compiler, key):
|
||||
v = getattr(compiler, key)
|
||||
mx = max(mx, len(key))
|
||||
props.append((key, repr(v)))
|
||||
fmt = '%-' + repr(mx+1) + 's = %s'
|
||||
lines = [fmt % prop for prop in props]
|
||||
return '\n'.join(lines)
|
||||
|
||||
def CCompiler_show_customization(self):
|
||||
"""
|
||||
Print the compiler customizations to stdout.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
|
||||
Notes
|
||||
-----
|
||||
Printing is only done if the distutils log threshold is < 2.
|
||||
|
||||
"""
|
||||
try:
|
||||
self.get_version()
|
||||
except Exception:
|
||||
pass
|
||||
if log._global_log.threshold<2:
|
||||
print('*'*80)
|
||||
print(self.__class__)
|
||||
print(_compiler_to_string(self))
|
||||
print('*'*80)
|
||||
|
||||
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
|
||||
|
||||
def CCompiler_customize(self, dist, need_cxx=0):
|
||||
"""
|
||||
Do any platform-specific customization of a compiler instance.
|
||||
|
||||
This method calls `distutils.sysconfig.customize_compiler` for
|
||||
platform-specific customization, as well as optionally remove a flag
|
||||
to suppress spurious warnings in case C++ code is being compiled.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dist : object
|
||||
This parameter is not used for anything.
|
||||
need_cxx : bool, optional
|
||||
Whether or not C++ has to be compiled. If so (True), the
|
||||
``"-Wstrict-prototypes"`` option is removed to prevent spurious
|
||||
warnings. Default is False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
|
||||
Notes
|
||||
-----
|
||||
All the default options used by distutils can be extracted with::
|
||||
|
||||
from distutils import sysconfig
|
||||
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
|
||||
'CCSHARED', 'LDSHARED', 'SO')
|
||||
|
||||
"""
|
||||
# See FCompiler.customize for suggested usage.
|
||||
log.info('customize %s' % (self.__class__.__name__))
|
||||
customize_compiler(self)
|
||||
if need_cxx:
|
||||
# In general, distutils uses -Wstrict-prototypes, but this option is
|
||||
# not valid for C++ code, only for C. Remove it if it's there to
|
||||
# avoid a spurious warning on every compilation.
|
||||
try:
|
||||
self.compiler_so.remove('-Wstrict-prototypes')
|
||||
except (AttributeError, ValueError):
|
||||
pass
|
||||
|
||||
if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
|
||||
if not self.compiler_cxx:
|
||||
if self.compiler[0].startswith('gcc'):
|
||||
a, b = 'gcc', 'g++'
|
||||
else:
|
||||
a, b = 'cc', 'c++'
|
||||
self.compiler_cxx = [self.compiler[0].replace(a, b)]\
|
||||
+ self.compiler[1:]
|
||||
else:
|
||||
if hasattr(self, 'compiler'):
|
||||
log.warn("#### %s #######" % (self.compiler,))
|
||||
if not hasattr(self, 'compiler_cxx'):
|
||||
log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
|
||||
|
||||
|
||||
# check if compiler supports gcc style automatic dependencies
|
||||
# run on every extension so skip for known good compilers
|
||||
if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
|
||||
'g++' in self.compiler[0] or
|
||||
'clang' in self.compiler[0]):
|
||||
self._auto_depends = True
|
||||
elif os.name == 'posix':
|
||||
import tempfile
|
||||
import shutil
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
try:
|
||||
fn = os.path.join(tmpdir, "file.c")
|
||||
with open(fn, "w") as f:
|
||||
f.write("int a;\n")
|
||||
self.compile([fn], output_dir=tmpdir,
|
||||
extra_preargs=['-MMD', '-MF', fn + '.d'])
|
||||
self._auto_depends = True
|
||||
except CompileError:
|
||||
self._auto_depends = False
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
return
|
||||
|
||||
replace_method(CCompiler, 'customize', CCompiler_customize)
|
||||
|
||||
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
|
||||
"""
|
||||
Simple matching of version numbers, for use in CCompiler and FCompiler.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
pat : str, optional
|
||||
A regular expression matching version numbers.
|
||||
Default is ``r'[-.\\d]+'``.
|
||||
ignore : str, optional
|
||||
A regular expression matching patterns to skip.
|
||||
Default is ``''``, in which case nothing is skipped.
|
||||
start : str, optional
|
||||
A regular expression matching the start of where to start looking
|
||||
for version numbers.
|
||||
Default is ``''``, in which case searching is started at the
|
||||
beginning of the version string given to `matcher`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
matcher : callable
|
||||
A function that is appropriate to use as the ``.version_match``
|
||||
attribute of a `CCompiler` class. `matcher` takes a single parameter,
|
||||
a version string.
|
||||
|
||||
"""
|
||||
def matcher(self, version_string):
|
||||
# version string may appear in the second line, so getting rid
|
||||
# of new lines:
|
||||
version_string = version_string.replace('\n', ' ')
|
||||
pos = 0
|
||||
if start:
|
||||
m = re.match(start, version_string)
|
||||
if not m:
|
||||
return None
|
||||
pos = m.end()
|
||||
while True:
|
||||
m = re.search(pat, version_string[pos:])
|
||||
if not m:
|
||||
return None
|
||||
if ignore and re.match(ignore, m.group(0)):
|
||||
pos = m.end()
|
||||
continue
|
||||
break
|
||||
return m.group(0)
|
||||
return matcher
|
||||
|
||||
def CCompiler_get_version(self, force=False, ok_status=[0]):
|
||||
"""
|
||||
Return compiler version, or None if compiler is not available.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
force : bool, optional
|
||||
If True, force a new determination of the version, even if the
|
||||
compiler already has a version attribute. Default is False.
|
||||
ok_status : list of int, optional
|
||||
The list of status values returned by the version look-up process
|
||||
for which a version string is returned. If the status value is not
|
||||
in `ok_status`, None is returned. Default is ``[0]``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
version : str or None
|
||||
Version string, in the format of `distutils.version.LooseVersion`.
|
||||
|
||||
"""
|
||||
if not force and hasattr(self, 'version'):
|
||||
return self.version
|
||||
self.find_executables()
|
||||
try:
|
||||
version_cmd = self.version_cmd
|
||||
except AttributeError:
|
||||
return None
|
||||
if not version_cmd or not version_cmd[0]:
|
||||
return None
|
||||
try:
|
||||
matcher = self.version_match
|
||||
except AttributeError:
|
||||
try:
|
||||
pat = self.version_pattern
|
||||
except AttributeError:
|
||||
return None
|
||||
def matcher(version_string):
|
||||
m = re.match(pat, version_string)
|
||||
if not m:
|
||||
return None
|
||||
version = m.group('version')
|
||||
return version
|
||||
|
||||
try:
|
||||
output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
output = exc.output
|
||||
status = exc.returncode
|
||||
except OSError:
|
||||
# match the historical returns for a parent
|
||||
# exception class caught by exec_command()
|
||||
status = 127
|
||||
output = b''
|
||||
else:
|
||||
# output isn't actually a filepath but we do this
|
||||
# for now to match previous distutils behavior
|
||||
output = filepath_from_subprocess_output(output)
|
||||
status = 0
|
||||
|
||||
version = None
|
||||
if status in ok_status:
|
||||
version = matcher(output)
|
||||
if version:
|
||||
version = LooseVersion(version)
|
||||
self.version = version
|
||||
return version
|
||||
|
||||
replace_method(CCompiler, 'get_version', CCompiler_get_version)
|
||||
|
||||
def CCompiler_cxx_compiler(self):
|
||||
"""
|
||||
Return the C++ compiler.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
None
|
||||
|
||||
Returns
|
||||
-------
|
||||
cxx : class instance
|
||||
The C++ compiler, as a `CCompiler` instance.
|
||||
|
||||
"""
|
||||
if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
|
||||
return self
|
||||
|
||||
cxx = copy(self)
|
||||
cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]
|
||||
if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
|
||||
# AIX needs the ld_so_aix script included with Python
|
||||
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
|
||||
+ cxx.linker_so[2:]
|
||||
else:
|
||||
cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
|
||||
return cxx
|
||||
|
||||
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
|
||||
|
||||
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
|
||||
"Intel C Compiler for 32-bit applications")
|
||||
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
|
||||
"Intel C Itanium Compiler for Itanium-based applications")
|
||||
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
|
||||
"Intel C Compiler for 64-bit applications")
|
||||
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
|
||||
"Intel C Compiler for 32-bit applications on Windows")
|
||||
compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
|
||||
"Intel C Compiler for 64-bit applications on Windows")
|
||||
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
|
||||
"PathScale Compiler for SiCortex-based applications")
|
||||
ccompiler._default_compilers += (('linux.*', 'intel'),
|
||||
('linux.*', 'intele'),
|
||||
('linux.*', 'intelem'),
|
||||
('linux.*', 'pathcc'),
|
||||
('nt', 'intelw'),
|
||||
('nt', 'intelemw'))
|
||||
|
||||
if sys.platform == 'win32':
|
||||
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
|
||||
"Mingw32 port of GNU C Compiler for Win32"\
|
||||
"(for MSC built Python)")
|
||||
if mingw32():
|
||||
# On windows platforms, we want to default to mingw32 (gcc)
|
||||
# because msvc can't build blitz stuff.
|
||||
log.info('Setting mingw32 as default compiler for nt.')
|
||||
ccompiler._default_compilers = (('nt', 'mingw32'),) \
|
||||
+ ccompiler._default_compilers
|
||||
|
||||
|
||||
_distutils_new_compiler = new_compiler
|
||||
def new_compiler (plat=None,
|
||||
compiler=None,
|
||||
verbose=None,
|
||||
dry_run=0,
|
||||
force=0):
|
||||
# Try first C compilers from numpy.distutils.
|
||||
if verbose is None:
|
||||
verbose = log.get_threshold() <= log.INFO
|
||||
if plat is None:
|
||||
plat = os.name
|
||||
try:
|
||||
if compiler is None:
|
||||
compiler = get_default_compiler(plat)
|
||||
(module_name, class_name, long_description) = compiler_class[compiler]
|
||||
except KeyError:
|
||||
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
|
||||
if compiler is not None:
|
||||
msg = msg + " with '%s' compiler" % compiler
|
||||
raise DistutilsPlatformError(msg)
|
||||
module_name = "numpy.distutils." + module_name
|
||||
try:
|
||||
__import__ (module_name)
|
||||
except ImportError as e:
|
||||
msg = str(e)
|
||||
log.info('%s in numpy.distutils; trying from distutils',
|
||||
str(msg))
|
||||
module_name = module_name[6:]
|
||||
try:
|
||||
__import__(module_name)
|
||||
except ImportError as e:
|
||||
msg = str(e)
|
||||
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
|
||||
module_name)
|
||||
try:
|
||||
module = sys.modules[module_name]
|
||||
klass = vars(module)[class_name]
|
||||
except KeyError:
|
||||
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
|
||||
"in module '%s'") % (class_name, module_name))
|
||||
compiler = klass(None, dry_run, force)
|
||||
compiler.verbose = verbose
|
||||
log.debug('new_compiler returns %s' % (klass))
|
||||
return compiler
|
||||
|
||||
ccompiler.new_compiler = new_compiler
|
||||
|
||||
_distutils_gen_lib_options = gen_lib_options
|
||||
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
|
||||
# the version of this function provided by CPython allows the following
|
||||
# to return lists, which are unpacked automatically:
|
||||
# - compiler.runtime_library_dir_option
|
||||
# our version extends the behavior to:
|
||||
# - compiler.library_dir_option
|
||||
# - compiler.library_option
|
||||
# - compiler.find_library_file
|
||||
r = _distutils_gen_lib_options(compiler, library_dirs,
|
||||
runtime_library_dirs, libraries)
|
||||
lib_opts = []
|
||||
for i in r:
|
||||
if is_sequence(i):
|
||||
lib_opts.extend(list(i))
|
||||
else:
|
||||
lib_opts.append(i)
|
||||
return lib_opts
|
||||
ccompiler.gen_lib_options = gen_lib_options
|
||||
|
||||
# Also fix up the various compiler modules, which do
|
||||
# from distutils.ccompiler import gen_lib_options
|
||||
# Don't bother with mwerks, as we don't support Classic Mac.
|
||||
for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
|
||||
_m = sys.modules.get('distutils.' + _cc + 'compiler')
|
||||
if _m is not None:
|
||||
setattr(_m, 'gen_lib_options', gen_lib_options)
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -1,25 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float32x4_t v1 = vdupq_n_f32(1.0f), v2 = vdupq_n_f32(2.0f);
|
||||
/* MAXMIN */
|
||||
int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
|
||||
ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
|
||||
/* ROUNDING */
|
||||
ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
|
||||
#ifdef __aarch64__
|
||||
{
|
||||
float64x2_t vd1 = vdupq_n_f64(1.0), vd2 = vdupq_n_f64(2.0);
|
||||
/* MAXMIN */
|
||||
ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
|
||||
ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
|
||||
/* ROUNDING */
|
||||
ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
|
||||
}
|
||||
#endif
|
||||
return ret;
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
uint8x16_t v1 = vdupq_n_u8((unsigned char)1), v2 = vdupq_n_u8((unsigned char)2);
|
||||
uint32x4_t va = vdupq_n_u32(3);
|
||||
int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
|
||||
#ifdef __aarch64__
|
||||
ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
@@ -1,17 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float16x8_t vhp = vdupq_n_f16((float16_t)1);
|
||||
float16x4_t vlhp = vdup_n_f16((float16_t)1);
|
||||
float32x4_t vf = vdupq_n_f32(1.0f);
|
||||
float32x2_t vlf = vdup_n_f32(1.0f);
|
||||
|
||||
int ret = (int)vget_lane_f32(vfmlal_low_u32(vlf, vlhp, vlhp), 0);
|
||||
ret += (int)vgetq_lane_f32(vfmlslq_high_u32(vf, vhp, vhp), 0);
|
||||
|
||||
return ret;
|
||||
}
|
@@ -1,14 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float16x8_t vhp = vdupq_n_f16((float16_t)-1);
|
||||
float16x4_t vlhp = vdup_n_f16((float16_t)-1);
|
||||
|
||||
int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
|
||||
ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
|
||||
return ret;
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX__
|
||||
#error "HOST/ARCH doesn't support AVX"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
|
||||
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX2__
|
||||
#error "HOST/ARCH doesn't support AVX2"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
|
||||
return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
|
||||
}
|
@@ -1,22 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX512VNNI__
|
||||
#error "HOST/ARCH doesn't support CascadeLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
/* VNNI */
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@@ -1,24 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
|
||||
#error "HOST/ARCH doesn't support CannonLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
/* IFMA */
|
||||
a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
|
||||
/* VMBI */
|
||||
a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@@ -1,26 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
|
||||
#error "HOST/ARCH doesn't support IceLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
/* VBMI2 */
|
||||
a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
|
||||
/* BITLAG */
|
||||
a = _mm512_popcnt_epi8(a);
|
||||
/* VPOPCNTDQ */
|
||||
a = _mm512_popcnt_epi64(a);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@@ -1,25 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512ER__) || !defined(__AVX512PF__)
|
||||
#error "HOST/ARCH doesn't support Knights Landing AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int base[128];
|
||||
__m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
|
||||
/* ER */
|
||||
__m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
|
||||
/* PF */
|
||||
_mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
|
||||
return base[0];
|
||||
}
|
@@ -1,30 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
|
||||
#error "HOST/ARCH doesn't support Knights Mill AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
||||
__m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
|
||||
|
||||
/* 4FMAPS */
|
||||
b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
|
||||
/* 4VNNIW */
|
||||
a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
|
||||
/* VPOPCNTDQ */
|
||||
a = _mm512_popcnt_epi64(a);
|
||||
|
||||
a = _mm512_add_epi32(a, _mm512_castps_si512(b));
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@@ -1,26 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
|
||||
#error "HOST/ARCH doesn't support SkyLake AVX512 features"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
||||
/* VL */
|
||||
__m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
|
||||
/* DQ */
|
||||
__m512i b = _mm512_broadcast_i32x8(a);
|
||||
/* BW */
|
||||
b = _mm512_abs_epi16(b);
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX512CD__
|
||||
#error "HOST/ARCH doesn't support AVX512CD"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __AVX512F__
|
||||
#error "HOST/ARCH doesn't support AVX512F"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
||||
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
||||
}
|
@@ -1,22 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __F16C__
|
||||
#error "HOST/ARCH doesn't support F16C"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <emmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
|
||||
__m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
|
||||
return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
|
||||
}
|
@@ -1,22 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__FMA__) && !defined(__AVX2__)
|
||||
#error "HOST/ARCH doesn't support FMA3"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <xmmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
|
||||
a = _mm256_fmadd_ps(a, a, a);
|
||||
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
||||
}
|
@@ -1,13 +0,0 @@
|
||||
#include <immintrin.h>
|
||||
#ifdef _MSC_VER
|
||||
#include <ammintrin.h>
|
||||
#else
|
||||
#include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
|
||||
a = _mm256_macc_ps(a, a, a);
|
||||
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float32x4_t v1 = vdupq_n_f32(1.0f), v2 = vdupq_n_f32(2.0f);
|
||||
int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
|
||||
#ifdef __aarch64__
|
||||
float64x2_t vd1 = vdupq_n_f64(1.0), vd2 = vdupq_n_f64(2.0);
|
||||
ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
@@ -1,11 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
short z4[] = {0, 0, 0, 0, 0, 0, 0, 0};
|
||||
float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16((const short*)z4));
|
||||
return (int)vgetq_lane_f32(v_z4, 0);
|
||||
}
|
@@ -1,19 +0,0 @@
|
||||
#ifdef _MSC_VER
|
||||
#include <Intrin.h>
|
||||
#endif
|
||||
#include <arm_neon.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float32x4_t v1 = vdupq_n_f32(1.0f);
|
||||
float32x4_t v2 = vdupq_n_f32(2.0f);
|
||||
float32x4_t v3 = vdupq_n_f32(3.0f);
|
||||
int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
|
||||
#ifdef __aarch64__
|
||||
float64x2_t vd1 = vdupq_n_f64(1.0);
|
||||
float64x2_t vd2 = vdupq_n_f64(2.0);
|
||||
float64x2_t vd3 = vdupq_n_f64(3.0);
|
||||
ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
@@ -1,32 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env vr `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#if !defined(__SSE4_2__) && !defined(__POPCNT__)
|
||||
#error "HOST/ARCH doesn't support POPCNT"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <nmmintrin.h>
|
||||
#else
|
||||
#include <popcntintrin.h>
|
||||
#endif
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// To make sure popcnt instructions are generated
|
||||
// and been tested against the assembler
|
||||
unsigned long long a = *((unsigned long long*)argv[argc-1]);
|
||||
unsigned int b = *((unsigned int*)argv[argc-2]);
|
||||
|
||||
#if defined(_M_X64) || defined(__x86_64__)
|
||||
a = _mm_popcnt_u64(a);
|
||||
#endif
|
||||
b = _mm_popcnt_u32(b);
|
||||
return (int)a + b;
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE__
|
||||
#error "HOST/ARCH doesn't support SSE"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <xmmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE2__
|
||||
#error "HOST/ARCH doesn't support SSE2"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <emmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
|
||||
return _mm_cvtsi128_si32(a);
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE3__
|
||||
#error "HOST/ARCH doesn't support SSE3"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <pmmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE4_1__
|
||||
#error "HOST/ARCH doesn't support SSE41"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <smmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_floor_ps(_mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSE4_2__
|
||||
#error "HOST/ARCH doesn't support SSE42"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <smmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
|
||||
return (int)_mm_cvtss_f32(a);
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
||||
/*
|
||||
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
||||
* whether or not the build options for those features are specified.
|
||||
* Therefore, we must test #definitions of CPU features when option native/host
|
||||
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
||||
* the test will be broken and leads to enable all possible features.
|
||||
*/
|
||||
#ifndef __SSSE3__
|
||||
#error "HOST/ARCH doesn't support SSSE3"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <tmmintrin.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
|
||||
return (int)_mm_cvtsi128_si32(a);
|
||||
}
|
@@ -1,21 +0,0 @@
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
|
||||
#define vsx_ld vec_vsx_ld
|
||||
#define vsx_st vec_vsx_st
|
||||
#else
|
||||
#define vsx_ld vec_xl
|
||||
#define vsx_st vec_xst
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
unsigned int zout[4];
|
||||
unsigned int z4[] = {0, 0, 0, 0};
|
||||
__vector unsigned int v_z4 = vsx_ld(0, z4);
|
||||
vsx_st(v_z4, 0, zout);
|
||||
return zout[0];
|
||||
}
|
@@ -1,13 +0,0 @@
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
typedef __vector unsigned long long v_uint64x2;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
v_uint64x2 z2 = (v_uint64x2){0, 0};
|
||||
z2 = (v_uint64x2)vec_cmpeq(z2, z2);
|
||||
return (int)vec_extract(z2, 0);
|
||||
}
|
@@ -1,13 +0,0 @@
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
typedef __vector unsigned int v_uint32x4;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
|
||||
z4 = vec_absd(z4, z4);
|
||||
return (int)vec_extract(z4, 0);
|
||||
}
|
@@ -1,12 +0,0 @@
|
||||
#include <immintrin.h>
|
||||
#ifdef _MSC_VER
|
||||
#include <ammintrin.h>
|
||||
#else
|
||||
#include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
__m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
|
||||
return _mm_cvtsi128_si32(a);
|
||||
}
|
@@ -1,18 +0,0 @@
|
||||
#include <immintrin.h>
|
||||
/**
|
||||
* Test BW mask operations due to:
|
||||
* - MSVC has supported it since vs2019 see,
|
||||
* https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
|
||||
* - Clang >= v8.0
|
||||
* - GCC >= v7.1
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
__mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
|
||||
m64 = _kor_mask64(m64, m64);
|
||||
m64 = _kxor_mask64(m64, m64);
|
||||
m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
|
||||
m64 = _mm512_kunpackd(m64, m64);
|
||||
m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
|
||||
return (int)_cvtmask64_u64(m64);
|
||||
}
|
@@ -1,16 +0,0 @@
|
||||
#include <immintrin.h>
|
||||
/**
|
||||
* Test DQ mask operations due to:
|
||||
* - MSVC has supported it since vs2019 see,
|
||||
* https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
|
||||
* - Clang >= v8.0
|
||||
* - GCC >= v7.1
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
__mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
|
||||
m8 = _kor_mask8(m8, m8);
|
||||
m8 = _kxor_mask8(m8, m8);
|
||||
m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
|
||||
return (int)_cvtmask8_u32(m8);
|
||||
}
|
@@ -1,41 +0,0 @@
|
||||
#include <immintrin.h>
|
||||
/**
|
||||
* The following intrinsics don't have direct native support but compilers
|
||||
* tend to emulate them.
|
||||
* They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
__m512 one_ps = _mm512_set1_ps(1.0f);
|
||||
__m512d one_pd = _mm512_set1_pd(1.0);
|
||||
__m512i one_i64 = _mm512_set1_epi64(1);
|
||||
// add
|
||||
float sum_ps = _mm512_reduce_add_ps(one_ps);
|
||||
double sum_pd = _mm512_reduce_add_pd(one_pd);
|
||||
int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
|
||||
sum_int += (int)_mm512_reduce_add_epi32(one_i64);
|
||||
// mul
|
||||
sum_ps += _mm512_reduce_mul_ps(one_ps);
|
||||
sum_pd += _mm512_reduce_mul_pd(one_pd);
|
||||
sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
|
||||
sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
|
||||
// min
|
||||
sum_ps += _mm512_reduce_min_ps(one_ps);
|
||||
sum_pd += _mm512_reduce_min_pd(one_pd);
|
||||
sum_int += (int)_mm512_reduce_min_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_min_epu32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_min_epi64(one_i64);
|
||||
// max
|
||||
sum_ps += _mm512_reduce_max_ps(one_ps);
|
||||
sum_pd += _mm512_reduce_max_pd(one_pd);
|
||||
sum_int += (int)_mm512_reduce_max_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_max_epu32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_max_epi64(one_i64);
|
||||
// and
|
||||
sum_int += (int)_mm512_reduce_and_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_and_epi64(one_i64);
|
||||
// or
|
||||
sum_int += (int)_mm512_reduce_or_epi32(one_i64);
|
||||
sum_int += (int)_mm512_reduce_or_epi64(one_i64);
|
||||
return (int)sum_ps + (int)sum_pd + sum_int;
|
||||
}
|
@@ -1,36 +0,0 @@
|
||||
/**
|
||||
* Testing ASM VSX register number fixer '%x<n>'
|
||||
*
|
||||
* old versions of CLANG doesn't support %x<n> in the inline asm template
|
||||
* which fixes register number when using any of the register constraints wa, wd, wf.
|
||||
*
|
||||
* xref:
|
||||
* - https://bugs.llvm.org/show_bug.cgi?id=31837
|
||||
* - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
|
||||
*/
|
||||
#ifndef __VSX__
|
||||
#error "VSX is not supported"
|
||||
#endif
|
||||
#include <altivec.h>
|
||||
|
||||
#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
|
||||
#define vsx_ld vec_vsx_ld
|
||||
#define vsx_st vec_vsx_st
|
||||
#else
|
||||
#define vsx_ld vec_xl
|
||||
#define vsx_st vec_xst
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
float z4[] = {0, 0, 0, 0};
|
||||
signed int zout[] = {0, 0, 0, 0};
|
||||
|
||||
__vector float vz4 = vsx_ld(0, z4);
|
||||
__vector signed int asm_ret = vsx_ld(0, zout);
|
||||
|
||||
__asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
|
||||
|
||||
vsx_st(asm_ret, 0, zout);
|
||||
return zout[0];
|
||||
}
|
@@ -1 +0,0 @@
|
||||
int test_flags;
|
@@ -1,41 +0,0 @@
|
||||
"""distutils.command
|
||||
|
||||
Package containing implementation of all the standard Distutils
|
||||
commands.
|
||||
|
||||
"""
|
||||
def test_na_writable_attributes_deletion():
|
||||
a = np.NA(2)
|
||||
attr = ['payload', 'dtype']
|
||||
for s in attr:
|
||||
assert_raises(AttributeError, delattr, a, s)
|
||||
|
||||
|
||||
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
|
||||
|
||||
distutils_all = [ #'build_py',
|
||||
'clean',
|
||||
'install_clib',
|
||||
'install_scripts',
|
||||
'bdist',
|
||||
'bdist_dumb',
|
||||
'bdist_wininst',
|
||||
]
|
||||
|
||||
__import__('distutils.command', globals(), locals(), distutils_all)
|
||||
|
||||
__all__ = ['build',
|
||||
'config_compiler',
|
||||
'config',
|
||||
'build_src',
|
||||
'build_py',
|
||||
'build_ext',
|
||||
'build_clib',
|
||||
'build_scripts',
|
||||
'install',
|
||||
'install_data',
|
||||
'install_headers',
|
||||
'install_lib',
|
||||
'bdist_rpm',
|
||||
'sdist',
|
||||
] + distutils_all
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,148 +0,0 @@
|
||||
"""This module implements additional tests ala autoconf which can be useful.
|
||||
|
||||
"""
|
||||
import textwrap
|
||||
|
||||
# We put them here since they could be easily reused outside numpy.distutils
|
||||
|
||||
def check_inline(cmd):
|
||||
"""Return the inline identifier (may be empty)."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#ifndef __cplusplus
|
||||
static %(inline)s int static_func (void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
%(inline)s int nostatic_func (void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif""")
|
||||
|
||||
for kw in ['inline', '__inline__', '__inline']:
|
||||
st = cmd.try_compile(body % {'inline': kw}, None, None)
|
||||
if st:
|
||||
return kw
|
||||
|
||||
return ''
|
||||
|
||||
|
||||
def check_restrict(cmd):
|
||||
"""Return the restrict identifier (may be empty)."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
static int static_func (char * %(restrict)s a)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
|
||||
for kw in ['restrict', '__restrict__', '__restrict']:
|
||||
st = cmd.try_compile(body % {'restrict': kw}, None, None)
|
||||
if st:
|
||||
return kw
|
||||
|
||||
return ''
|
||||
|
||||
|
||||
def check_compiler_gcc(cmd):
|
||||
"""Check if the compiler is GCC."""
|
||||
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
int
|
||||
main()
|
||||
{
|
||||
#if (! defined __GNUC__)
|
||||
#error gcc required
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
return cmd.try_compile(body, None, None)
|
||||
|
||||
|
||||
def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
|
||||
"""
|
||||
Check that the gcc version is at least the specified version."""
|
||||
|
||||
cmd._check_compiler()
|
||||
version = '.'.join([str(major), str(minor), str(patchlevel)])
|
||||
body = textwrap.dedent("""
|
||||
int
|
||||
main()
|
||||
{
|
||||
#if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
|
||||
(__GNUC_MINOR__ < %(minor)d) || \\
|
||||
(__GNUC_PATCHLEVEL__ < %(patchlevel)d)
|
||||
#error gcc >= %(version)s required
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
kw = {'version': version, 'major': major, 'minor': minor,
|
||||
'patchlevel': patchlevel}
|
||||
|
||||
return cmd.try_compile(body % kw, None, None)
|
||||
|
||||
|
||||
def check_gcc_function_attribute(cmd, attribute, name):
|
||||
"""Return True if the given function attribute is supported."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#pragma GCC diagnostic error "-Wattributes"
|
||||
#pragma clang diagnostic error "-Wattributes"
|
||||
|
||||
int %s %s(void* unused)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""") % (attribute, name)
|
||||
return cmd.try_compile(body, None, None) != 0
|
||||
|
||||
|
||||
def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
|
||||
include):
|
||||
"""Return True if the given function attribute is supported with
|
||||
intrinsics."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#include<%s>
|
||||
int %s %s(void)
|
||||
{
|
||||
%s;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""") % (include, attribute, name, code)
|
||||
return cmd.try_compile(body, None, None) != 0
|
||||
|
||||
|
||||
def check_gcc_variable_attribute(cmd, attribute):
|
||||
"""Return True if the given variable attribute is supported."""
|
||||
cmd._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
#pragma GCC diagnostic error "-Wattributes"
|
||||
#pragma clang diagnostic error "-Wattributes"
|
||||
|
||||
int %s foo;
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
""") % (attribute, )
|
||||
return cmd.try_compile(body, None, None) != 0
|
@@ -1,22 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
if 'setuptools' in sys.modules:
|
||||
from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
|
||||
else:
|
||||
from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
|
||||
|
||||
class bdist_rpm(old_bdist_rpm):
|
||||
|
||||
def _make_spec_file(self):
|
||||
spec_file = old_bdist_rpm._make_spec_file(self)
|
||||
|
||||
# Replace hardcoded setup.py script name
|
||||
# with the real setup script name.
|
||||
setup_py = os.path.basename(sys.argv[0])
|
||||
if setup_py == 'setup.py':
|
||||
return spec_file
|
||||
new_spec_file = []
|
||||
for line in spec_file:
|
||||
line = line.replace('setup.py', setup_py)
|
||||
new_spec_file.append(line)
|
||||
return new_spec_file
|
@@ -1,61 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
from distutils.command.build import build as old_build
|
||||
from distutils.util import get_platform
|
||||
from numpy.distutils.command.config_compiler import show_fortran_compilers
|
||||
|
||||
class build(old_build):
|
||||
|
||||
sub_commands = [('config_cc', lambda *args: True),
|
||||
('config_fc', lambda *args: True),
|
||||
('build_src', old_build.has_ext_modules),
|
||||
] + old_build.sub_commands
|
||||
|
||||
user_options = old_build.user_options + [
|
||||
('fcompiler=', None,
|
||||
"specify the Fortran compiler type"),
|
||||
('warn-error', None,
|
||||
"turn all warnings into errors (-Werror)"),
|
||||
('cpu-baseline=', None,
|
||||
"specify a list of enabled baseline CPU optimizations"),
|
||||
('cpu-dispatch=', None,
|
||||
"specify a list of dispatched CPU optimizations"),
|
||||
('disable-optimization', None,
|
||||
"disable CPU optimized code(dispatch,simd,fast...)"),
|
||||
('simd-test=', None,
|
||||
"specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
|
||||
]
|
||||
|
||||
help_options = old_build.help_options + [
|
||||
('help-fcompiler', None, "list available Fortran compilers",
|
||||
show_fortran_compilers),
|
||||
]
|
||||
|
||||
def initialize_options(self):
|
||||
old_build.initialize_options(self)
|
||||
self.fcompiler = None
|
||||
self.warn_error = False
|
||||
self.cpu_baseline = "min"
|
||||
self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
|
||||
self.disable_optimization = False
|
||||
"""
|
||||
the '_simd' module is a very large. Adding more dispatched features
|
||||
will increase binary size and compile time. By default we minimize
|
||||
the targeted features to those most commonly used by the NumPy SIMD interface(NPYV),
|
||||
NOTE: any specified features will be ignored if they're:
|
||||
- part of the baseline(--cpu-baseline)
|
||||
- not part of dispatch-able features(--cpu-dispatch)
|
||||
- not supported by compiler or platform
|
||||
"""
|
||||
self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD"
|
||||
|
||||
def finalize_options(self):
|
||||
build_scripts = self.build_scripts
|
||||
old_build.finalize_options(self)
|
||||
plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
|
||||
if build_scripts is None:
|
||||
self.build_scripts = os.path.join(self.build_base,
|
||||
'scripts' + plat_specifier)
|
||||
|
||||
def run(self):
|
||||
old_build.run(self)
|
@@ -1,431 +0,0 @@
|
||||
""" Modified version of build_clib that handles fortran source files.
|
||||
"""
|
||||
import os
|
||||
from glob import glob
|
||||
import shutil
|
||||
from distutils.command.build_clib import build_clib as old_build_clib
|
||||
from distutils.errors import DistutilsSetupError, DistutilsError, \
|
||||
DistutilsFileError
|
||||
|
||||
from numpy.distutils import log
|
||||
from distutils.dep_util import newer_group
|
||||
from numpy.distutils.misc_util import (
|
||||
filter_sources, get_lib_source_files, get_numpy_include_dirs,
|
||||
has_cxx_sources, has_f_sources, is_sequence
|
||||
)
|
||||
from numpy.distutils.ccompiler_opt import new_ccompiler_opt
|
||||
|
||||
# Fix Python distutils bug sf #1718574:
|
||||
_l = old_build_clib.user_options
|
||||
for _i in range(len(_l)):
|
||||
if _l[_i][0] in ['build-clib', 'build-temp']:
|
||||
_l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
|
||||
#
|
||||
|
||||
|
||||
class build_clib(old_build_clib):
|
||||
|
||||
description = "build C/C++/F libraries used by Python extensions"
|
||||
|
||||
user_options = old_build_clib.user_options + [
|
||||
('fcompiler=', None,
|
||||
"specify the Fortran compiler type"),
|
||||
('inplace', 'i', 'Build in-place'),
|
||||
('parallel=', 'j',
|
||||
"number of parallel jobs"),
|
||||
('warn-error', None,
|
||||
"turn all warnings into errors (-Werror)"),
|
||||
('cpu-baseline=', None,
|
||||
"specify a list of enabled baseline CPU optimizations"),
|
||||
('cpu-dispatch=', None,
|
||||
"specify a list of dispatched CPU optimizations"),
|
||||
('disable-optimization', None,
|
||||
"disable CPU optimized code(dispatch,simd,fast...)"),
|
||||
]
|
||||
|
||||
boolean_options = old_build_clib.boolean_options + \
|
||||
['inplace', 'warn-error', 'disable-optimization']
|
||||
|
||||
def initialize_options(self):
|
||||
old_build_clib.initialize_options(self)
|
||||
self.fcompiler = None
|
||||
self.inplace = 0
|
||||
self.parallel = None
|
||||
self.warn_error = None
|
||||
self.cpu_baseline = None
|
||||
self.cpu_dispatch = None
|
||||
self.disable_optimization = None
|
||||
|
||||
|
||||
def finalize_options(self):
|
||||
if self.parallel:
|
||||
try:
|
||||
self.parallel = int(self.parallel)
|
||||
except ValueError as e:
|
||||
raise ValueError("--parallel/-j argument must be an integer") from e
|
||||
old_build_clib.finalize_options(self)
|
||||
self.set_undefined_options('build',
|
||||
('parallel', 'parallel'),
|
||||
('warn_error', 'warn_error'),
|
||||
('cpu_baseline', 'cpu_baseline'),
|
||||
('cpu_dispatch', 'cpu_dispatch'),
|
||||
('disable_optimization', 'disable_optimization')
|
||||
)
|
||||
|
||||
def have_f_sources(self):
|
||||
for (lib_name, build_info) in self.libraries:
|
||||
if has_f_sources(build_info.get('sources', [])):
|
||||
return True
|
||||
return False
|
||||
|
||||
def have_cxx_sources(self):
|
||||
for (lib_name, build_info) in self.libraries:
|
||||
if has_cxx_sources(build_info.get('sources', [])):
|
||||
return True
|
||||
return False
|
||||
|
||||
def run(self):
|
||||
if not self.libraries:
|
||||
return
|
||||
|
||||
# Make sure that library sources are complete.
|
||||
languages = []
|
||||
|
||||
# Make sure that extension sources are complete.
|
||||
self.run_command('build_src')
|
||||
|
||||
for (lib_name, build_info) in self.libraries:
|
||||
l = build_info.get('language', None)
|
||||
if l and l not in languages:
|
||||
languages.append(l)
|
||||
|
||||
from distutils.ccompiler import new_compiler
|
||||
self.compiler = new_compiler(compiler=self.compiler,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force)
|
||||
self.compiler.customize(self.distribution,
|
||||
need_cxx=self.have_cxx_sources())
|
||||
|
||||
if self.warn_error:
|
||||
self.compiler.compiler.append('-Werror')
|
||||
self.compiler.compiler_so.append('-Werror')
|
||||
|
||||
libraries = self.libraries
|
||||
self.libraries = None
|
||||
self.compiler.customize_cmd(self)
|
||||
self.libraries = libraries
|
||||
|
||||
self.compiler.show_customization()
|
||||
|
||||
if not self.disable_optimization:
|
||||
dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
|
||||
dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
|
||||
opt_cache_path = os.path.abspath(
|
||||
os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py')
|
||||
)
|
||||
if hasattr(self, "compiler_opt"):
|
||||
# By default `CCompilerOpt` update the cache at the exit of
|
||||
# the process, which may lead to duplicate building
|
||||
# (see build_extension()/force_rebuild) if run() called
|
||||
# multiple times within the same os process/thread without
|
||||
# giving the chance the previous instances of `CCompilerOpt`
|
||||
# to update the cache.
|
||||
self.compiler_opt.cache_flush()
|
||||
|
||||
self.compiler_opt = new_ccompiler_opt(
|
||||
compiler=self.compiler, dispatch_hpath=dispatch_hpath,
|
||||
cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
|
||||
cache_path=opt_cache_path
|
||||
)
|
||||
def report(copt):
|
||||
log.info("\n########### CLIB COMPILER OPTIMIZATION ###########")
|
||||
log.info(copt.report(full=True))
|
||||
|
||||
import atexit
|
||||
atexit.register(report, self.compiler_opt)
|
||||
|
||||
if self.have_f_sources():
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force,
|
||||
requiref90='f90' in languages,
|
||||
c_compiler=self.compiler)
|
||||
if self._f_compiler is not None:
|
||||
self._f_compiler.customize(self.distribution)
|
||||
|
||||
libraries = self.libraries
|
||||
self.libraries = None
|
||||
self._f_compiler.customize_cmd(self)
|
||||
self.libraries = libraries
|
||||
|
||||
self._f_compiler.show_customization()
|
||||
else:
|
||||
self._f_compiler = None
|
||||
|
||||
self.build_libraries(self.libraries)
|
||||
|
||||
if self.inplace:
|
||||
for l in self.distribution.installed_libraries:
|
||||
libname = self.compiler.library_filename(l.name)
|
||||
source = os.path.join(self.build_clib, libname)
|
||||
target = os.path.join(l.target_dir, libname)
|
||||
self.mkpath(l.target_dir)
|
||||
shutil.copy(source, target)
|
||||
|
||||
def get_source_files(self):
|
||||
self.check_library_list(self.libraries)
|
||||
filenames = []
|
||||
for lib in self.libraries:
|
||||
filenames.extend(get_lib_source_files(lib))
|
||||
return filenames
|
||||
|
||||
def build_libraries(self, libraries):
|
||||
for (lib_name, build_info) in libraries:
|
||||
self.build_a_library(build_info, lib_name, libraries)
|
||||
|
||||
def build_a_library(self, build_info, lib_name, libraries):
|
||||
# default compilers
|
||||
compiler = self.compiler
|
||||
fcompiler = self._f_compiler
|
||||
|
||||
sources = build_info.get('sources')
|
||||
if sources is None or not is_sequence(sources):
|
||||
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
|
||||
"'sources' must be present and must be " +
|
||||
"a list of source filenames") % lib_name)
|
||||
sources = list(sources)
|
||||
|
||||
c_sources, cxx_sources, f_sources, fmodule_sources \
|
||||
= filter_sources(sources)
|
||||
requiref90 = not not fmodule_sources or \
|
||||
build_info.get('language', 'c') == 'f90'
|
||||
|
||||
# save source type information so that build_ext can use it.
|
||||
source_languages = []
|
||||
if c_sources:
|
||||
source_languages.append('c')
|
||||
if cxx_sources:
|
||||
source_languages.append('c++')
|
||||
if requiref90:
|
||||
source_languages.append('f90')
|
||||
elif f_sources:
|
||||
source_languages.append('f77')
|
||||
build_info['source_languages'] = source_languages
|
||||
|
||||
lib_file = compiler.library_filename(lib_name,
|
||||
output_dir=self.build_clib)
|
||||
depends = sources + build_info.get('depends', [])
|
||||
|
||||
force_rebuild = self.force
|
||||
if not self.disable_optimization and not self.compiler_opt.is_cached():
|
||||
log.debug("Detected changes on compiler optimizations")
|
||||
force_rebuild = True
|
||||
if not (force_rebuild or newer_group(depends, lib_file, 'newer')):
|
||||
log.debug("skipping '%s' library (up-to-date)", lib_name)
|
||||
return
|
||||
else:
|
||||
log.info("building '%s' library", lib_name)
|
||||
|
||||
config_fc = build_info.get('config_fc', {})
|
||||
if fcompiler is not None and config_fc:
|
||||
log.info('using additional config_fc from setup script '
|
||||
'for fortran compiler: %s'
|
||||
% (config_fc,))
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force,
|
||||
requiref90=requiref90,
|
||||
c_compiler=self.compiler)
|
||||
if fcompiler is not None:
|
||||
dist = self.distribution
|
||||
base_config_fc = dist.get_option_dict('config_fc').copy()
|
||||
base_config_fc.update(config_fc)
|
||||
fcompiler.customize(base_config_fc)
|
||||
|
||||
# check availability of Fortran compilers
|
||||
if (f_sources or fmodule_sources) and fcompiler is None:
|
||||
raise DistutilsError("library %s has Fortran sources"
|
||||
" but no Fortran compiler found" % (lib_name))
|
||||
|
||||
if fcompiler is not None:
|
||||
fcompiler.extra_f77_compile_args = build_info.get(
|
||||
'extra_f77_compile_args') or []
|
||||
fcompiler.extra_f90_compile_args = build_info.get(
|
||||
'extra_f90_compile_args') or []
|
||||
|
||||
macros = build_info.get('macros')
|
||||
if macros is None:
|
||||
macros = []
|
||||
include_dirs = build_info.get('include_dirs')
|
||||
if include_dirs is None:
|
||||
include_dirs = []
|
||||
extra_postargs = build_info.get('extra_compiler_args') or []
|
||||
|
||||
include_dirs.extend(get_numpy_include_dirs())
|
||||
# where compiled F90 module files are:
|
||||
module_dirs = build_info.get('module_dirs') or []
|
||||
module_build_dir = os.path.dirname(lib_file)
|
||||
if requiref90:
|
||||
self.mkpath(module_build_dir)
|
||||
|
||||
if compiler.compiler_type == 'msvc':
|
||||
# this hack works around the msvc compiler attributes
|
||||
# problem, msvc uses its own convention :(
|
||||
c_sources += cxx_sources
|
||||
cxx_sources = []
|
||||
|
||||
# filtering C dispatch-table sources when optimization is not disabled,
|
||||
# otherwise treated as normal sources.
|
||||
copt_c_sources = []
|
||||
copt_cxx_sources = []
|
||||
copt_baseline_flags = []
|
||||
copt_macros = []
|
||||
if not self.disable_optimization:
|
||||
bsrc_dir = self.get_finalized_command("build_src").build_src
|
||||
dispatch_hpath = os.path.join("numpy", "distutils", "include")
|
||||
dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
|
||||
include_dirs.append(dispatch_hpath)
|
||||
|
||||
copt_build_src = None if self.inplace else bsrc_dir
|
||||
for _srcs, _dst, _ext in (
|
||||
((c_sources,), copt_c_sources, ('.dispatch.c',)),
|
||||
((c_sources, cxx_sources), copt_cxx_sources,
|
||||
('.dispatch.cpp', '.dispatch.cxx'))
|
||||
):
|
||||
for _src in _srcs:
|
||||
_dst += [
|
||||
_src.pop(_src.index(s))
|
||||
for s in _src[:] if s.endswith(_ext)
|
||||
]
|
||||
copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
|
||||
else:
|
||||
copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
|
||||
|
||||
objects = []
|
||||
if copt_cxx_sources:
|
||||
log.info("compiling C++ dispatch-able sources")
|
||||
objects += self.compiler_opt.try_dispatch(
|
||||
copt_c_sources,
|
||||
output_dir=self.build_temp,
|
||||
src_dir=copt_build_src,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs,
|
||||
ccompiler=cxx_compiler
|
||||
)
|
||||
|
||||
if copt_c_sources:
|
||||
log.info("compiling C dispatch-able sources")
|
||||
objects += self.compiler_opt.try_dispatch(copt_c_sources,
|
||||
output_dir=self.build_temp,
|
||||
src_dir=copt_build_src,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs)
|
||||
|
||||
if c_sources:
|
||||
log.info("compiling C sources")
|
||||
objects += compiler.compile(c_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs + copt_baseline_flags)
|
||||
|
||||
if cxx_sources:
|
||||
log.info("compiling C++ sources")
|
||||
cxx_compiler = compiler.cxx_compiler()
|
||||
cxx_objects = cxx_compiler.compile(cxx_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros + copt_macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs + copt_baseline_flags)
|
||||
objects.extend(cxx_objects)
|
||||
|
||||
if f_sources or fmodule_sources:
|
||||
extra_postargs = []
|
||||
f_objects = []
|
||||
|
||||
if requiref90:
|
||||
if fcompiler.module_dir_switch is None:
|
||||
existing_modules = glob('*.mod')
|
||||
extra_postargs += fcompiler.module_options(
|
||||
module_dirs, module_build_dir)
|
||||
|
||||
if fmodule_sources:
|
||||
log.info("compiling Fortran 90 module sources")
|
||||
f_objects += fcompiler.compile(fmodule_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs)
|
||||
|
||||
if requiref90 and self._f_compiler.module_dir_switch is None:
|
||||
# move new compiled F90 module files to module_build_dir
|
||||
for f in glob('*.mod'):
|
||||
if f in existing_modules:
|
||||
continue
|
||||
t = os.path.join(module_build_dir, f)
|
||||
if os.path.abspath(f) == os.path.abspath(t):
|
||||
continue
|
||||
if os.path.isfile(t):
|
||||
os.remove(t)
|
||||
try:
|
||||
self.move_file(f, module_build_dir)
|
||||
except DistutilsFileError:
|
||||
log.warn('failed to move %r to %r'
|
||||
% (f, module_build_dir))
|
||||
|
||||
if f_sources:
|
||||
log.info("compiling Fortran sources")
|
||||
f_objects += fcompiler.compile(f_sources,
|
||||
output_dir=self.build_temp,
|
||||
macros=macros,
|
||||
include_dirs=include_dirs,
|
||||
debug=self.debug,
|
||||
extra_postargs=extra_postargs)
|
||||
else:
|
||||
f_objects = []
|
||||
|
||||
if f_objects and not fcompiler.can_ccompiler_link(compiler):
|
||||
# Default linker cannot link Fortran object files, and results
|
||||
# need to be wrapped later. Instead of creating a real static
|
||||
# library, just keep track of the object files.
|
||||
listfn = os.path.join(self.build_clib,
|
||||
lib_name + '.fobjects')
|
||||
with open(listfn, 'w') as f:
|
||||
f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
|
||||
|
||||
listfn = os.path.join(self.build_clib,
|
||||
lib_name + '.cobjects')
|
||||
with open(listfn, 'w') as f:
|
||||
f.write("\n".join(os.path.abspath(obj) for obj in objects))
|
||||
|
||||
# create empty "library" file for dependency tracking
|
||||
lib_fname = os.path.join(self.build_clib,
|
||||
lib_name + compiler.static_lib_extension)
|
||||
with open(lib_fname, 'wb') as f:
|
||||
pass
|
||||
else:
|
||||
# assume that default linker is suitable for
|
||||
# linking Fortran object files
|
||||
objects.extend(f_objects)
|
||||
compiler.create_static_lib(objects, lib_name,
|
||||
output_dir=self.build_clib,
|
||||
debug=self.debug)
|
||||
|
||||
# fix library dependencies
|
||||
clib_libraries = build_info.get('libraries', [])
|
||||
for lname, binfo in libraries:
|
||||
if lname in clib_libraries:
|
||||
clib_libraries.extend(binfo.get('libraries', []))
|
||||
if clib_libraries:
|
||||
build_info['libraries'] = clib_libraries
|
@@ -1,708 +0,0 @@
|
||||
""" Modified version of build_ext that handles fortran source files.
|
||||
|
||||
"""
|
||||
import os
|
||||
import subprocess
|
||||
from glob import glob
|
||||
|
||||
from distutils.dep_util import newer_group
|
||||
from distutils.command.build_ext import build_ext as old_build_ext
|
||||
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
|
||||
DistutilsError
|
||||
from distutils.file_util import copy_file
|
||||
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.exec_command import filepath_from_subprocess_output
|
||||
from numpy.distutils.system_info import combine_paths
|
||||
from numpy.distutils.misc_util import (
|
||||
filter_sources, get_ext_source_files, get_numpy_include_dirs,
|
||||
has_cxx_sources, has_f_sources, is_sequence
|
||||
)
|
||||
from numpy.distutils.command.config_compiler import show_fortran_compilers
|
||||
from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
|
||||
|
||||
class build_ext (old_build_ext):
|
||||
|
||||
description = "build C/C++/F extensions (compile/link to build directory)"
|
||||
|
||||
user_options = old_build_ext.user_options + [
|
||||
('fcompiler=', None,
|
||||
"specify the Fortran compiler type"),
|
||||
('parallel=', 'j',
|
||||
"number of parallel jobs"),
|
||||
('warn-error', None,
|
||||
"turn all warnings into errors (-Werror)"),
|
||||
('cpu-baseline=', None,
|
||||
"specify a list of enabled baseline CPU optimizations"),
|
||||
('cpu-dispatch=', None,
|
||||
"specify a list of dispatched CPU optimizations"),
|
||||
('disable-optimization', None,
|
||||
"disable CPU optimized code(dispatch,simd,fast...)"),
|
||||
('simd-test=', None,
|
||||
"specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
|
||||
]
|
||||
|
||||
help_options = old_build_ext.help_options + [
|
||||
('help-fcompiler', None, "list available Fortran compilers",
|
||||
show_fortran_compilers),
|
||||
]
|
||||
|
||||
boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
|
||||
|
||||
def initialize_options(self):
|
||||
old_build_ext.initialize_options(self)
|
||||
self.fcompiler = None
|
||||
self.parallel = None
|
||||
self.warn_error = None
|
||||
self.cpu_baseline = None
|
||||
self.cpu_dispatch = None
|
||||
self.disable_optimization = None
|
||||
self.simd_test = None
|
||||
|
||||
def finalize_options(self):
|
||||
if self.parallel:
|
||||
try:
|
||||
self.parallel = int(self.parallel)
|
||||
except ValueError as e:
|
||||
raise ValueError("--parallel/-j argument must be an integer") from e
|
||||
|
||||
# Ensure that self.include_dirs and self.distribution.include_dirs
|
||||
# refer to the same list object. finalize_options will modify
|
||||
# self.include_dirs, but self.distribution.include_dirs is used
|
||||
# during the actual build.
|
||||
# self.include_dirs is None unless paths are specified with
|
||||
# --include-dirs.
|
||||
# The include paths will be passed to the compiler in the order:
|
||||
# numpy paths, --include-dirs paths, Python include path.
|
||||
if isinstance(self.include_dirs, str):
|
||||
self.include_dirs = self.include_dirs.split(os.pathsep)
|
||||
incl_dirs = self.include_dirs or []
|
||||
if self.distribution.include_dirs is None:
|
||||
self.distribution.include_dirs = []
|
||||
self.include_dirs = self.distribution.include_dirs
|
||||
self.include_dirs.extend(incl_dirs)
|
||||
|
||||
old_build_ext.finalize_options(self)
|
||||
self.set_undefined_options('build',
|
||||
('parallel', 'parallel'),
|
||||
('warn_error', 'warn_error'),
|
||||
('cpu_baseline', 'cpu_baseline'),
|
||||
('cpu_dispatch', 'cpu_dispatch'),
|
||||
('disable_optimization', 'disable_optimization'),
|
||||
('simd_test', 'simd_test')
|
||||
)
|
||||
CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
|
||||
|
||||
def run(self):
|
||||
if not self.extensions:
|
||||
return
|
||||
|
||||
# Make sure that extension sources are complete.
|
||||
self.run_command('build_src')
|
||||
|
||||
if self.distribution.has_c_libraries():
|
||||
if self.inplace:
|
||||
if self.distribution.have_run.get('build_clib'):
|
||||
log.warn('build_clib already run, it is too late to '
|
||||
'ensure in-place build of build_clib')
|
||||
build_clib = self.distribution.get_command_obj(
|
||||
'build_clib')
|
||||
else:
|
||||
build_clib = self.distribution.get_command_obj(
|
||||
'build_clib')
|
||||
build_clib.inplace = 1
|
||||
build_clib.ensure_finalized()
|
||||
build_clib.run()
|
||||
self.distribution.have_run['build_clib'] = 1
|
||||
|
||||
else:
|
||||
self.run_command('build_clib')
|
||||
build_clib = self.get_finalized_command('build_clib')
|
||||
self.library_dirs.append(build_clib.build_clib)
|
||||
else:
|
||||
build_clib = None
|
||||
|
||||
# Not including C libraries to the list of
|
||||
# extension libraries automatically to prevent
|
||||
# bogus linking commands. Extensions must
|
||||
# explicitly specify the C libraries that they use.
|
||||
|
||||
from distutils.ccompiler import new_compiler
|
||||
from numpy.distutils.fcompiler import new_fcompiler
|
||||
|
||||
compiler_type = self.compiler
|
||||
# Initialize C compiler:
|
||||
self.compiler = new_compiler(compiler=compiler_type,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force)
|
||||
self.compiler.customize(self.distribution)
|
||||
self.compiler.customize_cmd(self)
|
||||
|
||||
if self.warn_error:
|
||||
self.compiler.compiler.append('-Werror')
|
||||
self.compiler.compiler_so.append('-Werror')
|
||||
|
||||
self.compiler.show_customization()
|
||||
|
||||
if not self.disable_optimization:
|
||||
dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
|
||||
dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
|
||||
opt_cache_path = os.path.abspath(
|
||||
os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
|
||||
)
|
||||
if hasattr(self, "compiler_opt"):
|
||||
# By default `CCompilerOpt` update the cache at the exit of
|
||||
# the process, which may lead to duplicate building
|
||||
# (see build_extension()/force_rebuild) if run() called
|
||||
# multiple times within the same os process/thread without
|
||||
# giving the chance the previous instances of `CCompilerOpt`
|
||||
# to update the cache.
|
||||
self.compiler_opt.cache_flush()
|
||||
|
||||
self.compiler_opt = new_ccompiler_opt(
|
||||
compiler=self.compiler, dispatch_hpath=dispatch_hpath,
|
||||
cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
|
||||
cache_path=opt_cache_path
|
||||
)
|
||||
def report(copt):
|
||||
log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
|
||||
log.info(copt.report(full=True))
|
||||
|
||||
import atexit
|
||||
atexit.register(report, self.compiler_opt)
|
||||
|
||||
# Setup directory for storing generated extra DLL files on Windows
|
||||
self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
|
||||
if not os.path.isdir(self.extra_dll_dir):
|
||||
os.makedirs(self.extra_dll_dir)
|
||||
|
||||
# Create mapping of libraries built by build_clib:
|
||||
clibs = {}
|
||||
if build_clib is not None:
|
||||
for libname, build_info in build_clib.libraries or []:
|
||||
if libname in clibs and clibs[libname] != build_info:
|
||||
log.warn('library %r defined more than once,'
|
||||
' overwriting build_info\n%s... \nwith\n%s...'
|
||||
% (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
|
||||
clibs[libname] = build_info
|
||||
# .. and distribution libraries:
|
||||
for libname, build_info in self.distribution.libraries or []:
|
||||
if libname in clibs:
|
||||
# build_clib libraries have a precedence before distribution ones
|
||||
continue
|
||||
clibs[libname] = build_info
|
||||
|
||||
# Determine if C++/Fortran 77/Fortran 90 compilers are needed.
|
||||
# Update extension libraries, library_dirs, and macros.
|
||||
all_languages = set()
|
||||
for ext in self.extensions:
|
||||
ext_languages = set()
|
||||
c_libs = []
|
||||
c_lib_dirs = []
|
||||
macros = []
|
||||
for libname in ext.libraries:
|
||||
if libname in clibs:
|
||||
binfo = clibs[libname]
|
||||
c_libs += binfo.get('libraries', [])
|
||||
c_lib_dirs += binfo.get('library_dirs', [])
|
||||
for m in binfo.get('macros', []):
|
||||
if m not in macros:
|
||||
macros.append(m)
|
||||
|
||||
for l in clibs.get(libname, {}).get('source_languages', []):
|
||||
ext_languages.add(l)
|
||||
if c_libs:
|
||||
new_c_libs = ext.libraries + c_libs
|
||||
log.info('updating extension %r libraries from %r to %r'
|
||||
% (ext.name, ext.libraries, new_c_libs))
|
||||
ext.libraries = new_c_libs
|
||||
ext.library_dirs = ext.library_dirs + c_lib_dirs
|
||||
if macros:
|
||||
log.info('extending extension %r defined_macros with %r'
|
||||
% (ext.name, macros))
|
||||
ext.define_macros = ext.define_macros + macros
|
||||
|
||||
# determine extension languages
|
||||
if has_f_sources(ext.sources):
|
||||
ext_languages.add('f77')
|
||||
if has_cxx_sources(ext.sources):
|
||||
ext_languages.add('c++')
|
||||
l = ext.language or self.compiler.detect_language(ext.sources)
|
||||
if l:
|
||||
ext_languages.add(l)
|
||||
# reset language attribute for choosing proper linker
|
||||
if 'c++' in ext_languages:
|
||||
ext_language = 'c++'
|
||||
elif 'f90' in ext_languages:
|
||||
ext_language = 'f90'
|
||||
elif 'f77' in ext_languages:
|
||||
ext_language = 'f77'
|
||||
else:
|
||||
ext_language = 'c' # default
|
||||
if l and l != ext_language and ext.language:
|
||||
log.warn('resetting extension %r language from %r to %r.' %
|
||||
(ext.name, l, ext_language))
|
||||
ext.language = ext_language
|
||||
# global language
|
||||
all_languages.update(ext_languages)
|
||||
|
||||
need_f90_compiler = 'f90' in all_languages
|
||||
need_f77_compiler = 'f77' in all_languages
|
||||
need_cxx_compiler = 'c++' in all_languages
|
||||
|
||||
# Initialize C++ compiler:
|
||||
if need_cxx_compiler:
|
||||
self._cxx_compiler = new_compiler(compiler=compiler_type,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force)
|
||||
compiler = self._cxx_compiler
|
||||
compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
|
||||
compiler.customize_cmd(self)
|
||||
compiler.show_customization()
|
||||
self._cxx_compiler = compiler.cxx_compiler()
|
||||
else:
|
||||
self._cxx_compiler = None
|
||||
|
||||
# Initialize Fortran 77 compiler:
|
||||
if need_f77_compiler:
|
||||
ctype = self.fcompiler
|
||||
self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force,
|
||||
requiref90=False,
|
||||
c_compiler=self.compiler)
|
||||
fcompiler = self._f77_compiler
|
||||
if fcompiler:
|
||||
ctype = fcompiler.compiler_type
|
||||
fcompiler.customize(self.distribution)
|
||||
if fcompiler and fcompiler.get_version():
|
||||
fcompiler.customize_cmd(self)
|
||||
fcompiler.show_customization()
|
||||
else:
|
||||
self.warn('f77_compiler=%s is not available.' %
|
||||
(ctype))
|
||||
self._f77_compiler = None
|
||||
else:
|
||||
self._f77_compiler = None
|
||||
|
||||
# Initialize Fortran 90 compiler:
|
||||
if need_f90_compiler:
|
||||
ctype = self.fcompiler
|
||||
self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
|
||||
verbose=self.verbose,
|
||||
dry_run=self.dry_run,
|
||||
force=self.force,
|
||||
requiref90=True,
|
||||
c_compiler=self.compiler)
|
||||
fcompiler = self._f90_compiler
|
||||
if fcompiler:
|
||||
ctype = fcompiler.compiler_type
|
||||
fcompiler.customize(self.distribution)
|
||||
if fcompiler and fcompiler.get_version():
|
||||
fcompiler.customize_cmd(self)
|
||||
fcompiler.show_customization()
|
||||
else:
|
||||
self.warn('f90_compiler=%s is not available.' %
|
||||
(ctype))
|
||||
self._f90_compiler = None
|
||||
else:
|
||||
self._f90_compiler = None
|
||||
|
||||
# Build extensions
|
||||
self.build_extensions()
|
||||
|
||||
# Copy over any extra DLL files
|
||||
# FIXME: In the case where there are more than two packages,
|
||||
# we blindly assume that both packages need all of the libraries,
|
||||
# resulting in a larger wheel than is required. This should be fixed,
|
||||
# but it's so rare that I won't bother to handle it.
|
||||
pkg_roots = {
|
||||
self.get_ext_fullname(ext.name).split('.')[0]
|
||||
for ext in self.extensions
|
||||
}
|
||||
for pkg_root in pkg_roots:
|
||||
shared_lib_dir = os.path.join(pkg_root, '.libs')
|
||||
if not self.inplace:
|
||||
shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
|
||||
for fn in os.listdir(self.extra_dll_dir):
|
||||
if not os.path.isdir(shared_lib_dir):
|
||||
os.makedirs(shared_lib_dir)
|
||||
if not fn.lower().endswith('.dll'):
|
||||
continue
|
||||
runtime_lib = os.path.join(self.extra_dll_dir, fn)
|
||||
copy_file(runtime_lib, shared_lib_dir)
|
||||
|
||||
def swig_sources(self, sources, extensions=None):
    """Return *sources* unchanged.

    SWIG interface files have already been processed by the
    ``build_src`` command, so there is nothing left to do here.
    """
    return sources
def build_extension(self, ext):
    """Compile and link a single extension module *ext*.

    Splits the sources by language (C / C++ / Fortran 77 / Fortran 90
    modules), compiles each group with the matching compiler, handles
    CPU-dispatch sources via ``self.compiler_opt``, and finally links
    everything into the extension binary.

    Raises
    ------
    DistutilsSetupError
        If ``ext.sources`` is missing or not a sequence.
    DistutilsError
        If C++ or Fortran sources are present but no matching
        compiler was initialized.
    """
    sources = ext.sources
    if sources is None or not is_sequence(sources):
        raise DistutilsSetupError(
            ("in 'ext_modules' option (extension '%s'), " +
             "'sources' must be present and must be " +
             "a list of source filenames") % ext.name)
    sources = list(sources)

    # Nothing to build for an extension without sources.
    if not sources:
        return

    # Resolve the output path: next to the package sources for
    # in-place builds, under build_lib otherwise.
    fullname = self.get_ext_fullname(ext.name)
    if self.inplace:
        modpath = fullname.split('.')
        package = '.'.join(modpath[0:-1])
        base = modpath[-1]
        build_py = self.get_finalized_command('build_py')
        package_dir = build_py.get_package_dir(package)
        ext_filename = os.path.join(package_dir,
                                    self.get_ext_filename(base))
    else:
        ext_filename = os.path.join(self.build_lib,
                                    self.get_ext_filename(fullname))
    depends = sources + ext.depends

    # Rebuild when forced, when the CPU-optimization cache changed,
    # or when any dependency is newer than the existing binary.
    force_rebuild = self.force
    if not self.disable_optimization and not self.compiler_opt.is_cached():
        log.debug("Detected changes on compiler optimizations")
        force_rebuild = True
    if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
        log.debug("skipping '%s' extension (up-to-date)", ext.name)
        return
    else:
        log.info("building '%s' extension", ext.name)

    extra_args = ext.extra_compile_args or []
    macros = ext.define_macros[:]
    for undef in ext.undef_macros:
        # A 1-tuple is distutils' convention for an "undefine" entry.
        macros.append((undef,))

    # Partition sources by language.
    c_sources, cxx_sources, f_sources, fmodule_sources = \
        filter_sources(ext.sources)

    if self.compiler.compiler_type == 'msvc':
        if cxx_sources:
            # Needed to compile kiva.agg._agg extension.
            extra_args.append('/Zm1000')
        # this hack works around the msvc compiler attributes
        # problem, msvc uses its own convention :(
        c_sources += cxx_sources
        cxx_sources = []

    # Set Fortran/C++ compilers for compilation and linking.
    if ext.language == 'f90':
        fcompiler = self._f90_compiler
    elif ext.language == 'f77':
        fcompiler = self._f77_compiler
    else:  # in case ext.language is c++, for instance
        fcompiler = self._f90_compiler or self._f77_compiler
    if fcompiler is not None:
        # Forward per-extension Fortran flags to the fcompiler, if any.
        fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
            ext, 'extra_f77_compile_args') else []
        fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
            ext, 'extra_f90_compile_args') else []
    cxx_compiler = self._cxx_compiler

    # check for the availability of required compilers
    if cxx_sources and cxx_compiler is None:
        raise DistutilsError("extension %r has C++ sources"
                             "but no C++ compiler found" % (ext.name))
    if (f_sources or fmodule_sources) and fcompiler is None:
        raise DistutilsError("extension %r has Fortran sources "
                             "but no Fortran compiler found" % (ext.name))
    if ext.language in ['f77', 'f90'] and fcompiler is None:
        self.warn("extension %r has Fortran libraries "
                  "but no Fortran linker found, using default linker" % (ext.name))
    if ext.language == 'c++' and cxx_compiler is None:
        self.warn("extension %r has C++ libraries "
                  "but no C++ linker found, using default linker" % (ext.name))

    kws = {'depends': ext.depends}
    output_dir = self.build_temp

    include_dirs = ext.include_dirs + get_numpy_include_dirs()

    # filtering C dispatch-table sources when optimization is not disabled,
    # otherwise treated as normal sources.
    copt_c_sources = []
    copt_cxx_sources = []
    copt_baseline_flags = []
    copt_macros = []
    if not self.disable_optimization:
        bsrc_dir = self.get_finalized_command("build_src").build_src
        dispatch_hpath = os.path.join("numpy", "distutils", "include")
        dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
        include_dirs.append(dispatch_hpath)

        copt_build_src = None if self.inplace else bsrc_dir
        # Move *.dispatch.c/.cpp/.cxx files out of the normal source
        # lists into the CPU-dispatch lists (popped from the original
        # lists in place).
        for _srcs, _dst, _ext in (
            ((c_sources,), copt_c_sources, ('.dispatch.c',)),
            ((c_sources, cxx_sources), copt_cxx_sources,
             ('.dispatch.cpp', '.dispatch.cxx'))
        ):
            for _src in _srcs:
                _dst += [
                    _src.pop(_src.index(s))
                    for s in _src[:] if s.endswith(_ext)
                ]
        copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
    else:
        copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))

    c_objects = []
    if copt_cxx_sources:
        log.info("compiling C++ dispatch-able sources")
        c_objects += self.compiler_opt.try_dispatch(
            copt_cxx_sources,
            output_dir=output_dir,
            src_dir=copt_build_src,
            macros=macros + copt_macros,
            include_dirs=include_dirs,
            debug=self.debug,
            extra_postargs=extra_args,
            ccompiler=cxx_compiler,
            **kws
        )
    if copt_c_sources:
        log.info("compiling C dispatch-able sources")
        c_objects += self.compiler_opt.try_dispatch(copt_c_sources,
                                                    output_dir=output_dir,
                                                    src_dir=copt_build_src,
                                                    macros=macros + copt_macros,
                                                    include_dirs=include_dirs,
                                                    debug=self.debug,
                                                    extra_postargs=extra_args,
                                                    **kws)
    if c_sources:
        log.info("compiling C sources")
        c_objects += self.compiler.compile(c_sources,
                                           output_dir=output_dir,
                                           macros=macros + copt_macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_args + copt_baseline_flags,
                                           **kws)
    if cxx_sources:
        log.info("compiling C++ sources")
        c_objects += cxx_compiler.compile(cxx_sources,
                                          output_dir=output_dir,
                                          macros=macros + copt_macros,
                                          include_dirs=include_dirs,
                                          debug=self.debug,
                                          extra_postargs=extra_args + copt_baseline_flags,
                                          **kws)

    extra_postargs = []
    f_objects = []
    if fmodule_sources:
        log.info("compiling Fortran 90 module sources")
        module_dirs = ext.module_dirs[:]
        module_build_dir = os.path.join(
            self.build_temp, os.path.dirname(
                self.get_ext_filename(fullname)))

        self.mkpath(module_build_dir)
        if fcompiler.module_dir_switch is None:
            # Compiler will drop .mod files into the CWD; remember
            # what was already there so only new files get moved.
            existing_modules = glob('*.mod')
        extra_postargs += fcompiler.module_options(
            module_dirs, module_build_dir)
        f_objects += fcompiler.compile(fmodule_sources,
                                       output_dir=self.build_temp,
                                       macros=macros,
                                       include_dirs=include_dirs,
                                       debug=self.debug,
                                       extra_postargs=extra_postargs,
                                       depends=ext.depends)

        if fcompiler.module_dir_switch is None:
            # Relocate freshly produced .mod files into the build dir.
            for f in glob('*.mod'):
                if f in existing_modules:
                    continue
                t = os.path.join(module_build_dir, f)
                if os.path.abspath(f) == os.path.abspath(t):
                    continue
                if os.path.isfile(t):
                    os.remove(t)
                try:
                    self.move_file(f, module_build_dir)
                except DistutilsFileError:
                    log.warn('failed to move %r to %r' %
                             (f, module_build_dir))
    if f_sources:
        log.info("compiling Fortran sources")
        f_objects += fcompiler.compile(f_sources,
                                       output_dir=self.build_temp,
                                       macros=macros,
                                       include_dirs=include_dirs,
                                       debug=self.debug,
                                       extra_postargs=extra_postargs,
                                       depends=ext.depends)

    # Fortran objects the C compiler cannot link directly are kept
    # aside and wrapped later by _process_unlinkable_fobjects.
    if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
        unlinkable_fobjects = f_objects
        objects = c_objects
    else:
        unlinkable_fobjects = []
        objects = c_objects + f_objects

    if ext.extra_objects:
        objects.extend(ext.extra_objects)
    extra_args = ext.extra_link_args or []
    libraries = self.get_libraries(ext)[:]
    library_dirs = ext.library_dirs[:]

    linker = self.compiler.link_shared_object
    # Always use system linker when using MSVC compiler.
    if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
        # expand libraries with fcompiler libraries as we are
        # not using fcompiler linker
        self._libs_with_msvc_and_fortran(
            fcompiler, libraries, library_dirs)

    elif ext.language in ['f77', 'f90'] and fcompiler is not None:
        linker = fcompiler.link_shared_object
    if ext.language == 'c++' and cxx_compiler is not None:
        linker = cxx_compiler.link_shared_object

    if fcompiler is not None:
        objects, libraries = self._process_unlinkable_fobjects(
            objects, libraries,
            fcompiler, library_dirs,
            unlinkable_fobjects)

    linker(objects, ext_filename,
           libraries=libraries,
           library_dirs=library_dirs,
           runtime_library_dirs=ext.runtime_library_dirs,
           extra_postargs=extra_args,
           export_symbols=self.get_export_symbols(ext),
           debug=self.debug,
           build_temp=self.build_temp,
           target_lang=ext.language)
def _add_dummy_mingwex_sym(self, c_sources):
    """Compile the gfortran/VS2003 hack source and archive it.

    Builds ``gfortran_vs2003_hack.c`` (generated by build_src) and
    stores it as the static library ``_gfortran_workaround`` under
    build_clib so the linker can resolve the dummy symbol.

    NOTE(review): the *c_sources* parameter is not used by this
    implementation — confirm against callers before removing it.
    """
    build_src = self.get_finalized_command("build_src").build_src
    build_clib = self.get_finalized_command("build_clib").build_clib
    objects = self.compiler.compile([os.path.join(build_src,
                                                  "gfortran_vs2003_hack.c")],
                                    output_dir=self.build_temp)
    self.compiler.create_static_lib(
        objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
def _process_unlinkable_fobjects(self, objects, libraries,
                                 fcompiler, library_dirs,
                                 unlinkable_fobjects):
    """Expand fake static libraries and wrap unlinkable Fortran objects.

    A "fake" library is a pair of text files ``<lib>.fobjects`` /
    ``<lib>.cobjects`` listing object paths one per line.  Entries
    found this way are folded into *unlinkable_fobjects* and
    *objects* respectively, and the fake library name is dropped.
    Remaining unlinkable Fortran objects are wrapped by the Fortran
    compiler into something the C linker can consume.

    Returns the updated ``(objects, libraries)`` pair; the input
    lists are not mutated (copies are taken first).
    """
    libraries = list(libraries)
    objects = list(objects)
    unlinkable_fobjects = list(unlinkable_fobjects)

    # Expand possible fake static libraries to objects;
    # make sure to iterate over a copy of the list as
    # "fake" libraries will be removed as they are
    # encountered
    for lib in libraries[:]:
        for libdir in library_dirs:
            fake_lib = os.path.join(libdir, lib + '.fobjects')
            if os.path.isfile(fake_lib):
                # Replace fake static library
                libraries.remove(lib)
                with open(fake_lib, 'r') as f:
                    unlinkable_fobjects.extend(f.read().splitlines())

                # Expand C objects
                c_lib = os.path.join(libdir, lib + '.cobjects')
                with open(c_lib, 'r') as f:
                    objects.extend(f.read().splitlines())

    # Wrap unlinkable objects to a linkable one
    if unlinkable_fobjects:
        fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
        wrapped = fcompiler.wrap_unlinkable_objects(
            fobjects, output_dir=self.build_temp,
            extra_dll_dir=self.extra_dll_dir)
        objects.extend(wrapped)

    return objects, libraries
def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
                                c_library_dirs):
    """Make Fortran build products consumable by an MSVC-style linker.

    Mutates *c_libraries* and *c_library_dirs* IN PLACE: copies
    g77/gfortran-style ``lib<name>.a`` archives to ``<name>.lib``
    under build_temp, and appends the Fortran compiler's library
    names and directories.  No-op when *fcompiler* is None.
    """
    if fcompiler is None:
        return

    for libname in c_libraries:
        # msvc* runtime libraries are resolved by the linker itself.
        if libname.startswith('msvc'):
            continue
        fileexists = False
        for libdir in c_library_dirs or []:
            libfile = os.path.join(libdir, '%s.lib' % (libname))
            if os.path.isfile(libfile):
                fileexists = True
                break
        if fileexists:
            continue
        # make g77-compiled static libs available to MSVC
        fileexists = False
        for libdir in c_library_dirs:
            libfile = os.path.join(libdir, 'lib%s.a' % (libname))
            if os.path.isfile(libfile):
                # copy libname.a file to name.lib so that MSVC linker
                # can find it
                libfile2 = os.path.join(self.build_temp, libname + '.lib')
                copy_file(libfile, libfile2)
                if self.build_temp not in c_library_dirs:
                    c_library_dirs.append(self.build_temp)
                fileexists = True
                break
        if fileexists:
            continue
        log.warn('could not find library %r in directories %s'
                 % (libname, c_library_dirs))

    # Always use system linker when using MSVC compiler.
    f_lib_dirs = []
    for dir in fcompiler.library_dirs:
        # correct path when compiling in Cygwin but with normal Win
        # Python
        if dir.startswith('/usr/lib'):
            try:
                dir = subprocess.check_output(['cygpath', '-w', dir])
            except (OSError, subprocess.CalledProcessError):
                # cygpath unavailable or failed: keep the POSIX path.
                pass
            else:
                dir = filepath_from_subprocess_output(dir)
        f_lib_dirs.append(dir)
    c_library_dirs.extend(f_lib_dirs)

    # make g77-compiled static libs available to MSVC
    for lib in fcompiler.libraries:
        if not lib.startswith('msvc'):
            c_libraries.append(lib)
            p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
            if p:
                dst_name = os.path.join(self.build_temp, lib + '.lib')
                if not os.path.isfile(dst_name):
                    copy_file(p[0], dst_name)
                if self.build_temp not in c_library_dirs:
                    c_library_dirs.append(self.build_temp)
def get_source_files(self):
    """Return the source files of every configured extension."""
    self.check_extensions_list(self.extensions)
    return [fname
            for extension in self.extensions
            for fname in get_ext_source_files(extension)]
def get_outputs(self):
    """Return the build_lib paths of all extensions that have sources."""
    self.check_extensions_list(self.extensions)
    return [
        os.path.join(
            self.build_lib,
            self.get_ext_filename(self.get_ext_fullname(ext.name)))
        for ext in self.extensions
        if ext.sources
    ]
@@ -1,31 +0,0 @@
|
||||
from distutils.command.build_py import build_py as old_build_py
|
||||
from numpy.distutils.misc_util import is_string
|
||||
|
||||
class build_py(old_build_py):
    """build_py command that also picks up modules generated by build_src."""

    def run(self):
        # If build_src generated Python modules and no explicit package
        # list was configured, derive the packages from those modules.
        build_src = self.get_finalized_command('build_src')
        if build_src.py_modules_dict and self.packages is None:
            self.packages = list(build_src.py_modules_dict.keys ())
        old_build_py.run(self)

    def find_package_modules(self, package, package_dir):
        """Extend the stock module list with build_src generated modules."""
        modules = old_build_py.find_package_modules(self, package, package_dir)

        # Find build_src generated *.py files.
        build_src = self.get_finalized_command('build_src')
        modules += build_src.py_modules_dict.get(package, [])

        return modules

    def find_modules(self):
        """Run the stock find_modules on string entries only.

        Non-string entries (e.g. callables handled by build_src) are
        temporarily removed from ``self.py_modules`` so the base-class
        implementation does not choke; the original list is restored
        afterwards.
        """
        old_py_modules = self.py_modules[:]
        new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
        self.py_modules[:] = new_py_modules
        modules = old_build_py.find_modules(self)
        self.py_modules[:] = old_py_modules

        return modules
|
||||
# XXX: Fix find_source_files for item in py_modules such that item is 3-tuple
|
||||
# and item[2] is source file.
|
@@ -1,49 +0,0 @@
|
||||
""" Modified version of build_scripts that handles building scripts from functions.
|
||||
|
||||
"""
|
||||
from distutils.command.build_scripts import build_scripts as old_build_scripts
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.misc_util import is_string
|
||||
|
||||
class build_scripts(old_build_scripts):
    """build_scripts command that also accepts callables as scripts.

    A callable entry is invoked with the build directory and may
    return a script path (or a sequence of them) to be installed.
    """

    def generate_scripts(self, scripts):
        """Expand callable entries in *scripts* into concrete paths."""
        literal = [entry for entry in scripts if is_string(entry)]
        factories = [entry for entry in scripts if not is_string(entry)]
        if not factories:
            return literal

        self.mkpath(self.build_dir)
        for factory in factories:
            produced = factory(self.build_dir)
            if not produced:
                # Factory declined to produce a script.
                continue
            if is_string(produced):
                log.info(" adding '%s' to scripts" % (produced,))
                literal.append(produced)
            else:
                for item in produced:
                    log.info(" adding '%s' to scripts" % (item,))
                literal.extend(list(produced))
        return literal

    def run(self):
        """Generate function-based scripts, then defer to the stock run."""
        if not self.scripts:
            return

        self.scripts = self.generate_scripts(self.scripts)
        # Now make sure that the distribution object has this list of scripts.
        # setuptools' develop command requires that this be a list of filenames,
        # not functions.
        self.distribution.scripts = self.scripts

        return old_build_scripts.run(self)

    def get_source_files(self):
        """Return the script source files, expanding callables."""
        from numpy.distutils.misc_util import get_script_files
        return get_script_files(self.scripts)
@@ -1,773 +0,0 @@
|
||||
""" Build swig and f2py sources.
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shlex
|
||||
import copy
|
||||
|
||||
from distutils.command import build_ext
|
||||
from distutils.dep_util import newer_group, newer
|
||||
from distutils.util import get_platform
|
||||
from distutils.errors import DistutilsError, DistutilsSetupError
|
||||
|
||||
|
||||
# this import can't be done here, as it uses numpy stuff only available
|
||||
# after it's installed
|
||||
#import numpy.f2py
|
||||
from numpy.distutils import log
|
||||
from numpy.distutils.misc_util import (
|
||||
fortran_ext_match, appendpath, is_string, is_sequence, get_cmd
|
||||
)
|
||||
from numpy.distutils.from_template import process_file as process_f_file
|
||||
from numpy.distutils.conv_template import process_file as process_c_file
|
||||
|
||||
def subst_vars(target, source, d):
    """Copy *source* to *target*, substituting ``@foo@`` with ``d['foo']``.

    Substitution is line based: the first ``@name@`` token found on a
    line determines the variable, and every occurrence of that token
    on the line is replaced.  Lines without a token are copied verbatim.
    Raises KeyError if a referenced variable is missing from *d*.
    """
    pattern = re.compile('@([a-zA-Z_]+)@')
    with open(source, 'r') as src, open(target, 'w') as dst:
        for line in src:
            hit = pattern.search(line)
            if hit is None:
                dst.write(line)
            else:
                name = hit.group(1)
                dst.write(line.replace('@%s@' % name, d[name]))
||||
class build_src(build_ext.build_ext):
|
||||
|
||||
description = "build sources from SWIG, F2PY files or a function"
|
||||
|
||||
user_options = [
|
||||
('build-src=', 'd', "directory to \"build\" sources to"),
|
||||
('f2py-opts=', None, "list of f2py command line options"),
|
||||
('swig=', None, "path to the SWIG executable"),
|
||||
('swig-opts=', None, "list of SWIG command line options"),
|
||||
('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
|
||||
('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
|
||||
('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
|
||||
('force', 'f', "forcibly build everything (ignore file timestamps)"),
|
||||
('inplace', 'i',
|
||||
"ignore build-lib and put compiled extensions into the source " +
|
||||
"directory alongside your pure Python modules"),
|
||||
('verbose-cfg', None,
|
||||
"change logging level from WARN to INFO which will show all " +
|
||||
"compiler output")
|
||||
]
|
||||
|
||||
boolean_options = ['force', 'inplace', 'verbose-cfg']
|
||||
|
||||
help_options = []
|
||||
|
||||
def initialize_options(self):
    """Reset every build_src option to its unset state (None).

    f2pyflags and swigflags are obsolete spellings kept only for
    backward compatibility; they are folded into f2py_opts/swig_opts
    during option finalization.
    """
    for option in ('extensions', 'package', 'py_modules',
                   'py_modules_dict', 'build_src', 'build_lib',
                   'build_base', 'force', 'inplace', 'package_dir',
                   'f2pyflags', 'f2py_opts', 'swigflags', 'swig_opts',
                   'swig_cpp', 'swig', 'verbose_cfg'):
        setattr(self, option, None)
||||
def finalize_options(self):
    """Finalize build_src options.

    Inherits build_base/build_lib/force from the 'build' command,
    pulls module/library/data lists off the distribution, folds the
    obsolete --f2pyflags/--swigflags spellings into their modern
    counterparts, and adopts swig-related options from 'build_ext'.
    """
    self.set_undefined_options('build',
                               ('build_base', 'build_base'),
                               ('build_lib', 'build_lib'),
                               ('force', 'force'))
    if self.package is None:
        self.package = self.distribution.ext_package
    self.extensions = self.distribution.ext_modules
    self.libraries = self.distribution.libraries or []
    self.py_modules = self.distribution.py_modules or []
    self.data_files = self.distribution.data_files or []

    if self.build_src is None:
        # e.g. "build/src.linux-x86_64-3.10"
        plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
        self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)

    # py_modules_dict is used in build_py.find_package_modules
    self.py_modules_dict = {}

    # Fold obsolete --f2pyflags into --f2py-opts.
    if self.f2pyflags:
        if self.f2py_opts:
            log.warn('ignoring --f2pyflags as --f2py-opts already used')
        else:
            self.f2py_opts = self.f2pyflags
        self.f2pyflags = None
    if self.f2py_opts is None:
        self.f2py_opts = []
    else:
        self.f2py_opts = shlex.split(self.f2py_opts)

    # Fold obsolete --swigflags into --swig-opts.
    if self.swigflags:
        if self.swig_opts:
            log.warn('ignoring --swigflags as --swig-opts already used')
        else:
            self.swig_opts = self.swigflags
        self.swigflags = None

    if self.swig_opts is None:
        self.swig_opts = []
    else:
        self.swig_opts = shlex.split(self.swig_opts)

    # use options from build_ext command
    build_ext = self.get_finalized_command('build_ext')
    if self.inplace is None:
        self.inplace = build_ext.inplace
    if self.swig_cpp is None:
        self.swig_cpp = build_ext.swig_cpp
    # NOTE(review): 'swig_opt' (singular) does not match the
    # 'swig_opts' attribute set above; getattr below returns None
    # for it, making that entry a no-op — confirm intended.
    for c in ['swig', 'swig_opt']:
        o = '--'+c.replace('_', '-')
        v = getattr(build_ext, c, None)
        if v:
            if getattr(self, c):
                log.warn('both build_src and build_ext define %s option' % (o))
            else:
                log.info('using "%s=%s" option from build_ext command' % (o, v))
            setattr(self, c, v)
def run(self):
    """Generate sources, but only when there is something to build."""
    log.info("build_src")
    if self.extensions or self.libraries:
        self.build_sources()
def build_sources(self):
    """Drive every source-generation step for this build.

    Order: py_modules, per-library sources, per-extension sources,
    data files, and finally npy-pkg-config files.
    """
    if self.inplace:
        # In-place builds resolve package directories through build_py.
        self.get_package_dir = \
            self.get_finalized_command('build_py').get_package_dir

    self.build_py_modules_sources()

    for lib_info in self.libraries:
        self.build_library_sources(*lib_info)

    if self.extensions:
        self.check_extensions_list(self.extensions)
        for extension in self.extensions:
            self.build_extension_sources(extension)

    self.build_data_files_sources()
    self.build_npy_pkg_config()
def build_data_files_sources(self):
    """Expand callable entries in ``self.data_files`` into filenames.

    Each entry is either a plain path (kept as-is) or a
    ``(dir, files)`` tuple whose *files* may contain callables.
    One-argument callables receive the build directory; zero-argument
    callables are invoked bare.  ``self.data_files`` is rewritten in
    place.  Raises TypeError for unsupported entry/result types.
    """
    if not self.data_files:
        return
    log.info('building data_files sources')
    from numpy.distutils.misc_util import get_data_files
    new_data_files = []
    for data in self.data_files:
        if isinstance(data, str):
            new_data_files.append(data)
        elif isinstance(data, tuple):
            d, files = data
            if self.inplace:
                build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
            else:
                build_dir = os.path.join(self.build_src, d)
            # Split callables from literal filenames.
            funcs = [f for f in files if hasattr(f, '__call__')]
            files = [f for f in files if not hasattr(f, '__call__')]
            for f in funcs:
                # One-argument callables get the build directory.
                if f.__code__.co_argcount==1:
                    s = f(build_dir)
                else:
                    s = f()
                if s is not None:
                    if isinstance(s, list):
                        files.extend(s)
                    elif isinstance(s, str):
                        files.append(s)
                    else:
                        raise TypeError(repr(s))
            filenames = get_data_files((d, files))
            new_data_files.append((d, filenames))
        else:
            raise TypeError(repr(data))
    self.data_files[:] = new_data_files
def _build_npy_pkg_config(self, info, gd):
    """Generate one npy-pkg-config file from its template.

    *info* is ``(template, install_dir, subst_dict)``; *gd* supplies
    additional substitution variables merged into *subst_dict*.
    Returns ``(full_install_dir, generated_path)``.
    """
    template, install_dir, subst_dict = info
    template_dir = os.path.dirname(template)
    for k, v in gd.items():
        subst_dict[k] = v

    # In-place builds generate next to the template; otherwise the
    # file goes under build_src.
    if self.inplace == 1:
        generated_dir = os.path.join(template_dir, install_dir)
    else:
        generated_dir = os.path.join(self.build_src, template_dir,
                                     install_dir)
    # Output name is the template filename minus its extension.
    generated = os.path.basename(os.path.splitext(template)[0])
    generated_path = os.path.join(generated_dir, generated)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)

    subst_vars(generated_path, template, subst_dict)

    # Where to install relatively to install prefix
    full_install_dir = os.path.join(template_dir, install_dir)
    return full_install_dir, generated_path
def build_npy_pkg_config(self):
    """Generate npy-pkg-config files and register them as data_files."""
    log.info('build_src: building npy-pkg config files')

    # XXX: another ugly workaround to circumvent distutils brain damage. We
    # need the install prefix here, but finalizing the options of the
    # install command when only building sources cause error. Instead, we
    # copy the install command instance, and finalize the copy so that it
    # does not disrupt how distutils want to do things when with the
    # original install command instance.
    install_cmd = copy.copy(get_cmd('install'))
    if not install_cmd.finalized == 1:
        install_cmd.finalize_options()
    build_npkg = False
    if self.inplace == 1:
        top_prefix = '.'
        build_npkg = True
    elif hasattr(install_cmd, 'install_libbase'):
        top_prefix = install_cmd.install_libbase
        build_npkg = True

    if build_npkg:
        for pkg, infos in self.distribution.installed_pkg_config.items():
            pkg_path = self.distribution.package_dir[pkg]
            prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
            d = {'prefix': prefix}
            for info in infos:
                install_dir, generated = self._build_npy_pkg_config(info, d)
                self.distribution.data_files.append((install_dir,
                                                     [generated]))
def build_py_modules_sources(self):
|
||||
if not self.py_modules:
|
||||
return
|
||||
log.info('building py_modules sources')
|
||||
new_py_modules = []
|
||||
for source in self.py_modules:
|
||||
if is_sequence(source) and len(source)==3:
|
||||
package, module_base, source = source
|
||||
if self.inplace:
|
||||
build_dir = self.get_package_dir(package)
|
||||
else:
|
||||
build_dir = os.path.join(self.build_src,
|
||||
os.path.join(*package.split('.')))
|
||||
if hasattr(source, '__call__'):
|
||||
target = os.path.join(build_dir, module_base + '.py')
|
||||
source = source(target)
|
||||
if source is None:
|
||||
continue
|
||||
modules = [(package, module_base, source)]
|
||||
if package not in self.py_modules_dict:
|
||||
self.py_modules_dict[package] = []
|
||||
self.py_modules_dict[package] += modules
|
||||
else:
|
||||
new_py_modules.append(source)
|
||||
self.py_modules[:] = new_py_modules
|
||||
|
||||
def build_library_sources(self, lib_name, build_info):
|
||||
sources = list(build_info.get('sources', []))
|
||||
|
||||
if not sources:
|
||||
return
|
||||
|
||||
log.info('building library "%s" sources' % (lib_name))
|
||||
|
||||
sources = self.generate_sources(sources, (lib_name, build_info))
|
||||
|
||||
sources = self.template_sources(sources, (lib_name, build_info))
|
||||
|
||||
sources, h_files = self.filter_h_files(sources)
|
||||
|
||||
if h_files:
|
||||
log.info('%s - nothing done with h_files = %s',
|
||||
self.package, h_files)
|
||||
|
||||
#for f in h_files:
|
||||
# self.distribution.headers.append((lib_name,f))
|
||||
|
||||
build_info['sources'] = sources
|
||||
return
|
||||
|
||||
def build_extension_sources(self, ext):
|
||||
|
||||
sources = list(ext.sources)
|
||||
|
||||
log.info('building extension "%s" sources' % (ext.name))
|
||||
|
||||
fullname = self.get_ext_fullname(ext.name)
|
||||
|
||||
modpath = fullname.split('.')
|
||||
package = '.'.join(modpath[0:-1])
|
||||
|
||||
if self.inplace:
|
||||
self.ext_target_dir = self.get_package_dir(package)
|
||||
|
||||
sources = self.generate_sources(sources, ext)
|
||||
sources = self.template_sources(sources, ext)
|
||||
sources = self.swig_sources(sources, ext)
|
||||
sources = self.f2py_sources(sources, ext)
|
||||
sources = self.pyrex_sources(sources, ext)
|
||||
|
||||
sources, py_files = self.filter_py_files(sources)
|
||||
|
||||
if package not in self.py_modules_dict:
|
||||
self.py_modules_dict[package] = []
|
||||
modules = []
|
||||
for f in py_files:
|
||||
module = os.path.splitext(os.path.basename(f))[0]
|
||||
modules.append((package, module, f))
|
||||
self.py_modules_dict[package] += modules
|
||||
|
||||
sources, h_files = self.filter_h_files(sources)
|
||||
|
||||
if h_files:
|
||||
log.info('%s - nothing done with h_files = %s',
|
||||
package, h_files)
|
||||
#for f in h_files:
|
||||
# self.distribution.headers.append((package,f))
|
||||
|
||||
ext.sources = sources
|
||||
|
||||
def generate_sources(self, sources, extension):
|
||||
new_sources = []
|
||||
func_sources = []
|
||||
for source in sources:
|
||||
if is_string(source):
|
||||
new_sources.append(source)
|
||||
else:
|
||||
func_sources.append(source)
|
||||
if not func_sources:
|
||||
return new_sources
|
||||
if self.inplace and not is_sequence(extension):
|
||||
build_dir = self.ext_target_dir
|
||||
else:
|
||||
if is_sequence(extension):
|
||||
name = extension[0]
|
||||
# if 'include_dirs' not in extension[1]:
|
||||
# extension[1]['include_dirs'] = []
|
||||
# incl_dirs = extension[1]['include_dirs']
|
||||
else:
|
||||
name = extension.name
|
||||
# incl_dirs = extension.include_dirs
|
||||
#if self.build_src not in incl_dirs:
|
||||
# incl_dirs.append(self.build_src)
|
||||
build_dir = os.path.join(*([self.build_src]
|
||||
+name.split('.')[:-1]))
|
||||
self.mkpath(build_dir)
|
||||
|
||||
if self.verbose_cfg:
|
||||
new_level = log.INFO
|
||||
else:
|
||||
new_level = log.WARN
|
||||
old_level = log.set_threshold(new_level)
|
||||
|
||||
for func in func_sources:
|
||||
source = func(extension, build_dir)
|
||||
if not source:
|
||||
continue
|
||||
if is_sequence(source):
|
||||
[log.info(" adding '%s' to sources." % (s,)) for s in source]
|
||||
new_sources.extend(source)
|
||||
else:
|
||||
log.info(" adding '%s' to sources." % (source,))
|
||||
new_sources.append(source)
|
||||
log.set_threshold(old_level)
|
||||
return new_sources
|
||||
|
||||
def filter_py_files(self, sources):
|
||||
return self.filter_files(sources, ['.py'])
|
||||
|
||||
def filter_h_files(self, sources):
|
||||
return self.filter_files(sources, ['.h', '.hpp', '.inc'])
|
||||
|
||||
def filter_files(self, sources, exts = []):
|
||||
new_sources = []
|
||||
files = []
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext in exts:
|
||||
files.append(source)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
return new_sources, files
|
||||
|
||||
def template_sources(self, sources, extension):
|
||||
new_sources = []
|
||||
if is_sequence(extension):
|
||||
depends = extension[1].get('depends')
|
||||
include_dirs = extension[1].get('include_dirs')
|
||||
else:
|
||||
depends = extension.depends
|
||||
include_dirs = extension.include_dirs
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.src': # Template file
|
||||
if self.inplace:
|
||||
target_dir = os.path.dirname(base)
|
||||
else:
|
||||
target_dir = appendpath(self.build_src, os.path.dirname(base))
|
||||
self.mkpath(target_dir)
|
||||
target_file = os.path.join(target_dir, os.path.basename(base))
|
||||
if (self.force or newer_group([source] + depends, target_file)):
|
||||
if _f_pyf_ext_match(base):
|
||||
log.info("from_template:> %s" % (target_file))
|
||||
outstr = process_f_file(source)
|
||||
else:
|
||||
log.info("conv_template:> %s" % (target_file))
|
||||
outstr = process_c_file(source)
|
||||
with open(target_file, 'w') as fid:
|
||||
fid.write(outstr)
|
||||
if _header_ext_match(target_file):
|
||||
d = os.path.dirname(target_file)
|
||||
if d not in include_dirs:
|
||||
log.info(" adding '%s' to include_dirs." % (d))
|
||||
include_dirs.append(d)
|
||||
new_sources.append(target_file)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
return new_sources
|
||||
|
||||
def pyrex_sources(self, sources, extension):
|
||||
"""Pyrex not supported; this remains for Cython support (see below)"""
|
||||
new_sources = []
|
||||
ext_name = extension.name.split('.')[-1]
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.pyx':
|
||||
target_file = self.generate_a_pyrex_source(base, ext_name,
|
||||
source,
|
||||
extension)
|
||||
new_sources.append(target_file)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
return new_sources
|
||||
|
||||
def generate_a_pyrex_source(self, base, ext_name, source, extension):
|
||||
"""Pyrex is not supported, but some projects monkeypatch this method.
|
||||
|
||||
That allows compiling Cython code, see gh-6955.
|
||||
This method will remain here for compatibility reasons.
|
||||
"""
|
||||
return []
|
||||
|
||||
def f2py_sources(self, sources, extension):
|
||||
new_sources = []
|
||||
f2py_sources = []
|
||||
f_sources = []
|
||||
f2py_targets = {}
|
||||
target_dirs = []
|
||||
ext_name = extension.name.split('.')[-1]
|
||||
skip_f2py = 0
|
||||
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.pyf': # F2PY interface file
|
||||
if self.inplace:
|
||||
target_dir = os.path.dirname(base)
|
||||
else:
|
||||
target_dir = appendpath(self.build_src, os.path.dirname(base))
|
||||
if os.path.isfile(source):
|
||||
name = get_f2py_modulename(source)
|
||||
if name != ext_name:
|
||||
raise DistutilsSetupError('mismatch of extension names: %s '
|
||||
'provides %r but expected %r' % (
|
||||
source, name, ext_name))
|
||||
target_file = os.path.join(target_dir, name+'module.c')
|
||||
else:
|
||||
log.debug(' source %s does not exist: skipping f2py\'ing.' \
|
||||
% (source))
|
||||
name = ext_name
|
||||
skip_f2py = 1
|
||||
target_file = os.path.join(target_dir, name+'module.c')
|
||||
if not os.path.isfile(target_file):
|
||||
log.warn(' target %s does not exist:\n '\
|
||||
'Assuming %smodule.c was generated with '\
|
||||
'"build_src --inplace" command.' \
|
||||
% (target_file, name))
|
||||
target_dir = os.path.dirname(base)
|
||||
target_file = os.path.join(target_dir, name+'module.c')
|
||||
if not os.path.isfile(target_file):
|
||||
raise DistutilsSetupError("%r missing" % (target_file,))
|
||||
log.info(' Yes! Using %r as up-to-date target.' \
|
||||
% (target_file))
|
||||
target_dirs.append(target_dir)
|
||||
f2py_sources.append(source)
|
||||
f2py_targets[source] = target_file
|
||||
new_sources.append(target_file)
|
||||
elif fortran_ext_match(ext):
|
||||
f_sources.append(source)
|
||||
else:
|
||||
new_sources.append(source)
|
||||
|
||||
if not (f2py_sources or f_sources):
|
||||
return new_sources
|
||||
|
||||
for d in target_dirs:
|
||||
self.mkpath(d)
|
||||
|
||||
f2py_options = extension.f2py_options + self.f2py_opts
|
||||
|
||||
if self.distribution.libraries:
|
||||
for name, build_info in self.distribution.libraries:
|
||||
if name in extension.libraries:
|
||||
f2py_options.extend(build_info.get('f2py_options', []))
|
||||
|
||||
log.info("f2py options: %s" % (f2py_options))
|
||||
|
||||
if f2py_sources:
|
||||
if len(f2py_sources) != 1:
|
||||
raise DistutilsSetupError(
|
||||
'only one .pyf file is allowed per extension module but got'\
|
||||
' more: %r' % (f2py_sources,))
|
||||
source = f2py_sources[0]
|
||||
target_file = f2py_targets[source]
|
||||
target_dir = os.path.dirname(target_file) or '.'
|
||||
depends = [source] + extension.depends
|
||||
if (self.force or newer_group(depends, target_file, 'newer')) \
|
||||
and not skip_f2py:
|
||||
log.info("f2py: %s" % (source))
|
||||
import numpy.f2py
|
||||
numpy.f2py.run_main(f2py_options
|
||||
+ ['--build-dir', target_dir, source])
|
||||
else:
|
||||
log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
|
||||
else:
|
||||
#XXX TODO: --inplace support for sdist command
|
||||
if is_sequence(extension):
|
||||
name = extension[0]
|
||||
else: name = extension.name
|
||||
target_dir = os.path.join(*([self.build_src]
|
||||
+name.split('.')[:-1]))
|
||||
target_file = os.path.join(target_dir, ext_name + 'module.c')
|
||||
new_sources.append(target_file)
|
||||
depends = f_sources + extension.depends
|
||||
if (self.force or newer_group(depends, target_file, 'newer')) \
|
||||
and not skip_f2py:
|
||||
log.info("f2py:> %s" % (target_file))
|
||||
self.mkpath(target_dir)
|
||||
import numpy.f2py
|
||||
numpy.f2py.run_main(f2py_options + ['--lower',
|
||||
'--build-dir', target_dir]+\
|
||||
['-m', ext_name]+f_sources)
|
||||
else:
|
||||
log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
|
||||
% (target_file))
|
||||
|
||||
if not os.path.isfile(target_file):
|
||||
raise DistutilsError("f2py target file %r not generated" % (target_file,))
|
||||
|
||||
build_dir = os.path.join(self.build_src, target_dir)
|
||||
target_c = os.path.join(build_dir, 'fortranobject.c')
|
||||
target_h = os.path.join(build_dir, 'fortranobject.h')
|
||||
log.info(" adding '%s' to sources." % (target_c))
|
||||
new_sources.append(target_c)
|
||||
if build_dir not in extension.include_dirs:
|
||||
log.info(" adding '%s' to include_dirs." % (build_dir))
|
||||
extension.include_dirs.append(build_dir)
|
||||
|
||||
if not skip_f2py:
|
||||
import numpy.f2py
|
||||
d = os.path.dirname(numpy.f2py.__file__)
|
||||
source_c = os.path.join(d, 'src', 'fortranobject.c')
|
||||
source_h = os.path.join(d, 'src', 'fortranobject.h')
|
||||
if newer(source_c, target_c) or newer(source_h, target_h):
|
||||
self.mkpath(os.path.dirname(target_c))
|
||||
self.copy_file(source_c, target_c)
|
||||
self.copy_file(source_h, target_h)
|
||||
else:
|
||||
if not os.path.isfile(target_c):
|
||||
raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
|
||||
if not os.path.isfile(target_h):
|
||||
raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
|
||||
|
||||
for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
|
||||
filename = os.path.join(target_dir, ext_name + name_ext)
|
||||
if os.path.isfile(filename):
|
||||
log.info(" adding '%s' to sources." % (filename))
|
||||
f_sources.append(filename)
|
||||
|
||||
return new_sources + f_sources
|
||||
|
||||
def swig_sources(self, sources, extension):
|
||||
# Assuming SWIG 1.3.14 or later. See compatibility note in
|
||||
# http://www.swig.org/Doc1.3/Python.html#Python_nn6
|
||||
|
||||
new_sources = []
|
||||
swig_sources = []
|
||||
swig_targets = {}
|
||||
target_dirs = []
|
||||
py_files = [] # swig generated .py files
|
||||
target_ext = '.c'
|
||||
if '-c++' in extension.swig_opts:
|
||||
typ = 'c++'
|
||||
is_cpp = True
|
||||
extension.swig_opts.remove('-c++')
|
||||
elif self.swig_cpp:
|
||||
typ = 'c++'
|
||||
is_cpp = True
|
||||
else:
|
||||
typ = None
|
||||
is_cpp = False
|
||||
skip_swig = 0
|
||||
ext_name = extension.name.split('.')[-1]
|
||||
|
||||
for source in sources:
|
||||
(base, ext) = os.path.splitext(source)
|
||||
if ext == '.i': # SWIG interface file
|
||||
# the code below assumes that the sources list
|
||||
# contains not more than one .i SWIG interface file
|
||||
if self.inplace:
|
||||
target_dir = os.path.dirname(base)
|
||||
py_target_dir = self.ext_target_dir
|
||||
else:
|
||||
target_dir = appendpath(self.build_src, os.path.dirname(base))
|
||||
py_target_dir = target_dir
|
||||
if os.path.isfile(source):
|
||||
name = get_swig_modulename(source)
|
||||
if name != ext_name[1:]:
|
||||
raise DistutilsSetupError(
|
||||
'mismatch of extension names: %s provides %r'
|
||||
' but expected %r' % (source, name, ext_name[1:]))
|
||||
if typ is None:
|
||||
typ = get_swig_target(source)
|
||||
is_cpp = typ=='c++'
|
||||
else:
|
||||
typ2 = get_swig_target(source)
|
||||
if typ2 is None:
|
||||
log.warn('source %r does not define swig target, assuming %s swig target' \
|
||||
% (source, typ))
|
||||
elif typ!=typ2:
|
||||
log.warn('expected %r but source %r defines %r swig target' \
|
||||
% (typ, source, typ2))
|
||||
if typ2=='c++':
|
||||
log.warn('resetting swig target to c++ (some targets may have .c extension)')
|
||||
is_cpp = True
|
||||
else:
|
||||
log.warn('assuming that %r has c++ swig target' % (source))
|
||||
if is_cpp:
|
||||
target_ext = '.cpp'
|
||||
target_file = os.path.join(target_dir, '%s_wrap%s' \
|
||||
% (name, target_ext))
|
||||
else:
|
||||
log.warn(' source %s does not exist: skipping swig\'ing.' \
|
||||
% (source))
|
||||
name = ext_name[1:]
|
||||
skip_swig = 1
|
||||
target_file = _find_swig_target(target_dir, name)
|
||||
if not os.path.isfile(target_file):
|
||||
log.warn(' target %s does not exist:\n '\
|
||||
'Assuming %s_wrap.{c,cpp} was generated with '\
|
||||
'"build_src --inplace" command.' \
|
||||
% (target_file, name))
|
||||
target_dir = os.path.dirname(base)
|
||||
target_file = _find_swig_target(target_dir, name)
|
||||
if not os.path.isfile(target_file):
|
||||
raise DistutilsSetupError("%r missing" % (target_file,))
|
||||
log.warn(' Yes! Using %r as up-to-date target.' \
|
||||
% (target_file))
|
||||
target_dirs.append(target_dir)
|
||||
new_sources.append(target_file)
|
||||
py_files.append(os.path.join(py_target_dir, name+'.py'))
|
||||
swig_sources.append(source)
|
||||
swig_targets[source] = new_sources[-1]
|
||||
else:
|
||||
new_sources.append(source)
|
||||
|
||||
if not swig_sources:
|
||||
return new_sources
|
||||
|
||||
if skip_swig:
|
||||
return new_sources + py_files
|
||||
|
||||
for d in target_dirs:
|
||||
self.mkpath(d)
|
||||
|
||||
swig = self.swig or self.find_swig()
|
||||
swig_cmd = [swig, "-python"] + extension.swig_opts
|
||||
if is_cpp:
|
||||
swig_cmd.append('-c++')
|
||||
for d in extension.include_dirs:
|
||||
swig_cmd.append('-I'+d)
|
||||
for source in swig_sources:
|
||||
target = swig_targets[source]
|
||||
depends = [source] + extension.depends
|
||||
if self.force or newer_group(depends, target, 'newer'):
|
||||
log.info("%s: %s" % (os.path.basename(swig) \
|
||||
+ (is_cpp and '++' or ''), source))
|
||||
self.spawn(swig_cmd + self.swig_opts \
|
||||
+ ["-o", target, '-outdir', py_target_dir, source])
|
||||
else:
|
||||
log.debug(" skipping '%s' swig interface (up-to-date)" \
|
||||
% (source))
|
||||
|
||||
return new_sources + py_files
|
||||
|
||||
_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
|
||||
_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match
|
||||
|
||||
#### SWIG related auxiliary functions ####
|
||||
_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
|
||||
re.I).match
|
||||
_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search
|
||||
_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search
|
||||
|
||||
def get_swig_target(source):
|
||||
with open(source, 'r') as f:
|
||||
result = None
|
||||
line = f.readline()
|
||||
if _has_cpp_header(line):
|
||||
result = 'c++'
|
||||
if _has_c_header(line):
|
||||
result = 'c'
|
||||
return result
|
||||
|
||||
def get_swig_modulename(source):
|
||||
with open(source, 'r') as f:
|
||||
name = None
|
||||
for line in f:
|
||||
m = _swig_module_name_match(line)
|
||||
if m:
|
||||
name = m.group('name')
|
||||
break
|
||||
return name
|
||||
|
||||
def _find_swig_target(target_dir, name):
|
||||
for ext in ['.cpp', '.c']:
|
||||
target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
|
||||
if os.path.isfile(target):
|
||||
break
|
||||
return target
|
||||
|
||||
#### F2PY related auxiliary functions ####
|
||||
|
||||
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
|
||||
re.I).match
|
||||
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
|
||||
r'__user__[\w_]*)', re.I).match
|
||||
|
||||
def get_f2py_modulename(source):
|
||||
name = None
|
||||
with open(source) as f:
|
||||
for line in f:
|
||||
m = _f2py_module_name_match(line)
|
||||
if m:
|
||||
if _f2py_user_module_name_match(line): # skip *__user__* names
|
||||
continue
|
||||
name = m.group('name')
|
||||
break
|
||||
return name
|
||||
|
||||
##########################################
|
@@ -1,517 +0,0 @@
|
||||
# Added Fortran compiler support to config. Currently useful only for
|
||||
# try_compile call. try_run works but is untested for most of Fortran
|
||||
# compilers (they must define linker_exe first).
|
||||
# Pearu Peterson
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import textwrap
|
||||
import warnings
|
||||
|
||||
from distutils.command.config import config as old_config
|
||||
from distutils.command.config import LANG_EXT
|
||||
from distutils import log
|
||||
from distutils.file_util import copy_file
|
||||
from distutils.ccompiler import CompileError, LinkError
|
||||
import distutils
|
||||
from numpy.distutils.exec_command import filepath_from_subprocess_output
|
||||
from numpy.distutils.mingw32ccompiler import generate_manifest
|
||||
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
|
||||
check_gcc_function_attribute_with_intrinsics,
|
||||
check_gcc_variable_attribute,
|
||||
check_gcc_version_at_least,
|
||||
check_inline,
|
||||
check_restrict,
|
||||
check_compiler_gcc)
|
||||
|
||||
LANG_EXT['f77'] = '.f'
|
||||
LANG_EXT['f90'] = '.f90'
|
||||
|
||||
class config(old_config):
|
||||
old_config.user_options += [
|
||||
('fcompiler=', None, "specify the Fortran compiler type"),
|
||||
]
|
||||
|
||||
def initialize_options(self):
|
||||
self.fcompiler = None
|
||||
old_config.initialize_options(self)
|
||||
|
||||
def _check_compiler (self):
|
||||
old_config._check_compiler(self)
|
||||
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
|
||||
|
||||
if sys.platform == 'win32' and (self.compiler.compiler_type in
|
||||
('msvc', 'intelw', 'intelemw')):
|
||||
# XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
|
||||
# initialize call query_vcvarsall, which throws an IOError, and
|
||||
# causes an error along the way without much information. We try to
|
||||
# catch it here, hoping it is early enough, and print an helpful
|
||||
# message instead of Error: None.
|
||||
if not self.compiler.initialized:
|
||||
try:
|
||||
self.compiler.initialize()
|
||||
except IOError as e:
|
||||
msg = textwrap.dedent("""\
|
||||
Could not initialize compiler instance: do you have Visual Studio
|
||||
installed? If you are trying to build with MinGW, please use "python setup.py
|
||||
build -c mingw32" instead. If you have Visual Studio installed, check it is
|
||||
correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
|
||||
VS 2010 for >= 3.3).
|
||||
|
||||
Original exception was: %s, and the Compiler class was %s
|
||||
============================================================================""") \
|
||||
% (e, self.compiler.__class__.__name__)
|
||||
print(textwrap.dedent("""\
|
||||
============================================================================"""))
|
||||
raise distutils.errors.DistutilsPlatformError(msg) from e
|
||||
|
||||
# After MSVC is initialized, add an explicit /MANIFEST to linker
|
||||
# flags. See issues gh-4245 and gh-4101 for details. Also
|
||||
# relevant are issues 4431 and 16296 on the Python bug tracker.
|
||||
from distutils import msvc9compiler
|
||||
if msvc9compiler.get_build_version() >= 10:
|
||||
for ldflags in [self.compiler.ldflags_shared,
|
||||
self.compiler.ldflags_shared_debug]:
|
||||
if '/MANIFEST' not in ldflags:
|
||||
ldflags.append('/MANIFEST')
|
||||
|
||||
if not isinstance(self.fcompiler, FCompiler):
|
||||
self.fcompiler = new_fcompiler(compiler=self.fcompiler,
|
||||
dry_run=self.dry_run, force=1,
|
||||
c_compiler=self.compiler)
|
||||
if self.fcompiler is not None:
|
||||
self.fcompiler.customize(self.distribution)
|
||||
if self.fcompiler.get_version():
|
||||
self.fcompiler.customize_cmd(self)
|
||||
self.fcompiler.show_customization()
|
||||
|
||||
def _wrap_method(self, mth, lang, args):
|
||||
from distutils.ccompiler import CompileError
|
||||
from distutils.errors import DistutilsExecError
|
||||
save_compiler = self.compiler
|
||||
if lang in ['f77', 'f90']:
|
||||
self.compiler = self.fcompiler
|
||||
if self.compiler is None:
|
||||
raise CompileError('%s compiler is not set' % (lang,))
|
||||
try:
|
||||
ret = mth(*((self,)+args))
|
||||
except (DistutilsExecError, CompileError) as e:
|
||||
self.compiler = save_compiler
|
||||
raise CompileError from e
|
||||
self.compiler = save_compiler
|
||||
return ret
|
||||
|
||||
def _compile (self, body, headers, include_dirs, lang):
|
||||
src, obj = self._wrap_method(old_config._compile, lang,
|
||||
(body, headers, include_dirs, lang))
|
||||
# _compile in unixcompiler.py sometimes creates .d dependency files.
|
||||
# Clean them up.
|
||||
self.temp_files.append(obj + '.d')
|
||||
return src, obj
|
||||
|
||||
def _link (self, body,
|
||||
headers, include_dirs,
|
||||
libraries, library_dirs, lang):
|
||||
if self.compiler.compiler_type=='msvc':
|
||||
libraries = (libraries or [])[:]
|
||||
library_dirs = (library_dirs or [])[:]
|
||||
if lang in ['f77', 'f90']:
|
||||
lang = 'c' # always use system linker when using MSVC compiler
|
||||
if self.fcompiler:
|
||||
for d in self.fcompiler.library_dirs or []:
|
||||
# correct path when compiling in Cygwin but with
|
||||
# normal Win Python
|
||||
if d.startswith('/usr/lib'):
|
||||
try:
|
||||
d = subprocess.check_output(['cygpath',
|
||||
'-w', d])
|
||||
except (OSError, subprocess.CalledProcessError):
|
||||
pass
|
||||
else:
|
||||
d = filepath_from_subprocess_output(d)
|
||||
library_dirs.append(d)
|
||||
for libname in self.fcompiler.libraries or []:
|
||||
if libname not in libraries:
|
||||
libraries.append(libname)
|
||||
for libname in libraries:
|
||||
if libname.startswith('msvc'): continue
|
||||
fileexists = False
|
||||
for libdir in library_dirs or []:
|
||||
libfile = os.path.join(libdir, '%s.lib' % (libname))
|
||||
if os.path.isfile(libfile):
|
||||
fileexists = True
|
||||
break
|
||||
if fileexists: continue
|
||||
# make g77-compiled static libs available to MSVC
|
||||
fileexists = False
|
||||
for libdir in library_dirs:
|
||||
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
|
||||
if os.path.isfile(libfile):
|
||||
# copy libname.a file to name.lib so that MSVC linker
|
||||
# can find it
|
||||
libfile2 = os.path.join(libdir, '%s.lib' % (libname))
|
||||
copy_file(libfile, libfile2)
|
||||
self.temp_files.append(libfile2)
|
||||
fileexists = True
|
||||
break
|
||||
if fileexists: continue
|
||||
log.warn('could not find library %r in directories %s' \
|
||||
% (libname, library_dirs))
|
||||
elif self.compiler.compiler_type == 'mingw32':
|
||||
generate_manifest(self)
|
||||
return self._wrap_method(old_config._link, lang,
|
||||
(body, headers, include_dirs,
|
||||
libraries, library_dirs, lang))
|
||||
|
||||
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
|
||||
self._check_compiler()
|
||||
return self.try_compile(
|
||||
"/* we need a dummy line to make distutils happy */",
|
||||
[header], include_dirs)
|
||||
|
||||
def check_decl(self, symbol,
|
||||
headers=None, include_dirs=None):
|
||||
self._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
int main(void)
|
||||
{
|
||||
#ifndef %s
|
||||
(void) %s;
|
||||
#endif
|
||||
;
|
||||
return 0;
|
||||
}""") % (symbol, symbol)
|
||||
|
||||
return self.try_compile(body, headers, include_dirs)
|
||||
|
||||
def check_macro_true(self, symbol,
|
||||
headers=None, include_dirs=None):
|
||||
self._check_compiler()
|
||||
body = textwrap.dedent("""
|
||||
int main(void)
|
||||
{
|
||||
#if %s
|
||||
#else
|
||||
#error false or undefined macro
|
||||
#endif
|
||||
;
|
||||
return 0;
|
||||
}""") % (symbol,)
|
||||
|
||||
return self.try_compile(body, headers, include_dirs)
|
||||
|
||||
def check_type(self, type_name, headers=None, include_dirs=None,
|
||||
library_dirs=None):
|
||||
"""Check type availability. Return True if the type can be compiled,
|
||||
False otherwise"""
|
||||
self._check_compiler()
|
||||
|
||||
# First check the type can be compiled
|
||||
body = textwrap.dedent(r"""
|
||||
int main(void) {
|
||||
if ((%(name)s *) 0)
|
||||
return 0;
|
||||
if (sizeof (%(name)s))
|
||||
return 0;
|
||||
}
|
||||
""") % {'name': type_name}
|
||||
|
||||
st = False
|
||||
try:
|
||||
try:
|
||||
self._compile(body % {'type': type_name},
|
||||
headers, include_dirs, 'c')
|
||||
st = True
|
||||
except distutils.errors.CompileError:
|
||||
st = False
|
||||
finally:
|
||||
self._clean()
|
||||
|
||||
return st
|
||||
|
||||
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
|
||||
"""Check size of a given type."""
|
||||
self._check_compiler()
|
||||
|
||||
# First check the type can be compiled
|
||||
body = textwrap.dedent(r"""
|
||||
typedef %(type)s npy_check_sizeof_type;
|
||||
int main (void)
|
||||
{
|
||||
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
|
||||
test_array [0] = 0
|
||||
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
self._compile(body % {'type': type_name},
|
||||
headers, include_dirs, 'c')
|
||||
self._clean()
|
||||
|
||||
if expected:
|
||||
body = textwrap.dedent(r"""
|
||||
typedef %(type)s npy_check_sizeof_type;
|
||||
int main (void)
|
||||
{
|
||||
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
|
||||
test_array [0] = 0
|
||||
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
for size in expected:
|
||||
try:
|
||||
self._compile(body % {'type': type_name, 'size': size},
|
||||
headers, include_dirs, 'c')
|
||||
self._clean()
|
||||
return size
|
||||
except CompileError:
|
||||
pass
|
||||
|
||||
# this fails to *compile* if size > sizeof(type)
|
||||
body = textwrap.dedent(r"""
|
||||
typedef %(type)s npy_check_sizeof_type;
|
||||
int main (void)
|
||||
{
|
||||
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
|
||||
test_array [0] = 0
|
||||
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
""")
|
||||
|
||||
# The principle is simple: we first find low and high bounds of size
|
||||
# for the type, where low/high are looked up on a log scale. Then, we
|
||||
# do a binary search to find the exact size between low and high
|
||||
low = 0
|
||||
mid = 0
|
||||
while True:
|
||||
try:
|
||||
self._compile(body % {'type': type_name, 'size': mid},
|
||||
headers, include_dirs, 'c')
|
||||
self._clean()
|
||||
break
|
||||
except CompileError:
|
||||
#log.info("failure to test for bound %d" % mid)
|
||||
low = mid + 1
|
||||
mid = 2 * mid + 1
|
||||
|
||||
high = mid
|
||||
# Binary search:
|
||||
while low != high:
|
||||
mid = (high - low) // 2 + low
|
||||
try:
|
||||
self._compile(body % {'type': type_name, 'size': mid},
|
||||
headers, include_dirs, 'c')
|
||||
self._clean()
|
||||
high = mid
|
||||
except CompileError:
|
||||
low = mid + 1
|
||||
return low
|
||||
|
||||
def check_func(self, func,
|
||||
headers=None, include_dirs=None,
|
||||
libraries=None, library_dirs=None,
|
||||
decl=False, call=False, call_args=None):
|
||||
# clean up distutils's config a bit: add void to main(), and
|
||||
# return a value.
|
||||
self._check_compiler()
|
||||
body = []
|
||||
if decl:
|
||||
if type(decl) == str:
|
||||
body.append(decl)
|
||||
else:
|
||||
body.append("int %s (void);" % func)
|
||||
# Handle MSVC intrinsics: force MS compiler to make a function call.
|
||||
# Useful to test for some functions when built with optimization on, to
|
||||
# avoid build error because the intrinsic and our 'fake' test
|
||||
# declaration do not match.
|
||||
body.append("#ifdef _MSC_VER")
|
||||
body.append("#pragma function(%s)" % func)
|
||||
body.append("#endif")
|
||||
body.append("int main (void) {")
|
||||
if call:
|
||||
if call_args is None:
|
||||
call_args = ''
|
||||
body.append(" %s(%s);" % (func, call_args))
|
||||
else:
|
||||
body.append(" %s;" % func)
|
||||
body.append(" return 0;")
|
||||
body.append("}")
|
||||
body = '\n'.join(body) + "\n"
|
||||
|
||||
return self.try_link(body, headers, include_dirs,
|
||||
libraries, library_dirs)
|
||||
|
||||
def check_funcs_once(self, funcs,
|
||||
headers=None, include_dirs=None,
|
||||
libraries=None, library_dirs=None,
|
||||
decl=False, call=False, call_args=None):
|
||||
"""Check a list of functions at once.
|
||||
|
||||
This is useful to speed up things, since all the functions in the funcs
|
||||
list will be put in one compilation unit.
|
||||
|
||||
Arguments
|
||||
---------
|
||||
funcs : seq
|
||||
list of functions to test
|
||||
include_dirs : seq
|
||||
list of header paths
|
||||
libraries : seq
|
||||
list of libraries to link the code snippet to
|
||||
library_dirs : seq
|
||||
list of library paths
|
||||
decl : dict
|
||||
for every (key, value), the declaration in the value will be
|
||||
used for function in key. If a function is not in the
|
||||
dictionary, no declaration will be used.
|
||||
call : dict
|
||||
for every item (f, value), if the value is True, a call will be
|
||||
done to the function f.
|
||||
"""
|
||||
self._check_compiler()
|
||||
body = []
|
||||
if decl:
|
||||
for f, v in decl.items():
|
||||
if v:
|
||||
body.append("int %s (void);" % f)
|
||||
|
||||
# Handle MS intrinsics. See check_func for more info.
|
||||
body.append("#ifdef _MSC_VER")
|
||||
for func in funcs:
|
||||
body.append("#pragma function(%s)" % func)
|
||||
body.append("#endif")
|
||||
|
||||
body.append("int main (void) {")
|
||||
if call:
|
||||
for f in funcs:
|
||||
if f in call and call[f]:
|
||||
if not (call_args and f in call_args and call_args[f]):
|
||||
args = ''
|
||||
else:
|
||||
args = call_args[f]
|
||||
body.append(" %s(%s);" % (f, args))
|
||||
else:
|
||||
body.append(" %s;" % f)
|
||||
else:
|
||||
for f in funcs:
|
||||
body.append(" %s;" % f)
|
||||
body.append(" return 0;")
|
||||
body.append("}")
|
||||
body = '\n'.join(body) + "\n"
|
||||
|
||||
return self.try_link(body, headers, include_dirs,
|
||||
libraries, library_dirs)
|
||||
|
||||
def check_inline(self):
    """Return the inline keyword recognized by the compiler, empty string
    otherwise."""
    # Delegates to the module-level helper of the same name: inside the
    # method body the bare name resolves to the global function, not to
    # this method.
    return check_inline(self)
|
||||
|
||||
def check_restrict(self):
    """Return the restrict keyword recognized by the compiler, empty string
    otherwise."""
    # Delegates to the module-level helper of the same name.
    return check_restrict(self)
|
||||
|
||||
def check_compiler_gcc(self):
    """Return True if the C compiler is gcc"""
    # Delegates to the module-level helper of the same name.
    return check_compiler_gcc(self)
|
||||
|
||||
def check_gcc_function_attribute(self, attribute, name):
    """Return True if the compiler accepts `attribute` on a function
    named `name` (delegates to the module-level helper)."""
    return check_gcc_function_attribute(self, attribute, name)
|
||||
|
||||
def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
                                                 code, include):
    """Return True if `attribute` is accepted on a function using the
    given intrinsic `code` with header `include` (delegates to the
    module-level helper)."""
    return check_gcc_function_attribute_with_intrinsics(self, attribute,
                                                        name, code, include)
|
||||
|
||||
def check_gcc_variable_attribute(self, attribute):
    """Return True if the compiler accepts `attribute` on a variable
    (delegates to the module-level helper)."""
    return check_gcc_variable_attribute(self, attribute)
|
||||
|
||||
def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
    """Return True if the GCC version is greater than or equal to the
    specified version."""
    # Delegates to the module-level helper of the same name.
    return check_gcc_version_at_least(self, major, minor, patchlevel)
|
||||
|
||||
def get_output(self, body, headers=None, include_dirs=None,
               libraries=None, library_dirs=None,
               lang="c", use_tee=None):
    """Try to compile, link to an executable, and run a program
    built from 'body' and 'headers'.  Returns the exit status code
    of the program and its output.

    Deprecated: running configuration probes on the target machine is
    discouraged (see the warning emitted below).  `use_tee` is accepted
    for backward compatibility and ignored.
    """
    # 2008-11-16, RemoveMe
    warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
                  "Usage of get_output is deprecated: please do not \n"
                  "use it anymore, and avoid configuration checks \n"
                  "involving running executable on the target machine.\n"
                  "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                  DeprecationWarning, stacklevel=2)
    self._check_compiler()
    exitcode, output = 255, ''
    try:
        grabber = GrabStdout()
        try:
            src, obj, exe = self._link(body, headers, include_dirs,
                                       libraries, library_dirs, lang)
            grabber.restore()
        except Exception:
            # Keep whatever the compiler printed before re-raising.
            output = grabber.data
            grabber.restore()
            raise
        exe = os.path.join('.', exe)
        try:
            # specify cwd arg for consistency with
            # historic usage pattern of exec_command()
            # also, note that exe appears to be a string,
            # which exec_command() handled, but we now
            # use a list for check_output() -- this assumes
            # that exe is always a single command
            output = subprocess.check_output([exe], cwd='.')
        except subprocess.CalledProcessError as exc:
            exitstatus = exc.returncode
            output = ''
        except OSError:
            # preserve the EnvironmentError exit status
            # used historically in exec_command()
            exitstatus = 127
            output = ''
        else:
            output = filepath_from_subprocess_output(output)
            # BUG FIX: exitstatus was previously left unassigned on the
            # success path, so the WEXITSTATUS lookup below raised
            # NameError whenever the probe program exited with status 0.
            # check_output() only returns without raising when the exit
            # status is 0.
            exitstatus = 0
        if hasattr(os, 'WEXITSTATUS'):
            exitcode = os.WEXITSTATUS(exitstatus)
            if os.WIFSIGNALED(exitstatus):
                sig = os.WTERMSIG(exitstatus)
                log.error('subprocess exited with signal %d' % (sig,))
                if sig == signal.SIGINT:
                    # control-C
                    raise KeyboardInterrupt
        else:
            # Platforms without wait-status decoding (e.g. Windows):
            # use the raw status as the exit code.
            exitcode = exitstatus
        log.info("success!")
    except (CompileError, LinkError):
        log.info("failure.")
    self._clean()
    return exitcode, output
|
||||
|
||||
class GrabStdout:
    """Tee ``sys.stdout``: forward every write to the real stream while
    accumulating a copy of everything written in ``self.data``.

    Constructing an instance installs it as ``sys.stdout``; call
    ``restore()`` to put the original stream back.
    """

    def __init__(self):
        # Keep a handle on the real stream so it can be restored later.
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, data):
        """Pass `data` through to the real stdout and record a copy."""
        self.sys_stdout.write(data)
        self.data = self.data + data

    def flush(self):
        """Flush the underlying real stream."""
        self.sys_stdout.flush()

    def restore(self):
        """Reinstall the original ``sys.stdout``."""
        sys.stdout = self.sys_stdout
|
@@ -1,126 +0,0 @@
|
||||
from distutils.core import Command
|
||||
from numpy.distutils import log
|
||||
|
||||
#XXX: Linker flags
|
||||
|
||||
def show_fortran_compilers(_cache=None):
    """Print the list of available Fortran compilers.

    The ``_cache`` argument is used to prevent infinite recursion: a
    truthy value means the listing has already been produced, so the
    call returns immediately.
    """
    if _cache:
        return
    if _cache is None:
        _cache = []
    _cache.append(1)
    # Imported lazily so the fcompiler machinery is only pulled in when
    # the listing is actually requested.
    from numpy.distutils.fcompiler import show_fcompilers
    import distutils.core
    show_fcompilers(distutils.core._setup_distribution)
|
||||
|
||||
class config_fc(Command):
    """Distutils command holding user-specified options for Fortran
    compilers.

    The config_fc command is used by the FCompiler.customize() method.
    """

    description = "specify Fortran 77/Fortran 90 compiler information"

    user_options = [
        ('fcompiler=', None, "specify Fortran compiler type"),
        ('f77exec=', None, "specify F77 compiler command"),
        ('f90exec=', None, "specify F90 compiler command"),
        ('f77flags=', None, "specify F77 compiler flags"),
        ('f90flags=', None, "specify F90 compiler flags"),
        ('opt=', None, "specify optimization flags"),
        ('arch=', None, "specify architecture specific optimization flags"),
        ('debug', 'g', "compile with debugging information"),
        ('noopt', None, "compile without optimization"),
        ('noarch', None, "compile without arch-dependent optimization"),
    ]

    help_options = [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
    ]

    boolean_options = ['debug', 'noopt', 'noarch']

    def initialize_options(self):
        """Every option starts unset; distutils fills them in later."""
        for opt in ('fcompiler', 'f77exec', 'f90exec', 'f77flags',
                    'f90flags', 'opt', 'arch', 'debug', 'noopt', 'noarch'):
            setattr(self, opt, None)

    def finalize_options(self):
        """Unify the --fcompiler setting across all compilation commands."""
        log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options')
        # Finalize the sibling commands in the same order as before so
        # that any side effects of finalization are unchanged.
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        cmd_list = [self, config, build_clib, build_ext, build]
        for attr in ['fcompiler']:
            seen = []
            for cmd in cmd_list:
                value = getattr(cmd, attr)
                if value is None:
                    continue
                if not isinstance(value, str):
                    value = value.compiler_type
                if value not in seen:
                    seen.append(value)
            chosen = seen[0] if seen else None
            if len(seen) > 1:
                # Commands disagree; the first value encountered wins.
                log.warn(' commands have different --%s options: %s'
                         ', using first in list as default' % (attr, seen))
            if chosen:
                for cmd in cmd_list:
                    if getattr(cmd, attr) is None:
                        setattr(cmd, attr, chosen)

    def run(self):
        # This command only stores options; there is nothing to execute.
        return
|
||||
|
||||
class config_cc(Command):
    """Distutils command to hold user specified options
    to C/C++ compilers.
    """

    description = "specify C/C++ compiler information"

    user_options = [
        ('compiler=', None, "specify C/C++ compiler type"),
    ]

    def initialize_options(self):
        """The compiler choice starts unset."""
        self.compiler = None

    def finalize_options(self):
        """Unify the --compiler setting across all compilation commands."""
        log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options')
        # Finalize the sibling commands in the same order as before so
        # that any side effects of finalization are unchanged.
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        cmd_list = [self, config, build_clib, build_ext, build]
        for attr in ['compiler']:
            seen = []
            for cmd in cmd_list:
                value = getattr(cmd, attr)
                if value is None:
                    continue
                if not isinstance(value, str):
                    value = value.compiler_type
                if value not in seen:
                    seen.append(value)
            chosen = seen[0] if seen else None
            if len(seen) > 1:
                # Commands disagree; the first value encountered wins.
                log.warn(' commands have different --%s options: %s'
                         ', using first in list as default' % (attr, seen))
            if chosen:
                for cmd in cmd_list:
                    if getattr(cmd, attr) is None:
                        setattr(cmd, attr, chosen)

    def run(self):
        # This command only stores options; there is nothing to execute.
        return
|
@@ -1,15 +0,0 @@
|
||||
""" Override the develop command from setuptools so we can ensure that our
|
||||
generated files (from build_src or build_scripts) are properly converted to real
|
||||
files with filenames.
|
||||
|
||||
"""
|
||||
from setuptools.command.develop import develop as old_develop
|
||||
|
||||
class develop(old_develop):
    # Inherit setuptools' develop docstring verbatim.
    __doc__ = old_develop.__doc__

    def install_for_development(self):
        """Build generated sources and scripts in-place before delegating
        to the setuptools develop-install."""
        # Build sources in-place, too.
        self.reinitialize_command('build_src', inplace=1)
        # Make sure scripts are built.
        self.run_command('build_scripts')
        # Hand off to the setuptools implementation for the actual install.
        old_develop.install_for_development(self)
|
@@ -1,25 +0,0 @@
|
||||
import sys
|
||||
|
||||
from setuptools.command.egg_info import egg_info as _egg_info
|
||||
|
||||
class egg_info(_egg_info):
    """egg_info command that runs build_src first, so that setuptools sees
    real generated files (with filenames) when collecting metadata."""

    def run(self):
        if 'sdist' in sys.argv:
            import warnings
            import textwrap
            # BUG FIX: the message previously contained a stray double
            # quote after "cmdclass={'sdist': sdist}".
            msg = textwrap.dedent("""
                `build_src` is being run, this may lead to missing
                files in your sdist! You want to use distutils.sdist
                instead of the setuptools version:

                    from distutils.command.sdist import sdist
                    cmdclass={'sdist': sdist}

                See numpy's setup.py or gh-7131 for details.""")
            warnings.warn(msg, UserWarning, stacklevel=2)

        # We need to ensure that build_src has been executed in order to give
        # setuptools' egg_info command real filenames instead of functions which
        # generate files.
        self.run_command("build_src")
        _egg_info.run(self)
|
@@ -1,79 +0,0 @@
|
||||
import sys
# Prefer the setuptools install command when setuptools is already active
# in this process; otherwise fall back to the plain distutils command.
if 'setuptools' in sys.modules:
    import setuptools.command.install as old_install_mod
    have_setuptools = True
else:
    import distutils.command.install as old_install_mod
    have_setuptools = False
from distutils.file_util import write_file

# Base class that the 'install' command below extends; which class this is
# depends on the setuptools check above.
old_install = old_install_mod.install
|
||||
|
||||
class install(old_install):
    """Install command that also installs built C libraries and rewrites
    the record file so paths containing spaces are double-quoted."""

    # Always run install_clib - the command is cheap, so no need to bypass it;
    # but it's not run by setuptools -- so it's run again in install_data
    sub_commands = old_install.sub_commands + [
        ('install_clib', lambda x: True)
    ]

    def finalize_options (self):
        """Finalize as usual, then install into the egg base directory."""
        old_install.finalize_options(self)
        self.install_lib = self.install_libbase

    def setuptools_run(self):
        """ The setuptools version of the .run() method.

        We must pull in the entire code so we can override the level used in the
        _getframe() call since we wrap this call by one more level.
        """
        from distutils.command.install import install as distutils_install

        # Explicit request for old-style install? Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return distutils_install.run(self)

        # Attempt to detect whether we were called from setup() or by another
        # command. If we were called by setup(), our caller will be the
        # 'run_command' method in 'distutils.dist', and *its* caller will be
        # the 'run_commands' method. If we were called any other way, our
        # immediate caller *might* be 'run_command', but it won't have been
        # called by 'run_commands'. This is slightly kludgy, but seems to
        # work.
        #
        # NOTE: the frame depth of 3 assumes this method is called through
        # run() below; do not add or remove wrapper levels without
        # adjusting it.
        caller = sys._getframe(3)
        caller_module = caller.f_globals.get('__name__', '')
        caller_name = caller.f_code.co_name

        if caller_module != 'distutils.dist' or caller_name!='run_commands':
            # We weren't called from the command line or setup(), so we
            # should run in backward-compatibility mode to support bdist_*
            # commands.
            distutils_install.run(self)
        else:
            self.do_egg_install()

    def run(self):
        """Run the appropriate base install, then sanitize the record file."""
        if not have_setuptools:
            r = old_install.run(self)
        else:
            r = self.setuptools_run()
        if self.record:
            # bdist_rpm fails when INSTALLED_FILES contains
            # paths with spaces. Such paths must be enclosed
            # with double-quotes.
            with open(self.record, 'r') as f:
                lines = []
                need_rewrite = False
                for l in f:
                    l = l.rstrip()
                    if ' ' in l:
                        need_rewrite = True
                        l = '"%s"' % (l)
                    lines.append(l)
            if need_rewrite:
                self.execute(write_file,
                             (self.record, lines),
                             "re-writing list of installed files to '%s'" %
                             self.record)
        return r
|
@@ -1,40 +0,0 @@
|
||||
import os
|
||||
from distutils.core import Command
|
||||
from distutils.ccompiler import new_compiler
|
||||
from numpy.distutils.misc_util import get_cmd
|
||||
|
||||
class install_clib(Command):
    """Install the installable C libraries produced by build_clib next to
    the installed Python packages."""

    description = "Command to install installable C libraries"

    user_options = []

    def initialize_options(self):
        # Destination directory; filled in by finalize_options().
        self.install_dir = None
        # Files written by run(), reported via get_outputs().
        self.outfiles = []

    def finalize_options(self):
        """Inherit the target directory from the main install command."""
        self.set_undefined_options('install', ('install_lib', 'install_dir'))

    def run (self):
        """Copy each installed C library into its target directory."""
        build_clib_cmd = get_cmd("build_clib")
        if not build_clib_cmd.build_clib:
            # can happen if the user specified `--skip-build`
            build_clib_cmd.finalize_options()
        build_dir = build_clib_cmd.build_clib

        # We need the compiler to get the library name -> filename association
        if build_clib_cmd.compiler:
            compiler = build_clib_cmd.compiler
        else:
            compiler = new_compiler(compiler=None)
            compiler.customize(self.distribution)

        for lib in self.distribution.installed_libraries:
            dest_dir = os.path.join(self.install_dir, lib.target_dir)
            filename = compiler.library_filename(lib.name)
            src = os.path.join(build_dir, filename)
            self.mkpath(dest_dir)
            self.outfiles.append(self.copy_file(src, dest_dir)[0])

    def get_outputs(self):
        """Return the list of files written by run()."""
        return self.outfiles
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user