update Windows build to Python 3.7

j 2019-01-20 16:05:31 +05:30
commit ddc59ab92d
5761 changed files with 750298 additions and 213405 deletions

include/internal/ceval.h (new file)

@@ -0,0 +1,52 @@
#ifndef Py_INTERNAL_CEVAL_H
#define Py_INTERNAL_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pyatomic.h"
#include "pythread.h"
struct _pending_calls {
unsigned long main_thread;
PyThread_type_lock lock;
/* Request for running pending calls. */
_Py_atomic_int calls_to_do;
/* Request for looking at the `async_exc` field of the current
thread state.
Guarded by the GIL. */
int async_exc;
#define NPENDINGCALLS 32
struct {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
int first;
int last;
};
#include "internal/gil.h"
struct _ceval_runtime_state {
int recursion_limit;
/* Records whether tracing is on for any thread. Counts the number
of threads for which tstate->c_tracefunc is non-NULL, so if the
value is 0, we know we don't have to check this thread's
c_tracefunc. This speeds up the if statement in
PyEval_EvalFrameEx() after fast_next_opcode. */
int tracing_possible;
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
struct _pending_calls pending;
struct _gil_runtime_state gil;
};
PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CEVAL_H */
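
The calls array together with first/last forms a fixed-size ring buffer, which is how pending-call requests (Py_AddPendingCall) are queued without allocating. A stand-alone sketch of the push side, with illustrative names (the real logic lives in Python/ceval.c):

#define NSLOTS 32                      /* mirrors NPENDINGCALLS */

struct ring {
    struct { int (*func)(void *); void *arg; } calls[NSLOTS];
    int first;                         /* oldest queued call */
    int last;                          /* next free slot */
};

/* Push one call; returns -1 when full. One slot stays unused so that
   first == last unambiguously means "empty". */
static int ring_push(struct ring *r, int (*func)(void *), void *arg)
{
    int j = (r->last + 1) % NSLOTS;
    if (j == r->first)
        return -1;                     /* full */
    r->calls[r->last].func = func;
    r->calls[r->last].arg = arg;
    r->last = j;
    return 0;
}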

include/internal/condvar.h (new file)

@@ -0,0 +1,91 @@
#ifndef Py_INTERNAL_CONDVAR_H
#define Py_INTERNAL_CONDVAR_H
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
   is not present in unistd.h. But they can still be implemented as an
   external library (e.g. GNU Pth in pthread emulation). */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR
#include <pthread.h>
#define PyMUTEX_T pthread_mutex_t
#define PyCOND_T pthread_cond_t
#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work on XP and later, plus native
* support on Vista and onwards.
*/
#define Py_HAVE_CONDVAR
/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif
/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
#if _PY_EMULATED_WIN_CV
typedef CRITICAL_SECTION PyMUTEX_T;
/* The ConditionVariable object. From XP onwards it is easily emulated
with a Semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event, because although
an auto-reset event might appear to solve the lost-wakeup bug (the race
between releasing the outer lock and waiting) by maintaining state even
when a wait hasn't happened yet, there is still a lost-wakeup problem
if more than one thread is interrupted in the critical place. A
semaphore solves that, because its state is counted, not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
*/
typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;
#else /* !_PY_EMULATED_WIN_CV */
/* Use native Win7 primitives if build target is Win7 or higher */
/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;
typedef CONDITION_VARIABLE PyCOND_T;
#endif /* _PY_EMULATED_WIN_CV */
#endif /* _POSIX_THREADS, NT_THREADS */
#endif /* Py_INTERNAL_CONDVAR_H */
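
To see why the emulated PyCOND_T above beats an auto-reset event, here is a hypothetical sketch of the wait/signal pair under the _PY_EMULATED_WIN_CV definitions (the real inline helpers live in Python/condvar.h; my_cond_wait/my_cond_signal are illustrative names, and the caller is assumed to hold the critical section):

static int my_cond_wait(PyCOND_T *cv, PyMUTEX_T *cs)
{
    cv->waiting++;                         /* register as a waiter */
    LeaveCriticalSection(cs);
    /* A signal sent between the Leave and the Wait is NOT lost: it
       raises the semaphore count, so the Wait returns immediately. */
    DWORD r = WaitForSingleObject(cv->sem, INFINITE);
    EnterCriticalSection(cs);
    return (r == WAIT_OBJECT_0) ? 0 : -1;
}

static int my_cond_signal(PyCOND_T *cv)
{
    /* With no waiters this is a no-op, which keeps the semaphore's
       count bounded and avoids spurious wakeups later. */
    if (cv->waiting > 0) {
        cv->waiting--;
        return ReleaseSemaphore(cv->sem, 1, NULL) ? 0 : -1;
    }
    return 0;
}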

include/internal/context.h (new file)

@@ -0,0 +1,41 @@
#ifndef Py_INTERNAL_CONTEXT_H
#define Py_INTERNAL_CONTEXT_H
#include "internal/hamt.h"
struct _pycontextobject {
PyObject_HEAD
PyContext *ctx_prev;
PyHamtObject *ctx_vars;
PyObject *ctx_weakreflist;
int ctx_entered;
};
struct _pycontextvarobject {
PyObject_HEAD
PyObject *var_name;
PyObject *var_default;
PyObject *var_cached;
uint64_t var_cached_tsid;
uint64_t var_cached_tsver;
Py_hash_t var_hash;
};
struct _pycontexttokenobject {
PyObject_HEAD
PyContext *tok_ctx;
PyContextVar *tok_var;
PyObject *tok_oldval;
int tok_used;
};
int _PyContext_Init(void);
void _PyContext_Fini(void);
#endif /* !Py_INTERNAL_CONTEXT_H */
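
The three var_cached* fields exist so that a ContextVar lookup can skip the HAMT walk entirely when nothing has changed. A hypothetical sketch of that fast path (the real code is in Python/context.c; tsid/tsver stand in for the current thread state's id and context version):

static PyObject *
contextvar_cached_get(struct _pycontextvarobject *var,
                      uint64_t tsid, uint64_t tsver)
{
    if (var->var_cached != NULL &&
        var->var_cached_tsid == tsid &&   /* cached for this thread state */
        var->var_cached_tsver == tsver)   /* and the context is unchanged */
    {
        return var->var_cached;           /* hit: no HAMT traversal */
    }
    return NULL;  /* miss: fall back to _PyHamt_Find() on ctx_vars */
}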

include/internal/gil.h (new file)

@@ -0,0 +1,46 @@
#ifndef Py_INTERNAL_GIL_H
#define Py_INTERNAL_GIL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pyatomic.h"
#include "internal/condvar.h"
#ifndef Py_HAVE_CONDVAR
#error You need either a POSIX-compatible or a Windows system!
#endif
/* Enable if you want to force the switching of threads at least
every `interval`. */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING
struct _gil_runtime_state {
/* microseconds (the Python API uses seconds, though) */
unsigned long interval;
/* Last PyThreadState holding / having held the GIL. This helps us
know whether anyone else was scheduled after we dropped the GIL. */
_Py_atomic_address last_holder;
/* Whether the GIL is already taken (-1 if uninitialized). This is
atomic because it can be read without any lock taken in ceval.c. */
_Py_atomic_int locked;
/* Number of GIL switches since the beginning. */
unsigned long switch_number;
/* This condition variable allows one or several threads to wait
until the GIL is released. In addition, the mutex also protects
the above variables. */
PyCOND_T cond;
PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
a GIL-awaiting thread to be scheduled and take the GIL. */
PyCOND_T switch_cond;
PyMUTEX_T switch_mutex;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GIL_H */
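
A sketch of how these fields cooperate when a thread releases the GIL (MUTEX_LOCK/MUTEX_UNLOCK/COND_SIGNAL are hypothetical wrappers over PyMUTEX_T/PyCOND_T; the real logic is in Python/ceval_gil.h):

static void drop_gil_sketch(struct _gil_runtime_state *gil)
{
    MUTEX_LOCK(gil->mutex);
    /* 'locked' is atomic because ceval.c reads it without the mutex. */
    _Py_atomic_store_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);      /* wake one thread blocked in take_gil */
    MUTEX_UNLOCK(gil->mutex);
#ifdef FORCE_SWITCHING
    /* Wait on switch_cond/switch_mutex until switch_number advances,
       i.e. until another thread has actually taken the GIL, so the
       releasing thread cannot immediately re-acquire it. */
#endif
}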

include/internal/hamt.h (new file)

@@ -0,0 +1,113 @@
#ifndef Py_INTERNAL_HAMT_H
#define Py_INTERNAL_HAMT_H
#define _Py_HAMT_MAX_TREE_DEPTH 7
#define PyHamt_Check(o) (Py_TYPE(o) == &_PyHamt_Type)
/* Abstract tree node. */
typedef struct {
PyObject_HEAD
} PyHamtNode;
/* A HAMT immutable mapping collection. */
typedef struct {
PyObject_HEAD
PyHamtNode *h_root;
PyObject *h_weakreflist;
Py_ssize_t h_count;
} PyHamtObject;
/* A struct to hold the state of a depth-first traversal of the tree.
HAMT is an immutable collection. Iterators will hold a strong reference
to it, and every node in the HAMT has strong references to its children.
So for iterators, we can implement zero allocations and zero reference
inc/dec depth-first iteration.
- i_nodes: an array of seven pointers to tree nodes
- i_level: the index of the current node in i_nodes
- i_pos: an array of positions within nodes in i_nodes.
*/
typedef struct {
PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH];
Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH];
int8_t i_level;
} PyHamtIteratorState;
/* Base iterator object.
Contains the iteration state, a pointer to the HAMT tree,
and a pointer to the 'yield function'. The latter is a simple
function that returns a key/value tuple for the 'Items' iterator,
just a key for the 'Keys' iterator, and a value for the 'Values'
iterator.
*/
typedef struct {
PyObject_HEAD
PyHamtObject *hi_obj;
PyHamtIteratorState hi_iter;
binaryfunc hi_yield;
} PyHamtIterator;
PyAPI_DATA(PyTypeObject) _PyHamt_Type;
PyAPI_DATA(PyTypeObject) _PyHamt_ArrayNode_Type;
PyAPI_DATA(PyTypeObject) _PyHamt_BitmapNode_Type;
PyAPI_DATA(PyTypeObject) _PyHamt_CollisionNode_Type;
PyAPI_DATA(PyTypeObject) _PyHamtKeys_Type;
PyAPI_DATA(PyTypeObject) _PyHamtValues_Type;
PyAPI_DATA(PyTypeObject) _PyHamtItems_Type;
/* Create a new HAMT immutable mapping. */
PyHamtObject * _PyHamt_New(void);
/* Return a new collection based on "o", but with an additional
key/val pair. */
PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val);
/* Return a new collection based on "o", but without "key". */
PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key);
/* Find "key" in the "o" collection.
Return:
- -1: An error occurred.
- 0: "key" wasn't found in "o".
- 1: "key" is in "o"; "*val" is set to its value (a borrowed ref).
*/
int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val);
/* Check if "v" is equal to "w".
Return:
- 0: v != w
- 1: v == w
- -1: An error occurred.
*/
int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w);
/* Return the size of "o"; equivalent of "len(o)". */
Py_ssize_t _PyHamt_Len(PyHamtObject *o);
/* Return a Keys iterator over "o". */
PyObject * _PyHamt_NewIterKeys(PyHamtObject *o);
/* Return a Values iterator over "o". */
PyObject * _PyHamt_NewIterValues(PyHamtObject *o);
/* Return an Items iterator over "o". */
PyObject * _PyHamt_NewIterItems(PyHamtObject *o);
int _PyHamt_Init(void);
void _PyHamt_Fini(void);
#endif /* !Py_INTERNAL_HAMT_H */
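
A short usage sketch of the API above (error paths elided; assumes _PyHamt_Init() has run and key/val are live objects):

static int hamt_demo(PyObject *key, PyObject *val)
{
    PyHamtObject *empty = _PyHamt_New();                /* new empty mapping */
    PyHamtObject *one = _PyHamt_Assoc(empty, key, val); /* persistent insert:
                                                           'empty' is unchanged */
    PyObject *found = NULL;
    int rc = _PyHamt_Find(one, key, &found);            /* 1 -> borrowed ref */
    Py_DECREF(empty);
    Py_DECREF(one);
    return rc == 1 && found == val;
}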

include/internal/hash.h (new file)

@@ -0,0 +1,6 @@
#ifndef Py_INTERNAL_HASH_H
#define Py_INTERNAL_HASH_H
uint64_t _Py_KeyedHash(uint64_t, const char *, Py_ssize_t);
#endif /* !Py_INTERNAL_HASH_H */

include/internal/import.h (new file)

@@ -0,0 +1,6 @@
#ifndef Py_INTERNAL_IMPORT_H
#define Py_INTERNAL_IMPORT_H
extern const char *_Py_CheckHashBasedPycsMode;
#endif /* !Py_INTERNAL_IMPORT_H */

include/internal/mem.h (new file)

@@ -0,0 +1,151 @@
#ifndef Py_INTERNAL_MEM_H
#define Py_INTERNAL_MEM_H
#ifdef __cplusplus
extern "C" {
#endif
#include "objimpl.h"
#include "pymem.h"
/* GC runtime state */
/* If we change this, we need to change the default value in the
signature of gc.collect. */
#define NUM_GENERATIONS 3
/*
NOTE: about the counting of long-lived objects.
To limit the cost of garbage collection, there are two strategies:
- make each collection faster, e.g. by scanning fewer objects
- do fewer collections
This heuristic is about the latter strategy.
In addition to the various configurable thresholds, we only trigger a
full collection if the ratio
long_lived_pending / long_lived_total
is above a given value (hardwired to 25%).
The reason is that, while "non-full" collections (i.e., collections of
the young and middle generations) will always examine roughly the same
number of objects -- determined by the aforementioned thresholds --,
the cost of a full collection is proportional to the total number of
long-lived objects, which is virtually unbounded.
Indeed, it has been remarked that doing a full collection every
<constant number> of object creations entails a dramatic performance
degradation in workloads that consist of creating and storing lots of
long-lived objects (e.g. building a large list of GC-tracked objects would
show quadratic performance, instead of linear as expected: see issue #4074).
Using the above ratio, instead, yields amortized linear performance in
the total number of objects (the effect of which can be summarized
thusly: "each full garbage collection is more and more costly as the
number of objects grows, but we do fewer and fewer of them").
This heuristic was suggested by Martin von Löwis on python-dev in
June 2008. His original analysis and proposal can be found at:
http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
/*
NOTE: about untracking of mutable objects.
Certain types of container cannot participate in a reference cycle, and
so do not need to be tracked by the garbage collector. Untracking these
objects reduces the cost of garbage collections. However, determining
which objects may be untracked is not free, and the costs must be
weighed against the benefits for garbage collection.
There are two possible strategies for when to untrack a container:
i) When the container is created.
ii) When the container is examined by the garbage collector.
Tuples containing only immutable objects (integers, strings etc, and
recursively, tuples of immutable objects) do not need to be tracked.
The interpreter creates a large number of tuples, many of which will
not survive until garbage collection. It is therefore not worthwhile
to untrack eligible tuples at creation time.
Instead, all tuples except the empty tuple are tracked when created.
During garbage collection it is determined whether any surviving tuples
can be untracked. A tuple can be untracked if all of its contents are
already not tracked. Tuples are examined for untracking in all garbage
collection cycles. It may take more than one cycle to untrack a tuple.
Dictionaries containing only immutable objects also do not need to be
tracked. Dictionaries are untracked when created. If a tracked item is
inserted into a dictionary (either as a key or value), the dictionary
becomes tracked. During a full garbage collection (all generations),
the collector will untrack any dictionaries whose contents are not
tracked.
The gc module provides the Python function is_tracked(obj), which returns
the current tracking status of the object. Subsequent garbage
collections may change the tracking status of the object.
Untracking of certain containers was introduced in issue #4688, and
the algorithm was refined in response to issue #14775.
*/
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
/* Running stats per generation */
struct gc_generation_stats {
/* total number of collections */
Py_ssize_t collections;
/* total number of collected objects */
Py_ssize_t collected;
/* total number of uncollectable objects (put into gc.garbage) */
Py_ssize_t uncollectable;
};
struct _gc_runtime_state {
/* List of objects that still need to be cleaned up, singly linked
* via their gc headers' gc_prev pointers. */
PyObject *trash_delete_later;
/* Current call-stack depth of tp_dealloc calls. */
int trash_delete_nesting;
int enabled;
int debug;
/* linked lists of container objects */
struct gc_generation generations[NUM_GENERATIONS];
PyGC_Head *generation0;
/* a permanent generation which won't be collected */
struct gc_generation permanent_generation;
struct gc_generation_stats generation_stats[NUM_GENERATIONS];
/* true if we are currently running the collector */
int collecting;
/* list of uncollectable objects */
PyObject *garbage;
/* a list of callbacks to be invoked when collection is performed */
PyObject *callbacks;
/* This is the number of objects that survived the last full
collection. It approximates the number of long lived objects
tracked by the GC.
(by "full collection", we mean a collection of the oldest
generation). */
Py_ssize_t long_lived_total;
/* This is the number of objects that survived all "non-full"
collections, and are waiting to undergo a full collection for
the first time. */
Py_ssize_t long_lived_pending;
};
PyAPI_FUNC(void) _PyGC_Initialize(struct _gc_runtime_state *);
#define _PyGC_generation0 _PyRuntime.gc.generation0
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_MEM_H */
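
The 25% heuristic described in the long comment above reduces to one comparison over these two counters; a sketch of how the collector might apply it (the real check lives in Modules/gcmodule.c):

static int full_collection_worthwhile(const struct _gc_runtime_state *gc)
{
    /* Only pay the (unbounded) cost of scanning every long-lived object
       once the survivors of non-full collections exceed 25% of them. */
    return gc->long_lived_pending > gc->long_lived_total / 4;
}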

include/internal/pygetopt.h (new file)

@@ -0,0 +1,19 @@
#ifndef Py_INTERNAL_PYGETOPT_H
#define Py_INTERNAL_PYGETOPT_H
extern int _PyOS_opterr;
extern int _PyOS_optind;
extern wchar_t *_PyOS_optarg;
extern void _PyOS_ResetGetOpt(void);
typedef struct {
const wchar_t *name;
int has_arg;
int val;
} _PyOS_LongOption;
extern int _PyOS_GetOpt(int argc, wchar_t **argv, wchar_t *optstring,
const _PyOS_LongOption *longopts, int *longindex);
#endif /* !Py_INTERNAL_PYGETOPT_H */
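
A hypothetical driver loop for this API, modeled on its real consumer (Modules/main.c); the option string, the long-option table entry, and the -1 end-of-options convention are assumptions here:

static void parse_sketch(int argc, wchar_t **argv)
{
    static const _PyOS_LongOption longopts[] = {
        {L"check-hash-based-pycs", 1, 0},  /* takes an argument, val 0 */
        {NULL, 0, 0},                      /* terminator */
    };
    int c, longindex = -1;
    _PyOS_ResetGetOpt();                   /* rewind parser state */
    while ((c = _PyOS_GetOpt(argc, argv, L"vVi",
                             longopts, &longindex)) != -1) {
        if (c == 0) {
            /* long option 'longindex' matched; its argument,
               if any, is in _PyOS_optarg */
        }
        /* else dispatch on the short-option character in c */
    }
}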

include/internal/pystate.h (new file)

@@ -0,0 +1,132 @@
#ifndef Py_INTERNAL_PYSTATE_H
#define Py_INTERNAL_PYSTATE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pystate.h"
#include "pyatomic.h"
#include "pythread.h"
#include "internal/mem.h"
#include "internal/ceval.h"
#include "internal/warnings.h"
/* GIL state */
struct _gilstate_runtime_state {
int check_enabled;
/* Assuming the current thread holds the GIL, this is the
PyThreadState for the current thread. */
_Py_atomic_address tstate_current;
PyThreadFrameGetter getframe;
/* The single PyInterpreterState used by this process'
GILState implementation
*/
/* TODO: Given interp_main, it may be possible to kill this ref */
PyInterpreterState *autoInterpreterState;
Py_tss_t autoTSSkey;
};
/* hook for PyEval_GetFrame(), requested for Psyco */
#define _PyThreadState_GetFrame _PyRuntime.gilstate.getframe
/* Issue #26558: Flag to disable PyGILState_Check().
If set to non-zero, PyGILState_Check() always returns 1. */
#define _PyGILState_check_enabled _PyRuntime.gilstate.check_enabled
typedef struct {
/* Full path to the Python program */
wchar_t *program_full_path;
wchar_t *prefix;
#ifdef MS_WINDOWS
wchar_t *dll_path;
#else
wchar_t *exec_prefix;
#endif
/* Set by Py_SetPath(), or computed by _PyPathConfig_Init() */
wchar_t *module_search_path;
/* Python program name */
wchar_t *program_name;
/* Set by Py_SetPythonHome() or PYTHONHOME environment variable */
wchar_t *home;
} _PyPathConfig;
#define _PyPathConfig_INIT {.module_search_path = NULL}
/* Note: _PyPathConfig_INIT sets other fields to 0/NULL */
PyAPI_DATA(_PyPathConfig) _Py_path_config;
PyAPI_FUNC(_PyInitError) _PyPathConfig_Calculate(
_PyPathConfig *config,
const _PyCoreConfig *core_config);
PyAPI_FUNC(void) _PyPathConfig_Clear(_PyPathConfig *config);
/* interpreter state */
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(PY_INT64_T);
PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_IDIncref(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *);
/* Full Python runtime state */
typedef struct pyruntimestate {
int initialized;
int core_initialized;
PyThreadState *finalizing;
struct pyinterpreters {
PyThread_type_lock mutex;
PyInterpreterState *head;
PyInterpreterState *main;
/* _next_interp_id is an auto-numbered sequence of small
integers. It gets initialized in _PyInterpreterState_Init(),
which is called in Py_Initialize(), and used in
PyInterpreterState_New(). A negative interpreter ID
indicates an error occurred. The main interpreter will
always have an ID of 0. Overflow results in a RuntimeError.
If that becomes a problem later then we can adjust, e.g. by
using a Python int. */
int64_t next_id;
} interpreters;
#define NEXITFUNCS 32
void (*exitfuncs[NEXITFUNCS])(void);
int nexitfuncs;
struct _gc_runtime_state gc;
struct _warnings_runtime_state warnings;
struct _ceval_runtime_state ceval;
struct _gilstate_runtime_state gilstate;
// XXX Consolidate globals found via the check-c-globals script.
} _PyRuntimeState;
#define _PyRuntimeState_INIT {.initialized = 0, .core_initialized = 0}
/* Note: _PyRuntimeState_INIT sets other fields to 0/NULL */
PyAPI_DATA(_PyRuntimeState) _PyRuntime;
PyAPI_FUNC(_PyInitError) _PyRuntimeState_Init(_PyRuntimeState *);
PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *);
/* Initialize _PyRuntimeState.
Return _Py_INIT_OK() on success, or an error on failure. */
PyAPI_FUNC(_PyInitError) _PyRuntime_Initialize(void);
#define _Py_CURRENTLY_FINALIZING(tstate) \
(_PyRuntime.finalizing == tstate)
/* Other */
PyAPI_FUNC(_PyInitError) _PyInterpreterState_Enable(_PyRuntimeState *);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYSTATE_H */
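
A sketch of the startup sequence these declarations imply (assumption: the _Py_INIT_FAILED() / _Py_FatalInitError() helpers from the init-error API are in scope):

static void boot_sketch(void)
{
    _PyInitError err = _PyRuntime_Initialize();  /* one-time global setup */
    if (_Py_INIT_FAILED(err)) {
        _Py_FatalInitError(err);                 /* report and abort */
    }
    /* ... create interpreters and thread states; at shutdown: */
    _PyRuntimeState_Fini(&_PyRuntime);
}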

include/internal/warnings.h (new file)

@@ -0,0 +1,21 @@
#ifndef Py_INTERNAL_WARNINGS_H
#define Py_INTERNAL_WARNINGS_H
#ifdef __cplusplus
extern "C" {
#endif
#include "object.h"
struct _warnings_runtime_state {
/* Both 'filters' and 'onceregistry' can be set in warnings.py;
get_warnings_attr() will reset these variables accordingly. */
PyObject *filters; /* List */
PyObject *once_registry; /* Dict */
PyObject *default_action; /* String */
long filters_version;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_WARNINGS_H */
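
filters_version is a cheap change counter: code that derives state from warnings.filters can cache the derived data and compare versions instead of re-reading the list. A hypothetical sketch:

struct filters_cache {
    long seen_version;   /* filters_version observed when caching */
    /* ... pre-processed filter data would live here ... */
};

static int cache_valid(const struct filters_cache *c,
                       const struct _warnings_runtime_state *w)
{
    /* Any mutation of warnings.filters bumps filters_version,
       invalidating every cache built against an older value. */
    return c->seen_version == w->filters_version;
}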