diff --git a/.travis.yml b/.travis.yml
index 1c71092cc888..f00dce2dfcfc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,14 +6,14 @@ python:
   - 3.2
 
 install:
-  - pip install --use-mirrors nose
+  - pip install --use-mirrors nose python-dateutil pyparsing
   # This is a workaround to install numpy with the version of
   # virtualenv in Travis. If that is updated in the future, this can
   # be simplified to 'pip install numpy'
   - 'if [ $TRAVIS_PYTHON_VERSION == "3.2" ]; then pip install https://github.com/y-p/numpy/archive/1.6.2_with_travis_fix.tar.gz; fi'
   - 'if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then pip install numpy; fi' # should be nop if pre-installed
   - if [[ $TRAVIS_PYTHON_VERSION == '2.'* ]]; then pip install --use-mirrors PIL; fi
-  - python setup.py install
+  - python setup.py install --old-and-unmanageable
 
 script:
   - mkdir ../foo
diff --git a/CXX/IndirectPythonInterface.cxx b/CXX/IndirectPythonInterface.cxx
index 7286cdf65737..256bd7ebda80 100644
--- a/CXX/IndirectPythonInterface.cxx
+++ b/CXX/IndirectPythonInterface.cxx
@@ -34,10 +34,524 @@
 // DAMAGE.
 //
 //-----------------------------------------------------------------------------
-#include "CXX/WrapPython.h"
+#include "CXX/IndirectPythonInterface.hxx"
+
+namespace Py
+{
+bool _CFunction_Check( PyObject *op ) { return op->ob_type == _CFunction_Type(); }
+bool _Complex_Check( PyObject *op ) { return op->ob_type == _Complex_Type(); }
+bool _Dict_Check( PyObject *op ) { return op->ob_type == _Dict_Type(); }
+bool _Float_Check( PyObject *op ) { return op->ob_type == _Float_Type(); }
+bool _Function_Check( PyObject *op ) { return op->ob_type == _Function_Type(); }
+bool _Boolean_Check( PyObject *op ) { return op->ob_type == _Bool_Type(); }
+bool _List_Check( PyObject *op ) { return op->ob_type == _List_Type(); }
+bool _Long_Check( PyObject *op ) { return op->ob_type == _Long_Type(); }
+bool _Method_Check( PyObject *op ) { return op->ob_type == _Method_Type(); }
+bool _Module_Check( PyObject *op ) { return op->ob_type == _Module_Type(); }
+bool _Range_Check( PyObject *op ) { return op->ob_type == _Range_Type(); }
+bool _Slice_Check( PyObject *op ) { return op->ob_type == _Slice_Type(); }
+bool _TraceBack_Check( PyObject *op ) { return op->ob_type == _TraceBack_Type(); }
+bool _Tuple_Check( PyObject *op ) { return op->ob_type == _Tuple_Type(); }
+bool _Type_Check( PyObject *op ) { return op->ob_type == _Type_Type(); }
+bool _Unicode_Check( PyObject *op ) { return op->ob_type == _Unicode_Type(); }
 
 #if PY_MAJOR_VERSION == 2
-#include "Python2/IndirectPythonInterface.cxx"
+bool _String_Check( PyObject *op ) { return op->ob_type == _String_Type(); }
+bool _Int_Check( PyObject *op ) { return op->ob_type == _Int_Type(); }
+bool _CObject_Check( PyObject *op ) { return op->ob_type == _CObject_Type(); }
+#endif
+#if PY_MAJOR_VERSION >= 3
+bool _Bytes_Check( PyObject *op ) { return op->ob_type == _Bytes_Type(); }
+#endif
+
+#if defined(PY_WIN32_DELAYLOAD_PYTHON_DLL)
+
+#if defined(MS_WINDOWS)
+#include 
+
+
+static HMODULE python_dll;
+
+static PyObject *ptr__Exc_ArithmeticError = NULL;
+static PyObject *ptr__Exc_AssertionError = NULL;
+static PyObject *ptr__Exc_AttributeError = NULL;
+static PyObject *ptr__Exc_EnvironmentError = NULL;
+static PyObject *ptr__Exc_EOFError = NULL;
+static PyObject *ptr__Exc_Exception = NULL;
+static PyObject *ptr__Exc_FloatingPointError = NULL;
+static PyObject *ptr__Exc_ImportError = NULL;
+static PyObject *ptr__Exc_IndexError = NULL;
+static PyObject *ptr__Exc_IOError = NULL;
+static PyObject *ptr__Exc_KeyboardInterrupt = NULL;
+static PyObject *ptr__Exc_KeyError = NULL;
+static PyObject *ptr__Exc_LookupError = NULL;
+static PyObject *ptr__Exc_MemoryError = NULL;
+static PyObject *ptr__Exc_NameError = NULL;
+static PyObject *ptr__Exc_NotImplementedError = NULL;
+static PyObject *ptr__Exc_OSError = NULL;
+static PyObject *ptr__Exc_OverflowError = NULL;
+static PyObject *ptr__Exc_RuntimeError = NULL;
+static PyObject *ptr__Exc_StandardError = NULL;
+static PyObject *ptr__Exc_SyntaxError = NULL;
+static PyObject *ptr__Exc_SystemError = NULL;
+static PyObject *ptr__Exc_SystemExit = NULL;
+static PyObject *ptr__Exc_TypeError = NULL;
+static PyObject *ptr__Exc_ValueError = NULL;
+static PyObject *ptr__Exc_ZeroDivisionError = NULL;
+
+#ifdef MS_WINDOWS
+static PyObject *ptr__Exc_WindowsError = NULL;
+#endif
+
+static PyObject *ptr__Exc_IndentationError = NULL;
+static PyObject *ptr__Exc_TabError = NULL;
+static PyObject *ptr__Exc_UnboundLocalError = NULL;
+static PyObject *ptr__Exc_UnicodeError = NULL;
+static PyObject *ptr__PyNone = NULL;
+static PyObject *ptr__PyFalse = NULL;
+static PyObject *ptr__PyTrue = NULL;
+static PyTypeObject *ptr__CFunction_Type = NULL;
+static PyTypeObject *ptr__Complex_Type = NULL;
+static PyTypeObject *ptr__Dict_Type = NULL;
+static PyTypeObject *ptr__Float_Type = NULL;
+static PyTypeObject *ptr__Function_Type = NULL;
+static PyTypeObject *ptr__Bool_Type = NULL;
+static PyTypeObject *ptr__List_Type = NULL;
+static PyTypeObject *ptr__Long_Type = NULL;
+static PyTypeObject *ptr__Method_Type = NULL;
+static PyTypeObject *ptr__Module_Type = NULL;
+static PyTypeObject *ptr__Range_Type = NULL;
+static PyTypeObject *ptr__Slice_Type = NULL;
+static PyTypeObject *ptr__TraceBack_Type = NULL;
+static PyTypeObject *ptr__Tuple_Type = NULL;
+static PyTypeObject *ptr__Type_Type = NULL;
+#if PY_MAJOR_VERSION == 2
+static PyTypeObject *ptr__Int_Type = NULL;
+static PyTypeObject *ptr__String_Type = NULL;
+static PyTypeObject *ptr__CObject_Type = NULL;
+#endif
+#if PY_MAJOR_VERSION >= 3
+static PyTypeObject *ptr__Bytes_Type = NULL;
+#endif
+
+
+static int *ptr_Py_DebugFlag = NULL;
+static int *ptr_Py_InteractiveFlag = NULL;
+static int *ptr_Py_OptimizeFlag = NULL;
+static int *ptr_Py_NoSiteFlag = NULL;
+static int *ptr_Py_VerboseFlag = NULL;
+
+static char **ptr__Py_PackageContext = NULL;
+
+#ifdef Py_REF_DEBUG
+int *ptr_Py_RefTotal;
+#endif
+
+
+//--------------------------------------------------------------------------------
+class GetAddressException
+{
+public:
+    GetAddressException( const char *_name )
+    : name( _name )
+    {}
+    virtual ~GetAddressException() {}
+    const char *name;
+};
+
+
+//--------------------------------------------------------------------------------
+static PyObject *GetPyObjectPointer_As_PyObjectPointer( const char *name )
+{
+    FARPROC addr = GetProcAddress( python_dll, name );
+    if( addr == NULL )
+        throw GetAddressException( name );
+
+    return *(PyObject **)addr;
+}
+
+static PyObject *GetPyObject_As_PyObjectPointer( const char *name )
+{
+    FARPROC addr = GetProcAddress( python_dll, name );
+    if( addr == NULL )
+        throw GetAddressException( name );
+
+    return (PyObject *)addr;
+}
+
+static PyTypeObject *GetPyTypeObjectPointer_As_PyTypeObjectPointer( const char *name )
+{
+    FARPROC addr = GetProcAddress( python_dll, name );
+    if( addr == NULL )
+        throw GetAddressException( name );
+
+    return *(PyTypeObject **)addr;
+}
+
+static PyTypeObject *GetPyTypeObject_As_PyTypeObjectPointer( const char *name )
+{
+    FARPROC addr = GetProcAddress( python_dll, name );
+    if( addr == NULL
) + throw GetAddressException( name ); + + return (PyTypeObject *)addr; +} + +static int *GetInt_as_IntPointer( const char *name ) +{ + FARPROC addr = GetProcAddress( python_dll, name ); + if( addr == NULL ) + throw GetAddressException( name ); + + return (int *)addr; +} + +static char **GetCharPointer_as_CharPointerPointer( const char *name ) +{ + FARPROC addr = GetProcAddress( python_dll, name ); + if( addr == NULL ) + throw GetAddressException( name ); + + return (char **)addr; +} + + +#ifdef _DEBUG +static const char python_dll_name_format[] = "PYTHON%1.1d%1.1d_D.DLL"; #else -#include "Python3/IndirectPythonInterface.cxx" +static const char python_dll_name_format[] = "PYTHON%1.1d%1.1d.DLL"; +#endif + +//-------------------------------------------------------------------------------- +bool InitialisePythonIndirectInterface() +{ + char python_dll_name[sizeof(python_dll_name_format)]; + + _snprintf( python_dll_name, sizeof(python_dll_name_format) / sizeof(char) - 1, python_dll_name_format, PY_MAJOR_VERSION, PY_MINOR_VERSION ); + + python_dll = LoadLibraryA( python_dll_name ); + if( python_dll == NULL ) + return false; + + try + { +#ifdef Py_REF_DEBUG + ptr_Py_RefTotal = GetInt_as_IntPointer( "_Py_RefTotal" ); +#endif + ptr_Py_DebugFlag = GetInt_as_IntPointer( "Py_DebugFlag" ); + ptr_Py_InteractiveFlag = GetInt_as_IntPointer( "Py_InteractiveFlag" ); + ptr_Py_OptimizeFlag = GetInt_as_IntPointer( "Py_OptimizeFlag" ); + ptr_Py_NoSiteFlag = GetInt_as_IntPointer( "Py_NoSiteFlag" ); + ptr_Py_VerboseFlag = GetInt_as_IntPointer( "Py_VerboseFlag" ); + ptr__Py_PackageContext = GetCharPointer_as_CharPointerPointer( "_Py_PackageContext" ); + + ptr__Exc_ArithmeticError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ArithmeticError" ); + ptr__Exc_AssertionError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_AssertionError" ); + ptr__Exc_AttributeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_AttributeError" ); + ptr__Exc_EnvironmentError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_EnvironmentError" ); + ptr__Exc_EOFError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_EOFError" ); + ptr__Exc_Exception = GetPyObjectPointer_As_PyObjectPointer( "PyExc_Exception" ); + ptr__Exc_FloatingPointError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_FloatingPointError" ); + ptr__Exc_ImportError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ImportError" ); + ptr__Exc_IndexError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IndexError" ); + ptr__Exc_IOError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IOError" ); + ptr__Exc_KeyboardInterrupt = GetPyObjectPointer_As_PyObjectPointer( "PyExc_KeyboardInterrupt" ); + ptr__Exc_KeyError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_KeyError" ); + ptr__Exc_LookupError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_LookupError" ); + ptr__Exc_MemoryError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_MemoryError" ); + ptr__Exc_NameError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_NameError" ); + ptr__Exc_NotImplementedError= GetPyObjectPointer_As_PyObjectPointer( "PyExc_NotImplementedError" ); + ptr__Exc_OSError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_OSError" ); + ptr__Exc_OverflowError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_OverflowError" ); + ptr__Exc_RuntimeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_RuntimeError" ); + ptr__Exc_StandardError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_StandardError" ); + ptr__Exc_SyntaxError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SyntaxError" ); + ptr__Exc_SystemError = 
GetPyObjectPointer_As_PyObjectPointer( "PyExc_SystemError" ); + ptr__Exc_SystemExit = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SystemExit" ); + ptr__Exc_TypeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_TypeError" ); + ptr__Exc_ValueError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ValueError" ); +#ifdef MS_WINDOWS + ptr__Exc_WindowsError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_WindowsError" ); +#endif + ptr__Exc_ZeroDivisionError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ZeroDivisionError" ); + ptr__Exc_IndentationError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IndentationError" ); + ptr__Exc_TabError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_TabError" ); + ptr__Exc_UnboundLocalError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_UnboundLocalError" ); + ptr__Exc_UnicodeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_UnicodeError" ); + ptr__PyNone = GetPyObject_As_PyObjectPointer( "_Py_NoneStruct" ); + + ptr__PyFalse = GetPyObject_As_PyObjectPointer( "_Py_ZeroStruct" ); + ptr__PyTrue = GetPyObject_As_PyObjectPointer( "_Py_TrueStruct" ); + + ptr__CFunction_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyCFunction_Type" ); + ptr__Complex_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyComplex_Type" ); + ptr__Dict_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyDict_Type" ); + ptr__Float_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFloat_Type" ); + ptr__Function_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFunction_Type" ); + ptr__Bool_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyBool_Type" ); + ptr__List_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyList_Type" ); + ptr__Long_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyLong_Type" ); + ptr__Method_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyMethod_Type" ); + ptr__Module_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyModule_Type" ); + ptr__Range_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyRange_Type" ); + ptr__Slice_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PySlice_Type" ); + ptr__TraceBack_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyTraceBack_Type" ); + ptr__Tuple_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyTuple_Type" ); + ptr__Type_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyType_Type" ); + ptr__Unicode_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyUnicode_Type" ); +#if PY_MAJOR_VERSION == 2 + ptr__String_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyString_Type" ); + ptr__Int_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyInt_Type" ); + ptr__CObject_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyCObject_Type" ); +#endif +#if PY_MAJOR_VERSION >= 3 + ptr__Bytes_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyBytes_Type" ); +#endif + } + catch( GetAddressException &e ) + { + OutputDebugStringA( python_dll_name ); + OutputDebugStringA( " does not contain symbol " ); + OutputDebugStringA( e.name ); + OutputDebugStringA( "\n" ); + + return false; + } + + return true; +} + +// +// Wrap variables as function calls +// +PyObject *_Exc_ArithmeticError() { return ptr__Exc_ArithmeticError; } +PyObject *_Exc_AssertionError() { return ptr__Exc_AssertionError; } +PyObject *_Exc_AttributeError() { return ptr__Exc_AttributeError; } +PyObject *_Exc_EnvironmentError() { return ptr__Exc_EnvironmentError; } +PyObject *_Exc_EOFError() { return ptr__Exc_EOFError; } +PyObject *_Exc_Exception() { return ptr__Exc_Exception; } +PyObject *_Exc_FloatingPointError() { return ptr__Exc_FloatingPointError; } +PyObject *_Exc_ImportError() { 
return ptr__Exc_ImportError; } +PyObject *_Exc_IndexError() { return ptr__Exc_IndexError; } +PyObject *_Exc_IOError() { return ptr__Exc_IOError; } +PyObject *_Exc_KeyboardInterrupt() { return ptr__Exc_KeyboardInterrupt; } +PyObject *_Exc_KeyError() { return ptr__Exc_KeyError; } +PyObject *_Exc_LookupError() { return ptr__Exc_LookupError; } +PyObject *_Exc_MemoryError() { return ptr__Exc_MemoryError; } +PyObject *_Exc_NameError() { return ptr__Exc_NameError; } +PyObject *_Exc_NotImplementedError() { return ptr__Exc_NotImplementedError; } +PyObject *_Exc_OSError() { return ptr__Exc_OSError; } +PyObject *_Exc_OverflowError() { return ptr__Exc_OverflowError; } +PyObject *_Exc_RuntimeError() { return ptr__Exc_RuntimeError; } +PyObject *_Exc_StandardError() { return ptr__Exc_StandardError; } +PyObject *_Exc_SyntaxError() { return ptr__Exc_SyntaxError; } +PyObject *_Exc_SystemError() { return ptr__Exc_SystemError; } +PyObject *_Exc_SystemExit() { return ptr__Exc_SystemExit; } +PyObject *_Exc_TypeError() { return ptr__Exc_TypeError; } +PyObject *_Exc_ValueError() { return ptr__Exc_ValueError; } +#ifdef MS_WINDOWS +PyObject *_Exc_WindowsError() { return ptr__Exc_WindowsError; } +#endif +PyObject *_Exc_ZeroDivisionError() { return ptr__Exc_ZeroDivisionError; } +PyObject *_Exc_IndentationError() { return ptr__Exc_IndentationError; } +PyObject *_Exc_TabError() { return ptr__Exc_TabError; } +PyObject *_Exc_UnboundLocalError() { return ptr__Exc_UnboundLocalError; } +PyObject *_Exc_UnicodeError() { return ptr__Exc_UnicodeError; } + +// +// wrap items in Object.h +// +PyObject *_None() { return ptr__PyNone; } + +PyObject *_False() { return ptr__PyFalse; } +PyObject *_True() { return ptr__PyTrue; } + +PyTypeObject *_CFunction_Type() { return ptr__CFunction_Type; } +PyTypeObject *_Complex_Type() { return ptr__Complex_Type; } +PyTypeObject *_Dict_Type() { return ptr__Dict_Type; } +PyTypeObject *_Float_Type() { return ptr__Float_Type; } +PyTypeObject *_Function_Type() { return ptr__Function_Type; } +PyTypeObject *_Bool_Type() { return ptr__Bool_Type; } +PyTypeObject *_List_Type() { return ptr__List_Type; } +PyTypeObject *_Long_Type() { return ptr__Long_Type; } +PyTypeObject *_Method_Type() { return ptr__Method_Type; } +PyTypeObject *_Module_Type() { return ptr__Module_Type; } +PyTypeObject *_Range_Type() { return ptr__Range_Type; } +PyTypeObject *_Slice_Type() { return ptr__Slice_Type; } +PyTypeObject *_TraceBack_Type() { return ptr__TraceBack_Type; } +PyTypeObject *_Tuple_Type() { return ptr__Tuple_Type; } +PyTypeObject *_Type_Type() { return ptr__Type_Type; } +PyTypeObject *_Unicode_Type() { return ptr__Unicode_Type; } +#if PY_MAJOR_VERSION == 2 +PyTypeObject *_String_Type() { return ptr__String_Type; } +PyTypeObject *_Int_Type() { return ptr__Int_Type; } +PyTypeObject *_CObject_Type() { return ptr__CObject_Type; } +#endif +#if PY_MAJOR_VERSION >= 3 +PyTypeObject *_Bytes_Type() { return ptr__Bytes_Type; } +#endif + +char *__Py_PackageContext() { return *ptr__Py_PackageContext; } + + +// +// wrap the Python Flag variables +// +int &_Py_DebugFlag() { return *ptr_Py_DebugFlag; } +int &_Py_InteractiveFlag() { return *ptr_Py_InteractiveFlag; } +int &_Py_OptimizeFlag() { return *ptr_Py_OptimizeFlag; } +int &_Py_NoSiteFlag() { return *ptr_Py_NoSiteFlag; } +int &_Py_VerboseFlag() { return *ptr_Py_VerboseFlag; } + +#if 0 +#define Py_INCREF(op) ( \ + _Py_INC_REFTOTAL _Py_REF_DEBUG_COMMA \ + ((PyObject*)(op))->ob_refcnt++) + +#define Py_DECREF(op) \ + if (_Py_DEC_REFTOTAL _Py_REF_DEBUG_COMMA \ + 
--((PyObject*)(op))->ob_refcnt != 0) \ + _Py_CHECK_REFCNT(op) \ + else \ + _Py_Dealloc((PyObject *)(op)) +#endif + +void _XINCREF( PyObject *op ) +{ + // This function must match the contents of Py_XINCREF(op) + if( op == NULL ) + return; + +#ifdef Py_REF_DEBUG + (*ptr_Py_RefTotal)++; +#endif + (op)->ob_refcnt++; + +} + +void _XDECREF( PyObject *op ) +{ + // This function must match the contents of Py_XDECREF(op); + if( op == NULL ) + return; + +#ifdef Py_REF_DEBUG + (*ptr_Py_RefTotal)--; +#endif + + if (--(op)->ob_refcnt == 0) + _Py_Dealloc((PyObject *)(op)); +} + + +#else +#error "Can only delay load under Win32" +#endif + +#else + +//================================================================================ +// +// Map onto Macros +// +//================================================================================ + +// +// Wrap variables as function calls +// + +PyObject *_Exc_ArithmeticError() { return ::PyExc_ArithmeticError; } +PyObject *_Exc_AssertionError() { return ::PyExc_AssertionError; } +PyObject *_Exc_AttributeError() { return ::PyExc_AttributeError; } +PyObject *_Exc_EnvironmentError() { return ::PyExc_EnvironmentError; } +PyObject *_Exc_EOFError() { return ::PyExc_EOFError; } +PyObject *_Exc_Exception() { return ::PyExc_Exception; } +PyObject *_Exc_FloatingPointError() { return ::PyExc_FloatingPointError; } +PyObject *_Exc_ImportError() { return ::PyExc_ImportError; } +PyObject *_Exc_IndexError() { return ::PyExc_IndexError; } +PyObject *_Exc_IOError() { return ::PyExc_IOError; } +PyObject *_Exc_KeyboardInterrupt() { return ::PyExc_KeyboardInterrupt; } +PyObject *_Exc_KeyError() { return ::PyExc_KeyError; } +PyObject *_Exc_LookupError() { return ::PyExc_LookupError; } +PyObject *_Exc_MemoryError() { return ::PyExc_MemoryError; } +PyObject *_Exc_NameError() { return ::PyExc_NameError; } +PyObject *_Exc_NotImplementedError() { return ::PyExc_NotImplementedError; } +PyObject *_Exc_OSError() { return ::PyExc_OSError; } +PyObject *_Exc_OverflowError() { return ::PyExc_OverflowError; } +PyObject *_Exc_RuntimeError() { return ::PyExc_RuntimeError; } +PyObject *_Exc_SyntaxError() { return ::PyExc_SyntaxError; } +PyObject *_Exc_SystemError() { return ::PyExc_SystemError; } +PyObject *_Exc_SystemExit() { return ::PyExc_SystemExit; } +PyObject *_Exc_TypeError() { return ::PyExc_TypeError; } +PyObject *_Exc_ValueError() { return ::PyExc_ValueError; } +PyObject *_Exc_ZeroDivisionError() { return ::PyExc_ZeroDivisionError; } +PyObject *_Exc_IndentationError() { return ::PyExc_IndentationError; } +PyObject *_Exc_TabError() { return ::PyExc_TabError; } +PyObject *_Exc_UnboundLocalError() { return ::PyExc_UnboundLocalError; } +PyObject *_Exc_UnicodeError() { return ::PyExc_UnicodeError; } + +#ifdef MS_WINDOWS +PyObject *_Exc_WindowsError() { return ::PyExc_WindowsError; } +#endif + + + + +// +// wrap items in Object.h +// +PyObject *_None() { return &::_Py_NoneStruct; } + +PyObject *_False() { return Py_False; } +PyObject *_True() { return Py_True; } + +PyTypeObject *_CFunction_Type() { return &PyCFunction_Type; } +PyTypeObject *_Complex_Type() { return &PyComplex_Type; } +PyTypeObject *_Dict_Type() { return &PyDict_Type; } +PyTypeObject *_Float_Type() { return &PyFloat_Type; } +PyTypeObject *_Function_Type() { return &PyFunction_Type; } +PyTypeObject *_Bool_Type() { return &PyBool_Type; } +PyTypeObject *_List_Type() { return &PyList_Type; } +PyTypeObject *_Long_Type() { return &PyLong_Type; } +PyTypeObject *_Method_Type() { return &PyMethod_Type; } +PyTypeObject *_Module_Type() { 
return &PyModule_Type; } +PyTypeObject *_Range_Type() { return &PyRange_Type; } +PyTypeObject *_Slice_Type() { return &PySlice_Type; } +PyTypeObject *_TraceBack_Type() { return &PyTraceBack_Type; } +PyTypeObject *_Tuple_Type() { return &PyTuple_Type; } +PyTypeObject *_Type_Type() { return &PyType_Type; } +PyTypeObject *_Unicode_Type() { return &PyUnicode_Type; } +#if PY_MAJOR_VERSION == 2 +PyTypeObject *_String_Type() { return &PyString_Type; } +PyTypeObject *_Int_Type() { return &PyInt_Type; } +PyTypeObject *_CObject_Type() { return &PyCObject_Type; } +#endif +#if PY_MAJOR_VERSION >= 3 +PyTypeObject *_Bytes_Type() { return &PyBytes_Type; } +#endif + +// +// wrap flags +// +int &_Py_DebugFlag() { return Py_DebugFlag; } +int &_Py_InteractiveFlag() { return Py_InteractiveFlag; } +int &_Py_OptimizeFlag() { return Py_OptimizeFlag; } +int &_Py_NoSiteFlag() { return Py_NoSiteFlag; } +int &_Py_VerboseFlag() { return Py_VerboseFlag; } +char *__Py_PackageContext() { return _Py_PackageContext; } + +// +// Needed to keep the abstactions for delayload interface +// +void _XINCREF( PyObject *op ) +{ + Py_XINCREF( op ); +} + +void _XDECREF( PyObject *op ) +{ + Py_XDECREF( op ); +} + #endif +} diff --git a/CXX/Python2/ExtensionModule.hxx b/CXX/Python2/ExtensionModule.hxx index dde3ec6f854b..3eb436d00048 100644 --- a/CXX/Python2/ExtensionModule.hxx +++ b/CXX/Python2/ExtensionModule.hxx @@ -66,9 +66,6 @@ namespace Py const std::string m_module_name; const std::string m_full_module_name; MethodTable m_method_table; -#if PY3 - PyModuleDef m_module_def; -#endif PyObject *m_module; private: @@ -136,19 +133,11 @@ namespace Py { MethodDefExt *method_def = (*i).second; - #if PY_VERSION_HEX < 0x02070000 - static PyObject *self = PyCObject_FromVoidPtr( this, do_not_dealloc ); - #else - static PyObject *self = PyCapsule_New( this, NULL, NULL ); - #endif + static PyObject *self = PyCObject_FromVoidPtr( this, do_not_dealloc ); Tuple args( 2 ); - args[0] = Object( self ); - #if PY_VERSION_HEX < 0x02070000 - args[1] = Object( PyCObject_FromVoidPtr( method_def, do_not_dealloc ) ); - #else - args[1] = Object( PyCapsule_New( method_def, NULL, NULL ) ); - #endif + args[0] = Object( self, true ); + args[1] = Object( PyCObject_FromVoidPtr( method_def, do_not_dealloc ), true ); PyObject *func = PyCFunction_New ( diff --git a/CXX/Python2/ExtensionOldType.hxx b/CXX/Python2/ExtensionOldType.hxx index 9702b00e7455..cfd2fbe6a578 100644 --- a/CXX/Python2/ExtensionOldType.hxx +++ b/CXX/Python2/ExtensionOldType.hxx @@ -178,11 +178,8 @@ namespace Py Tuple self( 2 ); self[0] = Object( this ); - #if PY_VERSION_HEX < 0x02070000 - self[1] = Object( PyCObject_FromVoidPtr( method_def, do_not_dealloc ), true ); - #else - self[1] = Object( PyCapsule_New( method_def, NULL, NULL ), true ); - #endif + self[1] = Object( PyCObject_FromVoidPtr( method_def, do_not_dealloc ), true ); + PyObject *func = PyCFunction_New( &method_def->ext_meth_def, self.ptr() ); return Object(func, true); @@ -238,12 +235,8 @@ namespace Py PyObject *self_in_cobject = self_and_name_tuple[0].ptr(); T *self = static_cast( self_in_cobject ); - #if PY_VERSION_HEX < 0x02070000 - void *capsule = PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ); - #else - void *capsule = PyCapsule_GetPointer( self_and_name_tuple[1].ptr(), NULL ); - #endif - MethodDefExt *meth_def = reinterpret_cast *>( capsule ); + MethodDefExt *meth_def = reinterpret_cast *>( + PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ) ); Object result; // Adding try & catch in case of STL debug-mode exceptions. 
@@ -278,12 +271,8 @@ namespace Py PyObject *self_in_cobject = self_and_name_tuple[0].ptr(); T *self = static_cast( self_in_cobject ); - #if PY_VERSION_HEX < 0x02070000 - void *capsule = PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ); - #else - void *capsule = PyCapsule_GetPointer( self_and_name_tuple[1].ptr(), NULL ); - #endif - MethodDefExt *meth_def = reinterpret_cast *>( capsule ); + MethodDefExt *meth_def = reinterpret_cast *>( + PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ) ); Tuple args( _args ); Object result; @@ -319,12 +308,8 @@ namespace Py PyObject *self_in_cobject = self_and_name_tuple[0].ptr(); T *self = static_cast( self_in_cobject ); - #if PY_VERSION_HEX < 0x02070000 - void *capsule = PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ); - #else - void *capsule = PyCapsule_GetPointer( self_and_name_tuple[1].ptr(), NULL ); - #endif - MethodDefExt *meth_def = reinterpret_cast *>( capsule ); + MethodDefExt *meth_def = reinterpret_cast *>( + PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ) ); Tuple args( _args ); diff --git a/CXX/Python2/ExtensionTypeBase.hxx b/CXX/Python2/ExtensionTypeBase.hxx index ad11029e71dc..1dfe4243619a 100644 --- a/CXX/Python2/ExtensionTypeBase.hxx +++ b/CXX/Python2/ExtensionTypeBase.hxx @@ -70,9 +70,7 @@ namespace Py virtual void reinit( Tuple &args, Dict &kwds ); // object basics -#if defined( PYCXX_PYTHON_2TO3 ) || !defined( PY3 ) virtual int print( FILE *, int ); -#endif virtual Object getattr( const char * ); virtual int setattr( const char *, const Object & ); virtual Object getattro( const String & ); diff --git a/CXX/Python2/IndirectPythonInterface.cxx b/CXX/Python2/IndirectPythonInterface.cxx deleted file mode 100644 index 203f3f9170ff..000000000000 --- a/CXX/Python2/IndirectPythonInterface.cxx +++ /dev/null @@ -1,607 +0,0 @@ -//----------------------------------------------------------------------------- -// -// Copyright (c) 1998 - 2007, The Regents of the University of California -// Produced at the Lawrence Livermore National Laboratory -// All rights reserved. -// -// This file is part of PyCXX. For details,see http://cxx.sourceforge.net/. The -// full copyright notice is contained in the file COPYRIGHT located at the root -// of the PyCXX distribution. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the disclaimer below. -// - Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the disclaimer (as noted below) in the -// documentation and/or materials provided with the distribution. -// - Neither the name of the UC/LLNL nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF -// CALIFORNIA, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -// DAMAGE. -// -//----------------------------------------------------------------------------- - -#include "CXX/IndirectPythonInterface.hxx" - -namespace Py -{ -bool _Buffer_Check( PyObject *op ) { return (op)->ob_type == _Buffer_Type(); } -bool _CFunction_Check( PyObject *op ) { return (op)->ob_type == _CFunction_Type(); } -bool _Class_Check( PyObject *op ) { return (op)->ob_type == _Class_Type(); } -#if PY_VERSION_HEX < 0x02070000 -bool _CObject_Check( PyObject *op ) { return (op)->ob_type == _CObject_Type(); } -#endif -bool _Complex_Check( PyObject *op ) { return (op)->ob_type == _Complex_Type(); } -bool _Dict_Check( PyObject *op ) { return (op)->ob_type == _Dict_Type(); } -bool _File_Check( PyObject *op ) { return (op)->ob_type == _File_Type(); } -bool _Float_Check( PyObject *op ) { return (op)->ob_type == _Float_Type(); } -bool _Function_Check( PyObject *op ) { return (op)->ob_type == _Function_Type(); } -bool _Instance_Check( PyObject *op ) { return (op)->ob_type == _Instance_Type(); } -bool _Boolean_Check( PyObject *op ) { return (op)->ob_type == _Bool_Type(); } -bool _Int_Check( PyObject *op ) { return (op)->ob_type == _Int_Type(); } -bool _List_Check( PyObject *o ) { return o->ob_type == _List_Type(); } -bool _Long_Check( PyObject *op ) { return (op)->ob_type == _Long_Type(); } -bool _Method_Check( PyObject *op ) { return (op)->ob_type == _Method_Type(); } -bool _Module_Check( PyObject *op ) { return (op)->ob_type == _Module_Type(); } -bool _Range_Check( PyObject *op ) { return (op)->ob_type == _Range_Type(); } -bool _Slice_Check( PyObject *op ) { return (op)->ob_type == _Slice_Type(); } -bool _String_Check( PyObject *o ) { return o->ob_type == _String_Type(); } -bool _TraceBack_Check( PyObject *v ) { return (v)->ob_type == _TraceBack_Type(); } -bool _Tuple_Check( PyObject *op ) { return (op)->ob_type == _Tuple_Type(); } -bool _Type_Check( PyObject *op ) { return (op)->ob_type == _Type_Type(); } - -#if PY_MAJOR_VERSION >= 2 -bool _Unicode_Check( PyObject *op ) { return (op)->ob_type == _Unicode_Type(); } -#endif - - - -#if defined(PY_WIN32_DELAYLOAD_PYTHON_DLL) - -#if defined(MS_WINDOWS) -#include - - -static HMODULE python_dll; - -static PyObject *ptr__Exc_ArithmeticError = NULL; -static PyObject *ptr__Exc_AssertionError = NULL; -static PyObject *ptr__Exc_AttributeError = NULL; -static PyObject *ptr__Exc_EnvironmentError = NULL; -static PyObject *ptr__Exc_EOFError = NULL; -static PyObject *ptr__Exc_Exception = NULL; -static PyObject *ptr__Exc_FloatingPointError = NULL; -static PyObject *ptr__Exc_ImportError = NULL; -static PyObject *ptr__Exc_IndexError = NULL; -static PyObject *ptr__Exc_IOError = NULL; -static PyObject *ptr__Exc_KeyboardInterrupt = NULL; -static PyObject *ptr__Exc_KeyError = NULL; -static PyObject *ptr__Exc_LookupError = NULL; -static PyObject *ptr__Exc_MemoryError = NULL; -static PyObject *ptr__Exc_MemoryErrorInst = NULL; -static PyObject *ptr__Exc_NameError = NULL; -static PyObject *ptr__Exc_NotImplementedError = NULL; -static PyObject *ptr__Exc_OSError = 
NULL; -static PyObject *ptr__Exc_OverflowError = NULL; -static PyObject *ptr__Exc_RuntimeError = NULL; -static PyObject *ptr__Exc_StandardError = NULL; -static PyObject *ptr__Exc_SyntaxError = NULL; -static PyObject *ptr__Exc_SystemError = NULL; -static PyObject *ptr__Exc_SystemExit = NULL; -static PyObject *ptr__Exc_TypeError = NULL; -static PyObject *ptr__Exc_ValueError = NULL; -static PyObject *ptr__Exc_ZeroDivisionError = NULL; - -#ifdef MS_WINDOWS -static PyObject *ptr__Exc_WindowsError = NULL; -#endif - -#if PY_MAJOR_VERSION >= 2 -static PyObject *ptr__Exc_IndentationError = NULL; -static PyObject *ptr__Exc_TabError = NULL; -static PyObject *ptr__Exc_UnboundLocalError = NULL; -static PyObject *ptr__Exc_UnicodeError = NULL; -#endif - -static PyObject *ptr__PyNone = NULL; - -static PyObject *ptr__PyFalse = NULL; -static PyObject *ptr__PyTrue = NULL; - -static PyTypeObject *ptr__Buffer_Type = NULL; -static PyTypeObject *ptr__CFunction_Type = NULL; -static PyTypeObject *ptr__Class_Type = NULL; -#if PY_VERSION_HEX < 0x02070000 -static PyTypeObject *ptr__CObject_Type = NULL; -#endif -static PyTypeObject *ptr__Complex_Type = NULL; -static PyTypeObject *ptr__Dict_Type = NULL; -static PyTypeObject *ptr__File_Type = NULL; -static PyTypeObject *ptr__Float_Type = NULL; -static PyTypeObject *ptr__Function_Type = NULL; -static PyTypeObject *ptr__Instance_Type = NULL; -static PyTypeObject *ptr__Int_Type = NULL; -static PyTypeObject *ptr__List_Type = NULL; -static PyTypeObject *ptr__Long_Type = NULL; -static PyTypeObject *ptr__Method_Type = NULL; -static PyTypeObject *ptr__Module_Type = NULL; -static PyTypeObject *ptr__Range_Type = NULL; -static PyTypeObject *ptr__Slice_Type = NULL; -static PyTypeObject *ptr__String_Type = NULL; -static PyTypeObject *ptr__TraceBack_Type = NULL; -static PyTypeObject *ptr__Tuple_Type = NULL; -static PyTypeObject *ptr__Type_Type = NULL; - -#if PY_MAJOR_VERSION >= 2 -static PyTypeObject *ptr__Unicode_Type = NULL; -#endif - -static int *ptr_Py_DebugFlag = NULL; -static int *ptr_Py_InteractiveFlag = NULL; -static int *ptr_Py_OptimizeFlag = NULL; -static int *ptr_Py_NoSiteFlag = NULL; -static int *ptr_Py_TabcheckFlag = NULL; -static int *ptr_Py_VerboseFlag = NULL; - -#if PY_MAJOR_VERSION >= 2 -static int *ptr_Py_UnicodeFlag = NULL; -#endif - -static char **ptr__Py_PackageContext = NULL; - -#ifdef Py_REF_DEBUG -int *ptr_Py_RefTotal; -#endif - - -//-------------------------------------------------------------------------------- -class GetAddressException -{ -public: - GetAddressException( const char *_name ) - : name( _name ) - {} - virtual ~GetAddressException() {} - const char *name; -}; - - -//-------------------------------------------------------------------------------- -static PyObject *GetPyObjectPointer_As_PyObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return *(PyObject **)addr; -} - -static PyObject *GetPyObject_As_PyObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (PyObject *)addr; -} - -static PyTypeObject *GetPyTypeObjectPointer_As_PyTypeObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return *(PyTypeObject **)addr; -} - -static PyTypeObject *GetPyTypeObject_As_PyTypeObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, 
name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (PyTypeObject *)addr; -} - -static int *GetInt_as_IntPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (int *)addr; -} - -static char **GetCharPointer_as_CharPointerPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (char **)addr; -} - - -#ifdef _DEBUG -static const char python_dll_name_format[] = "PYTHON%1.1d%1.1d_D.DLL"; -#else -static const char python_dll_name_format[] = "PYTHON%1.1d%1.1d.DLL"; -#endif - -//-------------------------------------------------------------------------------- -bool InitialisePythonIndirectInterface() -{ - char python_dll_name[sizeof(python_dll_name_format)]; - - sprintf( python_dll_name, python_dll_name_format, PY_MAJOR_VERSION, PY_MINOR_VERSION ); - - python_dll = LoadLibrary( python_dll_name ); - if( python_dll == NULL ) - return false; - - try -{ -#ifdef Py_REF_DEBUG - ptr_Py_RefTotal = GetInt_as_IntPointer( "_Py_RefTotal" ); -#endif - ptr_Py_DebugFlag = GetInt_as_IntPointer( "Py_DebugFlag" ); - ptr_Py_InteractiveFlag = GetInt_as_IntPointer( "Py_InteractiveFlag" ); - ptr_Py_OptimizeFlag = GetInt_as_IntPointer( "Py_OptimizeFlag" ); - ptr_Py_NoSiteFlag = GetInt_as_IntPointer( "Py_NoSiteFlag" ); - ptr_Py_TabcheckFlag = GetInt_as_IntPointer( "Py_TabcheckFlag" ); - ptr_Py_VerboseFlag = GetInt_as_IntPointer( "Py_VerboseFlag" ); -#if PY_MAJOR_VERSION >= 2 - ptr_Py_UnicodeFlag = GetInt_as_IntPointer( "Py_UnicodeFlag" ); -#endif - ptr__Py_PackageContext = GetCharPointer_as_CharPointerPointer( "_Py_PackageContext" ); - - ptr__Exc_ArithmeticError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ArithmeticError" ); - ptr__Exc_AssertionError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_AssertionError" ); - ptr__Exc_AttributeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_AttributeError" ); - ptr__Exc_EnvironmentError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_EnvironmentError" ); - ptr__Exc_EOFError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_EOFError" ); - ptr__Exc_Exception = GetPyObjectPointer_As_PyObjectPointer( "PyExc_Exception" ); - ptr__Exc_FloatingPointError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_FloatingPointError" ); - ptr__Exc_ImportError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ImportError" ); - ptr__Exc_IndexError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IndexError" ); - ptr__Exc_IOError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IOError" ); - ptr__Exc_KeyboardInterrupt = GetPyObjectPointer_As_PyObjectPointer( "PyExc_KeyboardInterrupt" ); - ptr__Exc_KeyError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_KeyError" ); - ptr__Exc_LookupError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_LookupError" ); - ptr__Exc_MemoryError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_MemoryError" ); - ptr__Exc_MemoryErrorInst = GetPyObjectPointer_As_PyObjectPointer( "PyExc_MemoryErrorInst" ); - ptr__Exc_NameError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_NameError" ); - ptr__Exc_NotImplementedError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_NotImplementedError" ); - ptr__Exc_OSError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_OSError" ); - ptr__Exc_OverflowError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_OverflowError" ); - ptr__Exc_RuntimeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_RuntimeError" ); - 
ptr__Exc_StandardError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_StandardError" ); - ptr__Exc_SyntaxError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SyntaxError" ); - ptr__Exc_SystemError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SystemError" ); - ptr__Exc_SystemExit = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SystemExit" ); - ptr__Exc_TypeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_TypeError" ); - ptr__Exc_ValueError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ValueError" ); -#ifdef MS_WINDOWS - ptr__Exc_WindowsError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_WindowsError" ); -#endif - ptr__Exc_ZeroDivisionError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ZeroDivisionError" ); - -#if PY_MAJOR_VERSION >= 2 - ptr__Exc_IndentationError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IndentationError" ); - ptr__Exc_TabError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_TabError" ); - ptr__Exc_UnboundLocalError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_UnboundLocalError" ); - ptr__Exc_UnicodeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_UnicodeError" ); -#endif - ptr__PyNone = GetPyObject_As_PyObjectPointer( "_Py_NoneStruct" ); - - ptr__PyFalse = GetPyObject_As_PyObjectPointer( "_Py_ZeroStruct" ); - ptr__PyTrue = GetPyObject_As_PyObjectPointer( "_Py_TrueStruct" ); - - ptr__Buffer_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyBuffer_Type" ); - ptr__CFunction_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyCFunction_Type" ); - ptr__Class_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyClass_Type" ); -#if PY_VERSION_HEX < 0x02070000 - ptr__CObject_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyCObject_Type" ); -#endif - ptr__Complex_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyComplex_Type" ); - ptr__Dict_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyDict_Type" ); - ptr__File_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFile_Type" ); - ptr__Float_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFloat_Type" ); - ptr__Function_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFunction_Type" ); - ptr__Instance_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyInstance_Type" ); - ptr__Int_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyInt_Type" ); - ptr__List_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyList_Type" ); - ptr__Long_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyLong_Type" ); - ptr__Method_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyMethod_Type" ); - ptr__Module_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyModule_Type" ); - ptr__Range_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyRange_Type" ); - ptr__Slice_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PySlice_Type" ); - ptr__String_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyString_Type" ); - ptr__TraceBack_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyTraceBack_Type" ); - ptr__Tuple_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyTuple_Type" ); - ptr__Type_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyType_Type" ); - -#if PY_MAJOR_VERSION >= 2 - ptr__Unicode_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyUnicode_Type" ); -#endif -} - catch( GetAddressException &e ) - { - OutputDebugString( python_dll_name ); - OutputDebugString( " does not contain symbol "); - OutputDebugString( e.name ); - OutputDebugString( "\n" ); - - return false; - } - - return true; -} - -// -// Wrap variables as function calls -// -PyObject * _Exc_ArithmeticError(){ return ptr__Exc_ArithmeticError; } -PyObject * 
_Exc_AssertionError(){ return ptr__Exc_AssertionError; } -PyObject * _Exc_AttributeError(){ return ptr__Exc_AttributeError; } -PyObject * _Exc_EnvironmentError(){ return ptr__Exc_EnvironmentError; } -PyObject * _Exc_EOFError() { return ptr__Exc_EOFError; } -PyObject * _Exc_Exception() { return ptr__Exc_Exception; } -PyObject * _Exc_FloatingPointError(){ return ptr__Exc_FloatingPointError; } -PyObject * _Exc_ImportError() { return ptr__Exc_ImportError; } -PyObject * _Exc_IndexError() { return ptr__Exc_IndexError; } -PyObject * _Exc_IOError() { return ptr__Exc_IOError; } -PyObject * _Exc_KeyboardInterrupt(){ return ptr__Exc_KeyboardInterrupt; } -PyObject * _Exc_KeyError() { return ptr__Exc_KeyError; } -PyObject * _Exc_LookupError() { return ptr__Exc_LookupError; } -PyObject * _Exc_MemoryError() { return ptr__Exc_MemoryError; } -PyObject * _Exc_MemoryErrorInst(){ return ptr__Exc_MemoryErrorInst; } -PyObject * _Exc_NameError() { return ptr__Exc_NameError; } -PyObject * _Exc_NotImplementedError(){ return ptr__Exc_NotImplementedError; } -PyObject * _Exc_OSError() { return ptr__Exc_OSError; } -PyObject * _Exc_OverflowError() { return ptr__Exc_OverflowError; } -PyObject * _Exc_RuntimeError() { return ptr__Exc_RuntimeError; } -PyObject * _Exc_StandardError() { return ptr__Exc_StandardError; } -PyObject * _Exc_SyntaxError() { return ptr__Exc_SyntaxError; } -PyObject * _Exc_SystemError() { return ptr__Exc_SystemError; } -PyObject * _Exc_SystemExit() { return ptr__Exc_SystemExit; } -PyObject * _Exc_TypeError() { return ptr__Exc_TypeError; } -PyObject * _Exc_ValueError() { return ptr__Exc_ValueError; } -#ifdef MS_WINDOWS -PyObject * _Exc_WindowsError() { return ptr__Exc_WindowsError; } -#endif -PyObject * _Exc_ZeroDivisionError(){ return ptr__Exc_ZeroDivisionError; } - -#if PY_MAJOR_VERSION >= 2 -PyObject * _Exc_IndentationError(){ return ptr__Exc_IndentationError; } -PyObject * _Exc_TabError() { return ptr__Exc_TabError; } -PyObject * _Exc_UnboundLocalError(){ return ptr__Exc_UnboundLocalError; } -PyObject * _Exc_UnicodeError() { return ptr__Exc_UnicodeError; } -#endif - -// -// wrap items in Object.h -// -PyObject * _None() { return ptr__PyNone; } - -PyObject * _False() { return ptr__PyFalse; } -PyObject * _True() { return ptr__PyTrue; } - -PyTypeObject * _Buffer_Type() { return ptr__Buffer_Type; } -PyTypeObject * _CFunction_Type(){ return ptr__CFunction_Type; } -PyTypeObject * _Class_Type() { return ptr__Class_Type; } -#if PY_VERSION_HEX < 0x02070000 -PyTypeObject * _CObject_Type() { return ptr__CObject_Type; } -#endif -PyTypeObject * _Complex_Type() { return ptr__Complex_Type; } -PyTypeObject * _Dict_Type() { return ptr__Dict_Type; } -PyTypeObject * _File_Type() { return ptr__File_Type; } -PyTypeObject * _Float_Type() { return ptr__Float_Type; } -PyTypeObject * _Function_Type() { return ptr__Function_Type; } -PyTypeObject * _Instance_Type() { return ptr__Instance_Type; } -PyTypeObject * _Bool_Type() { return ptr__Bool_Type; } -PyTypeObject * _Int_Type() { return ptr__Int_Type; } -PyTypeObject * _List_Type() { return ptr__List_Type; } -PyTypeObject * _Long_Type() { return ptr__Long_Type; } -PyTypeObject * _Method_Type() { return ptr__Method_Type; } -PyTypeObject * _Module_Type() { return ptr__Module_Type; } -PyTypeObject * _Range_Type() { return ptr__Range_Type; } -PyTypeObject * _Slice_Type() { return ptr__Slice_Type; } -PyTypeObject * _String_Type() { return ptr__String_Type; } -PyTypeObject * _TraceBack_Type(){ return ptr__TraceBack_Type; } -PyTypeObject * _Tuple_Type() { return ptr__Tuple_Type; 
} -PyTypeObject * _Type_Type() { return ptr__Type_Type; } - -#if PY_MAJOR_VERSION >= 2 -PyTypeObject * _Unicode_Type() { return ptr__Unicode_Type; } -#endif - -char *__Py_PackageContext() { return *ptr__Py_PackageContext; } - - -// -// wrap the Python Flag variables -// -int &_Py_DebugFlag() { return *ptr_Py_DebugFlag; } -int &_Py_InteractiveFlag() { return *ptr_Py_InteractiveFlag; } -int &_Py_OptimizeFlag() { return *ptr_Py_OptimizeFlag; } -int &_Py_NoSiteFlag() { return *ptr_Py_NoSiteFlag; } -int &_Py_TabcheckFlag() { return *ptr_Py_TabcheckFlag; } -int &_Py_VerboseFlag() { return *ptr_Py_VerboseFlag; } -#if PY_MAJOR_VERSION >= 2 -int &_Py_UnicodeFlag() { return *ptr_Py_UnicodeFlag; } -#endif - -void _XINCREF( PyObject *op ) -{ - // This function must match the contents of Py_XINCREF(op) - if( op == NULL ) - return; - -#ifdef Py_REF_DEBUG - (*ptr_Py_RefTotal)++; -#endif - (op)->ob_refcnt++; - -} - -void _XDECREF( PyObject *op ) -{ - // This function must match the contents of Py_XDECREF(op); - if( op == NULL ) - return; - -#ifdef Py_REF_DEBUG - (*ptr_Py_RefTotal)--; -#endif - - if (--(op)->ob_refcnt == 0) - _Py_Dealloc((PyObject *)(op)); -} - - -#else -#error "Can only delay load under Win32" -#endif - -#else - -// -// Duplicated these declarations from rangeobject.h which is missing the -// extern "C". This has been reported as a bug upto and include 2.1 -// -extern "C" DL_IMPORT(PyTypeObject) PyRange_Type; -extern "C" DL_IMPORT(PyObject *) PyRange_New(long, long, long, int); - - -//================================================================================ -// -// Map onto Macros -// -//================================================================================ - -// -// Wrap variables as function calls -// - -PyObject * _Exc_ArithmeticError() { return ::PyExc_ArithmeticError; } -PyObject * _Exc_AssertionError() { return ::PyExc_AssertionError; } -PyObject * _Exc_AttributeError() { return ::PyExc_AttributeError; } -PyObject * _Exc_EnvironmentError() { return ::PyExc_EnvironmentError; } -PyObject * _Exc_EOFError() { return ::PyExc_EOFError; } -PyObject * _Exc_Exception() { return ::PyExc_Exception; } -PyObject * _Exc_FloatingPointError() { return ::PyExc_FloatingPointError; } -PyObject * _Exc_ImportError() { return ::PyExc_ImportError; } -PyObject * _Exc_IndexError() { return ::PyExc_IndexError; } -PyObject * _Exc_IOError() { return ::PyExc_IOError; } -PyObject * _Exc_KeyboardInterrupt() { return ::PyExc_KeyboardInterrupt; } -PyObject * _Exc_KeyError() { return ::PyExc_KeyError; } -PyObject * _Exc_LookupError() { return ::PyExc_LookupError; } -PyObject * _Exc_MemoryError() { return ::PyExc_MemoryError; } -PyObject * _Exc_MemoryErrorInst() { return ::PyExc_MemoryErrorInst; } -PyObject * _Exc_NameError() { return ::PyExc_NameError; } -PyObject * _Exc_NotImplementedError() { return ::PyExc_NotImplementedError; } -PyObject * _Exc_OSError() { return ::PyExc_OSError; } -PyObject * _Exc_OverflowError() { return ::PyExc_OverflowError; } -PyObject * _Exc_RuntimeError() { return ::PyExc_RuntimeError; } -PyObject * _Exc_StandardError() { return ::PyExc_StandardError; } -PyObject * _Exc_SyntaxError() { return ::PyExc_SyntaxError; } -PyObject * _Exc_SystemError() { return ::PyExc_SystemError; } -PyObject * _Exc_SystemExit() { return ::PyExc_SystemExit; } -PyObject * _Exc_TypeError() { return ::PyExc_TypeError; } -PyObject * _Exc_ValueError() { return ::PyExc_ValueError; } -PyObject * _Exc_ZeroDivisionError() { return ::PyExc_ZeroDivisionError; } - -#ifdef MS_WINDOWS -PyObject * 
_Exc_WindowsError() { return ::PyExc_WindowsError; } -#endif - - -#if PY_MAJOR_VERSION >= 2 -PyObject * _Exc_IndentationError() { return ::PyExc_IndentationError; } -PyObject * _Exc_TabError() { return ::PyExc_TabError; } -PyObject * _Exc_UnboundLocalError() { return ::PyExc_UnboundLocalError; } -PyObject * _Exc_UnicodeError() { return ::PyExc_UnicodeError; } -#endif - - -// -// wrap items in Object.h -// -PyObject * _None() { return &::_Py_NoneStruct; } - -PyObject * _False() { return Py_False; } -PyObject * _True() { return Py_True; } - -PyTypeObject * _Buffer_Type() { return &PyBuffer_Type; } -PyTypeObject * _CFunction_Type() { return &PyCFunction_Type; } -PyTypeObject * _Class_Type() { return &PyClass_Type; } -#if PY_VERSION_HEX < 0x02070000 -PyTypeObject * _CObject_Type() { return &PyCObject_Type; } -#endif -PyTypeObject * _Complex_Type() { return &PyComplex_Type; } -PyTypeObject * _Dict_Type() { return &PyDict_Type; } -PyTypeObject * _File_Type() { return &PyFile_Type; } -PyTypeObject * _Float_Type() { return &PyFloat_Type; } -PyTypeObject * _Function_Type() { return &PyFunction_Type; } -PyTypeObject * _Instance_Type() { return &PyInstance_Type; } -PyTypeObject * _Bool_Type() { return &PyBool_Type; } -PyTypeObject * _Int_Type() { return &PyInt_Type; } -PyTypeObject * _List_Type() { return &PyList_Type; } -PyTypeObject * _Long_Type() { return &PyLong_Type; } -PyTypeObject * _Method_Type() { return &PyMethod_Type; } -PyTypeObject * _Module_Type() { return &PyModule_Type; } -PyTypeObject * _Range_Type() { return &PyRange_Type; } -PyTypeObject * _Slice_Type() { return &PySlice_Type; } -PyTypeObject * _String_Type() { return &PyString_Type; } -PyTypeObject * _TraceBack_Type() { return &PyTraceBack_Type; } -PyTypeObject * _Tuple_Type() { return &PyTuple_Type; } -PyTypeObject * _Type_Type() { return &PyType_Type; } - -#if PY_MAJOR_VERSION >= 2 -PyTypeObject * _Unicode_Type() { return &PyUnicode_Type; } -#endif - -// -// wrap flags -// -int &_Py_DebugFlag() { return Py_DebugFlag; } -int &_Py_InteractiveFlag(){ return Py_InteractiveFlag; } -int &_Py_OptimizeFlag() { return Py_OptimizeFlag; } -int &_Py_NoSiteFlag() { return Py_NoSiteFlag; } -int &_Py_TabcheckFlag() { return Py_TabcheckFlag; } -int &_Py_VerboseFlag() { return Py_VerboseFlag; } -#if PY_MAJOR_VERSION >= 2 -int &_Py_UnicodeFlag() { return Py_UnicodeFlag; } -#endif -char *__Py_PackageContext(){ return _Py_PackageContext; } - -// -// Needed to keep the abstactions for delayload interface -// -void _XINCREF( PyObject *op ) -{ - Py_XINCREF(op); -} - -void _XDECREF( PyObject *op ) -{ - Py_XDECREF(op); -} - -#endif -} diff --git a/CXX/Python2/IndirectPythonInterface.hxx b/CXX/Python2/IndirectPythonInterface.hxx index 33d4b83f34fc..a29a394c6c6a 100644 --- a/CXX/Python2/IndirectPythonInterface.hxx +++ b/CXX/Python2/IndirectPythonInterface.hxx @@ -113,10 +113,8 @@ bool _Instance_Check( PyObject *op ); PyTypeObject * _Method_Type(); bool _Method_Check( PyObject *op ); -#if PY_VERSION_HEX < 0x02070000 PyTypeObject * _CObject_Type(); bool _CObject_Check( PyObject *op ); -#endif PyTypeObject * _Complex_Type(); bool _Complex_Check( PyObject *op ); diff --git a/CXX/Python2/PythonType.hxx b/CXX/Python2/PythonType.hxx index a89a6c90481e..fc45a4bc6c5e 100644 --- a/CXX/Python2/PythonType.hxx +++ b/CXX/Python2/PythonType.hxx @@ -57,17 +57,15 @@ namespace Py PythonType &doc( const char *d ); PythonType &supportClass( void ); -#if !defined( PY3 ) PythonType &dealloc( void (*f)( PyObject* ) ); -#endif -#if defined( PYCXX_PYTHON_2TO3 ) || !defined( PY3 ) 
+#if defined( PYCXX_PYTHON_2TO3 )
         PythonType &supportPrint( void );
 #endif
         PythonType &supportGetattr( void );
         PythonType &supportSetattr( void );
         PythonType &supportGetattro( void );
         PythonType &supportSetattro( void );
-#if defined( PYCXX_PYTHON_2TO3 ) || !defined( PY3 )
+#if defined( PYCXX_PYTHON_2TO3 )
         PythonType &supportCompare( void );
 #endif
         PythonType &supportRichCompare( void );
diff --git a/CXX/Python2/cxx_extensions.cxx b/CXX/Python2/cxx_extensions.cxx
index 6ae62c7cfb05..611a335d1164 100644
--- a/CXX/Python2/cxx_extensions.cxx
+++ b/CXX/Python2/cxx_extensions.cxx
@@ -254,12 +254,16 @@ extern "C"
     // All the following functions redirect the call from Python
     // onto the matching virtual function in PythonExtensionBase
     //
+#if defined( PYCXX_PYTHON_2TO3 )
     static int print_handler( PyObject *, FILE *, int );
+#endif
     static PyObject *getattr_handler( PyObject *, char * );
     static int setattr_handler( PyObject *, char *, PyObject * );
     static PyObject *getattro_handler( PyObject *, PyObject * );
     static int setattro_handler( PyObject *, PyObject *, PyObject * );
+#if defined( PYCXX_PYTHON_2TO3 )
     static int compare_handler( PyObject *, PyObject * );
+#endif
     static PyObject *rich_compare_handler( PyObject *, PyObject *, int );
     static PyObject *repr_handler( PyObject * );
     static PyObject *str_handler( PyObject * );
@@ -290,9 +294,7 @@ extern "C"
     static PyObject *number_invert_handler( PyObject * );
     static PyObject *number_int_handler( PyObject * );
     static PyObject *number_float_handler( PyObject * );
-#if !defined( PY3 )
     static PyObject *number_long_handler( PyObject * );
-#endif
     static PyObject *number_oct_handler( PyObject * );
     static PyObject *number_hex_handler( PyObject * );
     static PyObject *number_add_handler( PyObject *, PyObject * );
@@ -335,13 +337,9 @@ PythonType &PythonType::supportSequenceType()
         sequence_table->sq_concat = sequence_concat_handler;
         sequence_table->sq_repeat = sequence_repeat_handler;
         sequence_table->sq_item = sequence_item_handler;
-#if !defined( PY3 )
         sequence_table->sq_slice = sequence_slice_handler;
-#endif
         sequence_table->sq_ass_item = sequence_ass_item_handler; // BAS setup seperately?
-#if !defined( PY3 )
         sequence_table->sq_ass_slice = sequence_ass_slice_handler; // BAS setup seperately?
-#endif } return *this; } @@ -370,36 +368,26 @@ PythonType &PythonType::supportNumberType() number_table->nb_add = number_add_handler; number_table->nb_subtract = number_subtract_handler; number_table->nb_multiply = number_multiply_handler; -#if !defined( PY3 ) number_table->nb_divide = number_divide_handler; -#endif number_table->nb_remainder = number_remainder_handler; number_table->nb_divmod = number_divmod_handler; number_table->nb_power = number_power_handler; number_table->nb_negative = number_negative_handler; number_table->nb_positive = number_positive_handler; number_table->nb_absolute = number_absolute_handler; -#if !defined( PY3 ) number_table->nb_nonzero = number_nonzero_handler; -#endif number_table->nb_invert = number_invert_handler; number_table->nb_lshift = number_lshift_handler; number_table->nb_rshift = number_rshift_handler; number_table->nb_and = number_and_handler; number_table->nb_xor = number_xor_handler; number_table->nb_or = number_or_handler; -#if !defined( PY3 ) number_table->nb_coerce = 0; -#endif number_table->nb_int = number_int_handler; -#if !defined( PY3 ) number_table->nb_long = number_long_handler; -#endif number_table->nb_float = number_float_handler; -#if !defined( PY3 ) number_table->nb_oct = number_oct_handler; number_table->nb_hex = number_hex_handler; -#endif } return *this; } @@ -411,11 +399,9 @@ PythonType &PythonType::supportBufferType() buffer_table = new PyBufferProcs; memset( buffer_table, 0, sizeof( PyBufferProcs ) ); // ensure new fields are 0 table->tp_as_buffer = buffer_table; -#if !defined( PY3 ) buffer_table->bf_getreadbuffer = buffer_getreadbuffer_handler; buffer_table->bf_getwritebuffer = buffer_getwritebuffer_handler; buffer_table->bf_getsegcount = buffer_getsegcount_handler; -#endif } return *this; } @@ -434,10 +420,9 @@ PythonType::PythonType( size_t basic_size, int itemsize, const char *default_nam memset( table, 0, sizeof( PyTypeObject ) ); // ensure new fields are 0 *reinterpret_cast( table ) = py_object_initializer; -#if !defined( PY3 ) table->ob_type = _Type_Type(); table->ob_size = 0; -#endif + table->tp_name = const_cast( default_name ); table->tp_basicsize = basic_size; table->tp_itemsize = itemsize; @@ -575,11 +560,13 @@ PythonType &PythonType::dealloc( void( *f )( PyObject * )) return *this; } +#if defined( PYCXX_PYTHON_2TO3 ) PythonType &PythonType::supportPrint() { table->tp_print = print_handler; return *this; } +#endif PythonType &PythonType::supportGetattr() { @@ -605,11 +592,13 @@ PythonType &PythonType::supportSetattro() return *this; } +#if defined( PYCXX_PYTHON_2TO3 ) PythonType &PythonType::supportCompare() { table->tp_compare = compare_handler; return *this; } +#endif #if PY_MAJOR_VERSION > 2 || (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 1) PythonType &PythonType::supportRichCompare() @@ -669,6 +658,7 @@ PythonExtensionBase *getPythonExtensionBase( PyObject *self ) } +#if defined( PYCXX_PYTHON_2TO3 ) extern "C" int print_handler( PyObject *self, FILE *fp, int flags ) { try @@ -681,6 +671,7 @@ extern "C" int print_handler( PyObject *self, FILE *fp, int flags ) return -1; // indicate error } } +#endif extern "C" PyObject *getattr_handler( PyObject *self, char *name ) { @@ -734,6 +725,7 @@ extern "C" int setattro_handler( PyObject *self, PyObject *name, PyObject *value } } +#if defined( PYCXX_PYTHON_2TO3 ) extern "C" int compare_handler( PyObject *self, PyObject *other ) { try @@ -746,6 +738,7 @@ extern "C" int compare_handler( PyObject *self, PyObject *other ) return -1; // indicate error } } +#endif #if 
PY_MAJOR_VERSION > 2 || (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 1) extern "C" PyObject *rich_compare_handler( PyObject *self, PyObject *other, int op ) @@ -1719,11 +1712,7 @@ extern "C" PyObject *method_keyword_call_handler( PyObject *_self_and_name_tuple Tuple self_and_name_tuple( _self_and_name_tuple ); PyObject *self_in_cobject = self_and_name_tuple[0].ptr(); - #if PY_VERSION_HEX < 0x02070000 - void *self_as_void = PyCObject_AsVoidPtr( self_in_cobject ); - #else - void *self_as_void = PyCapsule_GetPointer( self_in_cobject, NULL ); - #endif + void *self_as_void = PyCObject_AsVoidPtr( self_in_cobject ); if( self_as_void == NULL ) return NULL; @@ -1739,11 +1728,7 @@ extern "C" PyObject *method_keyword_call_handler( PyObject *_self_and_name_tuple ( self->invoke_method_keyword ( - #if PY_VERSION_HEX < 0x02070000 - PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ), - #else - PyCapsule_GetPointer( self_and_name_tuple[1].ptr(), NULL ), - #endif + PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ), args, keywords ) @@ -1759,11 +1744,7 @@ extern "C" PyObject *method_keyword_call_handler( PyObject *_self_and_name_tuple ( self->invoke_method_keyword ( - #if PY_VERSION_HEX < 0x02070000 - PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ), - #else - PyCapsule_GetPointer( self_and_name_tuple[1].ptr(), NULL ), - #endif + PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ), args, keywords ) @@ -1785,11 +1766,7 @@ extern "C" PyObject *method_varargs_call_handler( PyObject *_self_and_name_tuple Tuple self_and_name_tuple( _self_and_name_tuple ); PyObject *self_in_cobject = self_and_name_tuple[0].ptr(); - #if PY_VERSION_HEX < 0x02070000 - void *self_as_void = PyCObject_AsVoidPtr( self_in_cobject ); - #else - void *self_as_void = PyCapsule_GetPointer( self_in_cobject, NULL ); - #endif + void *self_as_void = PyCObject_AsVoidPtr( self_in_cobject ); if( self_as_void == NULL ) return NULL; @@ -1800,11 +1777,7 @@ extern "C" PyObject *method_varargs_call_handler( PyObject *_self_and_name_tuple ( self->invoke_method_varargs ( - #if PY_VERSION_HEX < 0x02070000 - PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ), - #else - PyCapsule_GetPointer( self_and_name_tuple[1].ptr(), NULL ), - #endif + PyCObject_AsVoidPtr( self_and_name_tuple[1].ptr() ), args ) ); diff --git a/CXX/Python3/ExtensionModule.hxx b/CXX/Python3/ExtensionModule.hxx index a892a6f8cde5..75eb77568ae4 100644 --- a/CXX/Python3/ExtensionModule.hxx +++ b/CXX/Python3/ExtensionModule.hxx @@ -135,8 +135,8 @@ namespace Py static PyObject *self = PyCapsule_New( this, NULL, NULL ); Tuple args( 2 ); - args[0] = Object( self ); - args[1] = Object( PyCapsule_New( method_def, NULL, NULL ) ); + args[0] = Object( self, true ); + args[1] = Object( PyCapsule_New( method_def, NULL, NULL ), true ); PyObject *func = PyCFunction_New ( diff --git a/CXX/Python3/IndirectPythonInterface.cxx b/CXX/Python3/IndirectPythonInterface.cxx deleted file mode 100644 index 2b7ff41b75da..000000000000 --- a/CXX/Python3/IndirectPythonInterface.cxx +++ /dev/null @@ -1,518 +0,0 @@ -//----------------------------------------------------------------------------- -// -// Copyright (c) 1998 - 2007, The Regents of the University of California -// Produced at the Lawrence Livermore National Laboratory -// All rights reserved. -// -// This file is part of PyCXX. For details,see http://cxx.sourceforge.net/. The -// full copyright notice is contained in the file COPYRIGHT located at the root -// of the PyCXX distribution. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the disclaimer below. -// - Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the disclaimer (as noted below) in the -// documentation and/or materials provided with the distribution. -// - Neither the name of the UC/LLNL nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF -// CALIFORNIA, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -// DAMAGE. -// -//----------------------------------------------------------------------------- - -#include "CXX/IndirectPythonInterface.hxx" - -namespace Py -{ -bool _CFunction_Check( PyObject *op ) { return op->ob_type == _CFunction_Type(); } -bool _Complex_Check( PyObject *op ) { return op->ob_type == _Complex_Type(); } -bool _Dict_Check( PyObject *op ) { return op->ob_type == _Dict_Type(); } -bool _Float_Check( PyObject *op ) { return op->ob_type == _Float_Type(); } -bool _Function_Check( PyObject *op ) { return op->ob_type == _Function_Type(); } -bool _Boolean_Check( PyObject *op ) { return op->ob_type == _Bool_Type(); } -bool _List_Check( PyObject *op ) { return op->ob_type == _List_Type(); } -bool _Long_Check( PyObject *op ) { return op->ob_type == _Long_Type(); } -bool _Method_Check( PyObject *op ) { return op->ob_type == _Method_Type(); } -bool _Module_Check( PyObject *op ) { return op->ob_type == _Module_Type(); } -bool _Range_Check( PyObject *op ) { return op->ob_type == _Range_Type(); } -bool _Slice_Check( PyObject *op ) { return op->ob_type == _Slice_Type(); } -bool _TraceBack_Check( PyObject *op ) { return op->ob_type == _TraceBack_Type(); } -bool _Tuple_Check( PyObject *op ) { return op->ob_type == _Tuple_Type(); } -bool _Type_Check( PyObject *op ) { return op->ob_type == _Type_Type(); } -bool _Unicode_Check( PyObject *op ) { return op->ob_type == _Unicode_Type(); } -bool _Bytes_Check( PyObject *op ) { return op->ob_type == _Bytes_Type(); } - -#if defined(PY_WIN32_DELAYLOAD_PYTHON_DLL) - -#if defined(MS_WINDOWS) -#include - - -static HMODULE python_dll; - -static PyObject *ptr__Exc_ArithmeticError = NULL; -static PyObject *ptr__Exc_AssertionError = NULL; -static PyObject *ptr__Exc_AttributeError = NULL; -static PyObject *ptr__Exc_EnvironmentError = NULL; -static PyObject *ptr__Exc_EOFError = NULL; -static PyObject *ptr__Exc_Exception = NULL; -static PyObject *ptr__Exc_FloatingPointError = NULL; -static PyObject *ptr__Exc_ImportError = NULL; -static 
PyObject *ptr__Exc_IndexError = NULL; -static PyObject *ptr__Exc_IOError = NULL; -static PyObject *ptr__Exc_KeyboardInterrupt = NULL; -static PyObject *ptr__Exc_KeyError = NULL; -static PyObject *ptr__Exc_LookupError = NULL; -static PyObject *ptr__Exc_MemoryError = NULL; -static PyObject *ptr__Exc_NameError = NULL; -static PyObject *ptr__Exc_NotImplementedError = NULL; -static PyObject *ptr__Exc_OSError = NULL; -static PyObject *ptr__Exc_OverflowError = NULL; -static PyObject *ptr__Exc_RuntimeError = NULL; -static PyObject *ptr__Exc_StandardError = NULL; -static PyObject *ptr__Exc_SyntaxError = NULL; -static PyObject *ptr__Exc_SystemError = NULL; -static PyObject *ptr__Exc_SystemExit = NULL; -static PyObject *ptr__Exc_TypeError = NULL; -static PyObject *ptr__Exc_ValueError = NULL; -static PyObject *ptr__Exc_ZeroDivisionError = NULL; - -#ifdef MS_WINDOWS -static PyObject *ptr__Exc_WindowsError = NULL; -#endif - -static PyObject *ptr__Exc_IndentationError = NULL; -static PyObject *ptr__Exc_TabError = NULL; -static PyObject *ptr__Exc_UnboundLocalError = NULL; -static PyObject *ptr__Exc_UnicodeError = NULL; -static PyObject *ptr__PyNone = NULL; -static PyObject *ptr__PyFalse = NULL; -static PyObject *ptr__PyTrue = NULL; -static PyTypeObject *ptr__CFunction_Type = NULL; -static PyTypeObject *ptr__Complex_Type = NULL; -static PyTypeObject *ptr__Dict_Type = NULL; -static PyTypeObject *ptr__Float_Type = NULL; -static PyTypeObject *ptr__Function_Type = NULL; -static PyTypeObject *ptr__List_Type = NULL; -static PyTypeObject *ptr__Long_Type = NULL; -static PyTypeObject *ptr__Method_Type = NULL; -static PyTypeObject *ptr__Module_Type = NULL; -static PyTypeObject *ptr__Range_Type = NULL; -static PyTypeObject *ptr__Slice_Type = NULL; -static PyTypeObject *ptr__TraceBack_Type = NULL; -static PyTypeObject *ptr__Tuple_Type = NULL; -static PyTypeObject *ptr__Type_Type = NULL; - -static int *ptr_Py_DebugFlag = NULL; -static int *ptr_Py_InteractiveFlag = NULL; -static int *ptr_Py_OptimizeFlag = NULL; -static int *ptr_Py_NoSiteFlag = NULL; -static int *ptr_Py_VerboseFlag = NULL; - -static char **ptr__Py_PackageContext = NULL; - -#ifdef Py_REF_DEBUG -int *ptr_Py_RefTotal; -#endif - - -//-------------------------------------------------------------------------------- -class GetAddressException -{ -public: - GetAddressException( const char *_name ) - : name( _name ) - {} - virtual ~GetAddressException() {} - const char *name; -}; - - -//-------------------------------------------------------------------------------- -static PyObject *GetPyObjectPointer_As_PyObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return *(PyObject **)addr; -} - -static PyObject *GetPyObject_As_PyObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (PyObject *)addr; -} - -static PyTypeObject *GetPyTypeObjectPointer_As_PyTypeObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return *(PyTypeObject **)addr; -} - -static PyTypeObject *GetPyTypeObject_As_PyTypeObjectPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (PyTypeObject *)addr; -} - -static int *GetInt_as_IntPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( 
python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (int *)addr; -} - -static char **GetCharPointer_as_CharPointerPointer( const char *name ) -{ - FARPROC addr = GetProcAddress( python_dll, name ); - if( addr == NULL ) - throw GetAddressException( name ); - - return (char **)addr; -} - - -#ifdef _DEBUG -static const char python_dll_name_format[] = "PYTHON%1.1d%1.1d_D.DLL"; -#else -static const char python_dll_name_format[] = "PYTHON%1.1d%1.1d.DLL"; -#endif - -//-------------------------------------------------------------------------------- -bool InitialisePythonIndirectInterface() -{ - char python_dll_name[sizeof(python_dll_name_format)]; - - sprintf( python_dll_name, python_dll_name_format, PY_MAJOR_VERSION, PY_MINOR_VERSION ); - - python_dll = LoadLibrary( python_dll_name ); - if( python_dll == NULL ) - return false; - - try - { -#ifdef Py_REF_DEBUG - ptr_Py_RefTotal = GetInt_as_IntPointer( "_Py_RefTotal" ); -#endif - ptr_Py_DebugFlag = GetInt_as_IntPointer( "Py_DebugFlag" ); - ptr_Py_InteractiveFlag = GetInt_as_IntPointer( "Py_InteractiveFlag" ); - ptr_Py_OptimizeFlag = GetInt_as_IntPointer( "Py_OptimizeFlag" ); - ptr_Py_NoSiteFlag = GetInt_as_IntPointer( "Py_NoSiteFlag" ); - ptr_Py_VerboseFlag = GetInt_as_IntPointer( "Py_VerboseFlag" ); - ptr__Py_PackageContext = GetCharPointer_as_CharPointerPointer( "_Py_PackageContext" ); - - ptr__Exc_ArithmeticError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ArithmeticError" ); - ptr__Exc_AssertionError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_AssertionError" ); - ptr__Exc_AttributeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_AttributeError" ); - ptr__Exc_EnvironmentError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_EnvironmentError" ); - ptr__Exc_EOFError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_EOFError" ); - ptr__Exc_Exception = GetPyObjectPointer_As_PyObjectPointer( "PyExc_Exception" ); - ptr__Exc_FloatingPointError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_FloatingPointError" ); - ptr__Exc_ImportError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ImportError" ); - ptr__Exc_IndexError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IndexError" ); - ptr__Exc_IOError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IOError" ); - ptr__Exc_KeyboardInterrupt = GetPyObjectPointer_As_PyObjectPointer( "PyExc_KeyboardInterrupt" ); - ptr__Exc_KeyError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_KeyError" ); - ptr__Exc_LookupError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_LookupError" ); - ptr__Exc_MemoryError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_MemoryError" ); - ptr__Exc_NameError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_NameError" ); - ptr__Exc_NotImplementedError= GetPyObjectPointer_As_PyObjectPointer( "PyExc_NotImplementedError" ); - ptr__Exc_OSError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_OSError" ); - ptr__Exc_OverflowError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_OverflowError" ); - ptr__Exc_RuntimeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_RuntimeError" ); - ptr__Exc_StandardError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_StandardError" ); - ptr__Exc_SyntaxError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SyntaxError" ); - ptr__Exc_SystemError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SystemError" ); - ptr__Exc_SystemExit = GetPyObjectPointer_As_PyObjectPointer( "PyExc_SystemExit" ); - ptr__Exc_TypeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_TypeError" ); - ptr__Exc_ValueError = 
GetPyObjectPointer_As_PyObjectPointer( "PyExc_ValueError" ); -#ifdef MS_WINDOWS - ptr__Exc_WindowsError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_WindowsError" ); -#endif - ptr__Exc_ZeroDivisionError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_ZeroDivisionError" ); - ptr__Exc_IndentationError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_IndentationError" ); - ptr__Exc_TabError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_TabError" ); - ptr__Exc_UnboundLocalError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_UnboundLocalError" ); - ptr__Exc_UnicodeError = GetPyObjectPointer_As_PyObjectPointer( "PyExc_UnicodeError" ); - ptr__PyNone = GetPyObject_As_PyObjectPointer( "_Py_NoneStruct" ); - - ptr__PyFalse = GetPyObject_As_PyObjectPointer( "_Py_ZeroStruct" ); - ptr__PyTrue = GetPyObject_As_PyObjectPointer( "_Py_TrueStruct" ); - - ptr__CFunction_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyCFunction_Type" ); - ptr__Complex_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyComplex_Type" ); - ptr__Dict_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyDict_Type" ); - ptr__Float_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFloat_Type" ); - ptr__Function_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyFunction_Type" ); - ptr__List_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyList_Type" ); - ptr__Long_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyLong_Type" ); - ptr__Method_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyMethod_Type" ); - ptr__Module_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyModule_Type" ); - ptr__Range_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyRange_Type" ); - ptr__Slice_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PySlice_Type" ); - ptr__TraceBack_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyTraceBack_Type" ); - ptr__Tuple_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyTuple_Type" ); - ptr__Type_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyType_Type" ); - ptr__Unicode_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyUnicode_Type" ); - ptr__Bytes_Type = GetPyTypeObject_As_PyTypeObjectPointer( "PyBytes_Type" ); - } - catch( GetAddressException &e ) - { - OutputDebugString( python_dll_name ); - OutputDebugString( " does not contain symbol "); - OutputDebugString( e.name ); - OutputDebugString( "\n" ); - - return false; - } - - return true; -} - -// -// Wrap variables as function calls -// -PyObject *_Exc_ArithmeticError() { return ptr__Exc_ArithmeticError; } -PyObject *_Exc_AssertionError() { return ptr__Exc_AssertionError; } -PyObject *_Exc_AttributeError() { return ptr__Exc_AttributeError; } -PyObject *_Exc_EnvironmentError() { return ptr__Exc_EnvironmentError; } -PyObject *_Exc_EOFError() { return ptr__Exc_EOFError; } -PyObject *_Exc_Exception() { return ptr__Exc_Exception; } -PyObject *_Exc_FloatingPointError() { return ptr__Exc_FloatingPointError; } -PyObject *_Exc_ImportError() { return ptr__Exc_ImportError; } -PyObject *_Exc_IndexError() { return ptr__Exc_IndexError; } -PyObject *_Exc_IOError() { return ptr__Exc_IOError; } -PyObject *_Exc_KeyboardInterrupt() { return ptr__Exc_KeyboardInterrupt; } -PyObject *_Exc_KeyError() { return ptr__Exc_KeyError; } -PyObject *_Exc_LookupError() { return ptr__Exc_LookupError; } -PyObject *_Exc_MemoryError() { return ptr__Exc_MemoryError; } -PyObject *_Exc_NameError() { return ptr__Exc_NameError; } -PyObject *_Exc_NotImplementedError() { return ptr__Exc_NotImplementedError; } -PyObject *_Exc_OSError() { return ptr__Exc_OSError; } -PyObject *_Exc_OverflowError() { return 
ptr__Exc_OverflowError; } -PyObject *_Exc_RuntimeError() { return ptr__Exc_RuntimeError; } -PyObject *_Exc_StandardError() { return ptr__Exc_StandardError; } -PyObject *_Exc_SyntaxError() { return ptr__Exc_SyntaxError; } -PyObject *_Exc_SystemError() { return ptr__Exc_SystemError; } -PyObject *_Exc_SystemExit() { return ptr__Exc_SystemExit; } -PyObject *_Exc_TypeError() { return ptr__Exc_TypeError; } -PyObject *_Exc_ValueError() { return ptr__Exc_ValueError; } -#ifdef MS_WINDOWS -PyObject *_Exc_WindowsError() { return ptr__Exc_WindowsError; } -#endif -PyObject *_Exc_ZeroDivisionError() { return ptr__Exc_ZeroDivisionError; } -PyObject *_Exc_IndentationError() { return ptr__Exc_IndentationError; } -PyObject *_Exc_TabError() { return ptr__Exc_TabError; } -PyObject *_Exc_UnboundLocalError() { return ptr__Exc_UnboundLocalError; } -PyObject *_Exc_UnicodeError() { return ptr__Exc_UnicodeError; } - -// -// wrap items in Object.h -// -PyObject *_None() { return ptr__PyNone; } - -PyObject *_False() { return ptr__PyFalse; } -PyObject *_True() { return ptr__PyTrue; } - -PyTypeObject *_CFunction_Type() { return ptr__CFunction_Type; } -PyTypeObject *_Complex_Type() { return ptr__Complex_Type; } -PyTypeObject *_Dict_Type() { return ptr__Dict_Type; } -PyTypeObject *_Float_Type() { return ptr__Float_Type; } -PyTypeObject *_Function_Type() { return ptr__Function_Type; } -PyTypeObject *_Bool_Type() { return ptr__Bool_Type; } -PyTypeObject *_List_Type() { return ptr__List_Type; } -PyTypeObject *_Long_Type() { return ptr__Long_Type; } -PyTypeObject *_Method_Type() { return ptr__Method_Type; } -PyTypeObject *_Module_Type() { return ptr__Module_Type; } -PyTypeObject *_Range_Type() { return ptr__Range_Type; } -PyTypeObject *_Slice_Type() { return ptr__Slice_Type; } -PyTypeObject *_TraceBack_Type() { return ptr__TraceBack_Type; } -PyTypeObject *_Tuple_Type() { return ptr__Tuple_Type; } -PyTypeObject *_Type_Type() { return ptr__Type_Type; } -PyTypeObject *_Unicode_Type() { return ptr__Unicode_Type; } -PyTypeObject *_Bytes_Type() { return ptr__Bytes_Type; } - -char *__Py_PackageContext() { return *ptr__Py_PackageContext; } - - -// -// wrap the Python Flag variables -// -int &_Py_DebugFlag() { return *ptr_Py_DebugFlag; } -int &_Py_InteractiveFlag() { return *ptr_Py_InteractiveFlag; } -int &_Py_OptimizeFlag() { return *ptr_Py_OptimizeFlag; } -int &_Py_NoSiteFlag() { return *ptr_Py_NoSiteFlag; } -int &_Py_VerboseFlag() { return *ptr_Py_VerboseFlag; } - -#if 0 -#define Py_INCREF(op) ( \ - _Py_INC_REFTOTAL _Py_REF_DEBUG_COMMA \ - ((PyObject*)(op))->ob_refcnt++) - -#define Py_DECREF(op) \ - if (_Py_DEC_REFTOTAL _Py_REF_DEBUG_COMMA \ - --((PyObject*)(op))->ob_refcnt != 0) \ - _Py_CHECK_REFCNT(op) \ - else \ - _Py_Dealloc((PyObject *)(op)) -#endif - -void _XINCREF( PyObject *op ) -{ - // This function must match the contents of Py_XINCREF(op) - if( op == NULL ) - return; - -#ifdef Py_REF_DEBUG - (*ptr_Py_RefTotal)++; -#endif - (op)->ob_refcnt++; - -} - -void _XDECREF( PyObject *op ) -{ - // This function must match the contents of Py_XDECREF(op); - if( op == NULL ) - return; - -#ifdef Py_REF_DEBUG - (*ptr_Py_RefTotal)--; -#endif - - if (--(op)->ob_refcnt == 0) - _Py_Dealloc((PyObject *)(op)); -} - - -#else -#error "Can only delay load under Win32" -#endif - -#else - -//================================================================================ -// -// Map onto Macros -// -//================================================================================ - -// -// Wrap variables as function calls -// - -PyObject 
*_Exc_ArithmeticError() { return ::PyExc_ArithmeticError; } -PyObject *_Exc_AssertionError() { return ::PyExc_AssertionError; } -PyObject *_Exc_AttributeError() { return ::PyExc_AttributeError; } -PyObject *_Exc_EnvironmentError() { return ::PyExc_EnvironmentError; } -PyObject *_Exc_EOFError() { return ::PyExc_EOFError; } -PyObject *_Exc_Exception() { return ::PyExc_Exception; } -PyObject *_Exc_FloatingPointError() { return ::PyExc_FloatingPointError; } -PyObject *_Exc_ImportError() { return ::PyExc_ImportError; } -PyObject *_Exc_IndexError() { return ::PyExc_IndexError; } -PyObject *_Exc_IOError() { return ::PyExc_IOError; } -PyObject *_Exc_KeyboardInterrupt() { return ::PyExc_KeyboardInterrupt; } -PyObject *_Exc_KeyError() { return ::PyExc_KeyError; } -PyObject *_Exc_LookupError() { return ::PyExc_LookupError; } -PyObject *_Exc_MemoryError() { return ::PyExc_MemoryError; } -PyObject *_Exc_NameError() { return ::PyExc_NameError; } -PyObject *_Exc_NotImplementedError() { return ::PyExc_NotImplementedError; } -PyObject *_Exc_OSError() { return ::PyExc_OSError; } -PyObject *_Exc_OverflowError() { return ::PyExc_OverflowError; } -PyObject *_Exc_RuntimeError() { return ::PyExc_RuntimeError; } -PyObject *_Exc_SyntaxError() { return ::PyExc_SyntaxError; } -PyObject *_Exc_SystemError() { return ::PyExc_SystemError; } -PyObject *_Exc_SystemExit() { return ::PyExc_SystemExit; } -PyObject *_Exc_TypeError() { return ::PyExc_TypeError; } -PyObject *_Exc_ValueError() { return ::PyExc_ValueError; } -PyObject *_Exc_ZeroDivisionError() { return ::PyExc_ZeroDivisionError; } -PyObject *_Exc_IndentationError() { return ::PyExc_IndentationError; } -PyObject *_Exc_TabError() { return ::PyExc_TabError; } -PyObject *_Exc_UnboundLocalError() { return ::PyExc_UnboundLocalError; } -PyObject *_Exc_UnicodeError() { return ::PyExc_UnicodeError; } - -#ifdef MS_WINDOWS -PyObject *_Exc_WindowsError() { return ::PyExc_WindowsError; } -#endif - - - - -// -// wrap items in Object.h -// -PyObject *_None() { return &::_Py_NoneStruct; } - -PyObject *_False() { return Py_False; } -PyObject *_True() { return Py_True; } - -PyTypeObject *_CFunction_Type() { return &PyCFunction_Type; } -PyTypeObject *_Complex_Type() { return &PyComplex_Type; } -PyTypeObject *_Dict_Type() { return &PyDict_Type; } -PyTypeObject *_Float_Type() { return &PyFloat_Type; } -PyTypeObject *_Function_Type() { return &PyFunction_Type; } -PyTypeObject *_Bool_Type() { return &PyBool_Type; } -PyTypeObject *_List_Type() { return &PyList_Type; } -PyTypeObject *_Long_Type() { return &PyLong_Type; } -PyTypeObject *_Method_Type() { return &PyMethod_Type; } -PyTypeObject *_Module_Type() { return &PyModule_Type; } -PyTypeObject *_Range_Type() { return &PyRange_Type; } -PyTypeObject *_Slice_Type() { return &PySlice_Type; } -PyTypeObject *_TraceBack_Type() { return &PyTraceBack_Type; } -PyTypeObject *_Tuple_Type() { return &PyTuple_Type; } -PyTypeObject *_Type_Type() { return &PyType_Type; } -PyTypeObject *_Unicode_Type() { return &PyUnicode_Type; } -PyTypeObject *_Bytes_Type() { return &PyBytes_Type; } - -// -// wrap flags -// -int &_Py_DebugFlag() { return Py_DebugFlag; } -int &_Py_InteractiveFlag() { return Py_InteractiveFlag; } -int &_Py_OptimizeFlag() { return Py_OptimizeFlag; } -int &_Py_NoSiteFlag() { return Py_NoSiteFlag; } -int &_Py_VerboseFlag() { return Py_VerboseFlag; } -char *__Py_PackageContext() { return _Py_PackageContext; } - -// -// Needed to keep the abstactions for delayload interface -// -void _XINCREF( PyObject *op ) -{ - Py_XINCREF( op ); -} - 
-void _XDECREF( PyObject *op ) -{ - Py_XDECREF( op ); -} - -#endif -} diff --git a/CXX/Python3/Objects.hxx b/CXX/Python3/Objects.hxx index 2847512f065a..927618f0ba06 100644 --- a/CXX/Python3/Objects.hxx +++ b/CXX/Python3/Objects.hxx @@ -413,7 +413,7 @@ namespace Py { } - virtual bool accepts( PyObject *pyob ) const + virtual bool accepts( PyObject *pyob ) { return pyob == NULL; } @@ -1328,7 +1328,6 @@ namespace Py } // Assignment acquires new ownership of pointer - SeqBase &operator=( const Object &rhs ) { return *this = *rhs; @@ -1520,7 +1519,7 @@ namespace Py int operator-( const iterator &other ) const { - if( seq->ptr() != other.seq->ptr() ) + if( seq->ptr() != other.seq->ptr() ) throw RuntimeError( "SeqBase::iterator comparison error" ); return count - other.count; @@ -2192,8 +2191,8 @@ namespace Py } } } - // Assignment acquires new ownership of pointer + // Assignment acquires new ownership of pointer Tuple &operator=( const Object &rhs ) { return *this = *rhs; @@ -2380,8 +2379,8 @@ namespace Py { return max_size(); } - // Assignment acquires new ownership of pointer + // Assignment acquires new ownership of pointer List &operator=( const Object &rhs ) { return *this = *rhs; @@ -3074,8 +3073,8 @@ namespace Py set( PyDict_New(), true ); validate(); } - // Assignment acquires new ownership of pointer + // Assignment acquires new ownership of pointer Dict &operator=( const Object &rhs ) { return *this = *rhs; diff --git a/CXX/Python3/cxx_extensions.cxx b/CXX/Python3/cxx_extensions.cxx index 59874ad95576..8f2a4886c251 100644 --- a/CXX/Python3/cxx_extensions.cxx +++ b/CXX/Python3/cxx_extensions.cxx @@ -366,7 +366,7 @@ PythonType::PythonType( size_t basic_size, int itemsize, const char *default_nam memset( table, 0, sizeof( PyTypeObject ) ); // ensure new fields are 0 *reinterpret_cast<PyObject *>( table ) = py_object_initializer; - // QQQ table->ob_type = _Type_Type(); + reinterpret_cast<PyObject *>( table )->ob_type = _Type_Type(); // QQQ table->ob_size = 0; table->tp_name = const_cast<char *>( default_name ); table->tp_basicsize = basic_size; @@ -440,10 +440,10 @@ PythonType::PythonType( size_t basic_size, int itemsize, const char *default_nam table->tp_version_tag = 0; #ifdef COUNT_ALLOCS - table->tp_allocs = 0; - table->tp_frees = 0; + table->tp_alloc = 0; + table->tp_free = 0; table->tp_maxalloc = 0; - table->tp_prev = 0; + table->tp_orev = 0; table->tp_next = 0; #endif } @@ -1602,9 +1602,6 @@ extern "C" PyObject *method_keyword_call_handler( PyObject *_self_and_name_tuple } } -extern "C" void do_not_dealloc( void * ) -{} - //-------------------------------------------------------------------------------- // diff --git a/CXX/WrapPython.h b/CXX/WrapPython.h index 118a8740f8ad..6a73545d2d3d 100644 --- a/CXX/WrapPython.h +++ b/CXX/WrapPython.h @@ -57,4 +57,15 @@ // pull in python definitions #include <Python.h> +// fix issue with Python assuming that isspace, toupper etc are macros +#if defined(isspace) +#undef isspace +#undef isupper +#undef islower +#undef isalnum +#undef isalpha +#undef toupper +#undef tolower +#endif + #endif diff --git a/INSTALL b/INSTALL index fa03255b5844..e43b117ebd0a 100644 --- a/INSTALL +++ b/INSTALL @@ -196,6 +196,16 @@ libpng 1.2 (or later) user, you can ignore this since we build support into the matplotlib single click installer. +:term:`dateutil` 1.1 or later + Provides extensions to python datetime handling. If using pip, + easy_install or installing from source, the installer will attempt + to download and install `python_dateutil` from PyPI.
+ +:term:`pyparsing` + Required for matplotlib's mathtext math rendering support. If + using pip, easy_install or installing from source, the installer + will attempt to download and install `pyparsing` from PyPI. + **Optional** These are optional packages which you may want to install to use @@ -230,17 +240,8 @@ backends and the capabilities they provide. agg template source statically, so it will not affect anything on your system outside of matplotlib. -:term:`pytz` 2007g or later - timezone handling for python datetime objects. By default, - matplotlib will install pytz if it isn't already installed on your - system. To override the default, use :file:`setup.cfg` to force or - prevent installation of pytz. - -:term:`dateutil` 1.1 or later - provides extensions to python datetime handling. By default, matplotlib - will install dateutil if it isn't already installed on your - system. To override the default, use :file:`setup.cfg` to force - or prevent installation of dateutil. +:term:`PyCXX` 6.2.4 + A library for writing Python extensions in C++. .. _build_osx: diff --git a/MANIFEST.in b/MANIFEST.in index 9838df35c44b..0ee8e71c2677 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,20 +1,16 @@ -include CHANGELOG KNOWN_BUGS INSTALL +include CHANGELOG INSTALL include INTERACTIVE TODO CONTRIBUTING.md -include Makefile make.osx MANIFEST.in MANIFEST +include Makefile MANIFEST.in MANIFEST include matplotlibrc.template setup.cfg.template -include __init__.py setupext.py setup.py setupegg.py -include examples/data/* -include lib/mpl_toolkits -include lib/matplotlib/mpl-data/matplotlib.conf -include lib/matplotlib/mpl-data/matplotlib.conf.template +include setupext.py setup.py setupegg.py include lib/matplotlib/mpl-data/lineprops.glade include lib/matplotlib/mpl-data/matplotlibrc include lib/matplotlib/mpl-data/images/* include lib/matplotlib/mpl-data/fonts/ttf/* include lib/matplotlib/mpl-data/fonts/pdfcorefonts/* include lib/matplotlib/mpl-data/fonts/afm/* -recursive-include lib/matplotlib/mpl-data/sample_data/* -recursive-include license LICENSE* +recursive-include lib/matplotlib/mpl-data/sample_data/ * +recursive-include LICENSE * recursive-include examples * recursive-include doc * recursive-include src *.cpp *.c *.h *.m diff --git a/agg24/include/agg_rasterizer_cells_aa.h b/agg24/include/agg_rasterizer_cells_aa.h index c8f2cb80d49d..2be0efeca175 100755 --- a/agg24/include/agg_rasterizer_cells_aa.h +++ b/agg24/include/agg_rasterizer_cells_aa.h @@ -29,8 +29,7 @@ #ifndef AGG_RASTERIZER_CELLS_AA_INCLUDED #define AGG_RASTERIZER_CELLS_AA_INCLUDED -#include "CXX/Exception.hxx" -#include +#include #include #include #include "agg_math.h" @@ -183,9 +182,9 @@ namespace agg { if((m_num_cells & cell_block_mask) == 0) { - if(m_num_blocks >= cell_block_limit) { - throw Py::OverflowError( - "Agg rendering complexity exceeded. 
Consider downsampling or decimating your data."); + if (m_num_blocks >= cell_block_limit) + { + throw std::overflow_error("Allocated too many blocks"); } allocate_block(); } diff --git a/distribute_setup.py b/distribute_setup.py new file mode 100755 index 000000000000..8f5b0637bf39 --- /dev/null +++ b/distribute_setup.py @@ -0,0 +1,515 @@ +#!python +"""Bootstrap distribute installation + +If you want to use setuptools in your package's setup.py, just include this +file in the same directory with it, and add this to the top of your setup.py:: + + from distribute_setup import use_setuptools + use_setuptools() + +If you want to require a specific version of setuptools, set a download +mirror, or use an alternate download directory, you can do so by supplying +the appropriate options to ``use_setuptools()``. + +This file can also be run as a script to install or upgrade setuptools. +""" +import os +import sys +import time +import fnmatch +import tempfile +import tarfile +from distutils import log + +try: + from site import USER_SITE +except ImportError: + USER_SITE = None + +try: + import subprocess + + def _python_cmd(*args): + args = (sys.executable,) + args + return subprocess.call(args) == 0 + +except ImportError: + # will be used for python 2.3 + def _python_cmd(*args): + args = (sys.executable,) + args + # quoting arguments if windows + if sys.platform == 'win32': + def quote(arg): + if ' ' in arg: + return '"%s"' % arg + return arg + args = [quote(arg) for arg in args] + return os.spawnl(os.P_WAIT, sys.executable, *args) == 0 + +DEFAULT_VERSION = "0.6.28" +DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/" +SETUPTOOLS_FAKED_VERSION = "0.6c11" + +SETUPTOOLS_PKG_INFO = """\ +Metadata-Version: 1.0 +Name: setuptools +Version: %s +Summary: xxxx +Home-page: xxx +Author: xxx +Author-email: xxx +License: xxx +Description: xxx +""" % SETUPTOOLS_FAKED_VERSION + + +def _install(tarball, install_args=()): + # extracting the tarball + tmpdir = tempfile.mkdtemp() + log.warn('Extracting in %s', tmpdir) + old_wd = os.getcwd() + try: + os.chdir(tmpdir) + tar = tarfile.open(tarball) + _extractall(tar) + tar.close() + + # going in the directory + subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) + os.chdir(subdir) + log.warn('Now working in %s', subdir) + + # installing + log.warn('Installing Distribute') + if not _python_cmd('setup.py', 'install', *install_args): + log.warn('Something went wrong during the installation.') + log.warn('See the error message above.') + finally: + os.chdir(old_wd) + + +def _build_egg(egg, tarball, to_dir): + # extracting the tarball + tmpdir = tempfile.mkdtemp() + log.warn('Extracting in %s', tmpdir) + old_wd = os.getcwd() + try: + os.chdir(tmpdir) + tar = tarfile.open(tarball) + _extractall(tar) + tar.close() + + # going in the directory + subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) + os.chdir(subdir) + log.warn('Now working in %s', subdir) + + # building an egg + log.warn('Building a Distribute egg in %s', to_dir) + _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) + + finally: + os.chdir(old_wd) + # returning the result + log.warn(egg) + if not os.path.exists(egg): + raise IOError('Could not build the egg.') + + +def _do_download(version, download_base, to_dir, download_delay): + egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg' + % (version, sys.version_info[0], sys.version_info[1])) + if not os.path.exists(egg): + tarball = download_setuptools(version, download_base, + to_dir, download_delay) + _build_egg(egg, tarball, 
to_dir) + sys.path.insert(0, egg) + import setuptools + setuptools.bootstrap_install_from = egg + + +def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, + to_dir=os.curdir, download_delay=15, no_fake=True): + # making sure we use the absolute path + to_dir = os.path.abspath(to_dir) + was_imported = 'pkg_resources' in sys.modules or \ + 'setuptools' in sys.modules + try: + try: + import pkg_resources + if not hasattr(pkg_resources, '_distribute'): + if not no_fake: + _fake_setuptools() + raise ImportError + except ImportError: + return _do_download(version, download_base, to_dir, download_delay) + try: + pkg_resources.require("distribute>=" + version) + return + except pkg_resources.VersionConflict: + e = sys.exc_info()[1] + if was_imported: + sys.stderr.write( + "The required version of distribute (>=%s) is not available,\n" + "and can't be installed while this script is running. Please\n" + "install a more recent version first, using\n" + "'easy_install -U distribute'." + "\n\n(Currently using %r)\n" % (version, e.args[0])) + sys.exit(2) + else: + del pkg_resources, sys.modules['pkg_resources'] # reload ok + return _do_download(version, download_base, to_dir, + download_delay) + except pkg_resources.DistributionNotFound: + return _do_download(version, download_base, to_dir, + download_delay) + finally: + if not no_fake: + _create_fake_setuptools_pkg_info(to_dir) + + +def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, + to_dir=os.curdir, delay=15): + """Download distribute from a specified location and return its filename + + `version` should be a valid distribute version number that is available + as an egg for download under the `download_base` URL (which should end + with a '/'). `to_dir` is the directory where the egg will be downloaded. + `delay` is the number of seconds to pause before an actual download + attempt. + """ + # making sure we use the absolute path + to_dir = os.path.abspath(to_dir) + try: + from urllib.request import urlopen + except ImportError: + from urllib2 import urlopen + tgz_name = "distribute-%s.tar.gz" % version + url = download_base + tgz_name + saveto = os.path.join(to_dir, tgz_name) + src = dst = None + if not os.path.exists(saveto): # Avoid repeated downloads + try: + log.warn("Downloading %s", url) + src = urlopen(url) + # Read/write all in one block, so we don't create a corrupt file + # if the download is interrupted. 
+ data = src.read() + dst = open(saveto, "wb") + dst.write(data) + finally: + if src: + src.close() + if dst: + dst.close() + return os.path.realpath(saveto) + + +def _no_sandbox(function): + def __no_sandbox(*args, **kw): + try: + from setuptools.sandbox import DirectorySandbox + if not hasattr(DirectorySandbox, '_old'): + def violation(*args): + pass + DirectorySandbox._old = DirectorySandbox._violation + DirectorySandbox._violation = violation + patched = True + else: + patched = False + except ImportError: + patched = False + + try: + return function(*args, **kw) + finally: + if patched: + DirectorySandbox._violation = DirectorySandbox._old + del DirectorySandbox._old + + return __no_sandbox + + +def _patch_file(path, content): + """Will backup the file then patch it""" + existing_content = open(path).read() + if existing_content == content: + # already patched + log.warn('Already patched.') + return False + log.warn('Patching...') + _rename_path(path) + f = open(path, 'w') + try: + f.write(content) + finally: + f.close() + return True + +_patch_file = _no_sandbox(_patch_file) + + +def _same_content(path, content): + return open(path).read() == content + + +def _rename_path(path): + new_name = path + '.OLD.%s' % time.time() + log.warn('Renaming %s into %s', path, new_name) + os.rename(path, new_name) + return new_name + + +def _remove_flat_installation(placeholder): + if not os.path.isdir(placeholder): + log.warn('Unkown installation at %s', placeholder) + return False + found = False + for file in os.listdir(placeholder): + if fnmatch.fnmatch(file, 'setuptools*.egg-info'): + found = True + break + if not found: + log.warn('Could not locate setuptools*.egg-info') + return + + log.warn('Removing elements out of the way...') + pkg_info = os.path.join(placeholder, file) + if os.path.isdir(pkg_info): + patched = _patch_egg_dir(pkg_info) + else: + patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO) + + if not patched: + log.warn('%s already patched.', pkg_info) + return False + # now let's move the files out of the way + for element in ('setuptools', 'pkg_resources.py', 'site.py'): + element = os.path.join(placeholder, element) + if os.path.exists(element): + _rename_path(element) + else: + log.warn('Could not find the %s element of the ' + 'Setuptools distribution', element) + return True + +_remove_flat_installation = _no_sandbox(_remove_flat_installation) + + +def _after_install(dist): + log.warn('After install bootstrap.') + placeholder = dist.get_command_obj('install').install_purelib + _create_fake_setuptools_pkg_info(placeholder) + + +def _create_fake_setuptools_pkg_info(placeholder): + if not placeholder or not os.path.exists(placeholder): + log.warn('Could not find the install location') + return + pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1]) + setuptools_file = 'setuptools-%s-py%s.egg-info' % \ + (SETUPTOOLS_FAKED_VERSION, pyver) + pkg_info = os.path.join(placeholder, setuptools_file) + if os.path.exists(pkg_info): + log.warn('%s already exists', pkg_info) + return + + if not os.access(pkg_info, os.W_OK): + log.warn("Don't have permissions to write %s, skipping", pkg_info) + + log.warn('Creating %s', pkg_info) + f = open(pkg_info, 'w') + try: + f.write(SETUPTOOLS_PKG_INFO) + finally: + f.close() + + pth_file = os.path.join(placeholder, 'setuptools.pth') + log.warn('Creating %s', pth_file) + f = open(pth_file, 'w') + try: + f.write(os.path.join(os.curdir, setuptools_file)) + finally: + f.close() + +_create_fake_setuptools_pkg_info = _no_sandbox( + 
_create_fake_setuptools_pkg_info +) + + +def _patch_egg_dir(path): + # let's check if it's already patched + pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') + if os.path.exists(pkg_info): + if _same_content(pkg_info, SETUPTOOLS_PKG_INFO): + log.warn('%s already patched.', pkg_info) + return False + _rename_path(path) + os.mkdir(path) + os.mkdir(os.path.join(path, 'EGG-INFO')) + pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') + f = open(pkg_info, 'w') + try: + f.write(SETUPTOOLS_PKG_INFO) + finally: + f.close() + return True + +_patch_egg_dir = _no_sandbox(_patch_egg_dir) + + +def _before_install(): + log.warn('Before install bootstrap.') + _fake_setuptools() + + +def _under_prefix(location): + if 'install' not in sys.argv: + return True + args = sys.argv[sys.argv.index('install') + 1:] + for index, arg in enumerate(args): + for option in ('--root', '--prefix'): + if arg.startswith('%s=' % option): + top_dir = arg.split('root=')[-1] + return location.startswith(top_dir) + elif arg == option: + if len(args) > index: + top_dir = args[index + 1] + return location.startswith(top_dir) + if arg == '--user' and USER_SITE is not None: + return location.startswith(USER_SITE) + return True + + +def _fake_setuptools(): + log.warn('Scanning installed packages') + try: + import pkg_resources + except ImportError: + # we're cool + log.warn('Setuptools or Distribute does not seem to be installed.') + return + ws = pkg_resources.working_set + try: + setuptools_dist = ws.find( + pkg_resources.Requirement.parse('setuptools', replacement=False) + ) + except TypeError: + # old distribute API + setuptools_dist = ws.find( + pkg_resources.Requirement.parse('setuptools') + ) + + if setuptools_dist is None: + log.warn('No setuptools distribution found') + return + # detecting if it was already faked + setuptools_location = setuptools_dist.location + log.warn('Setuptools installation detected at %s', setuptools_location) + + # if --root or --preix was provided, and if + # setuptools is not located in them, we don't patch it + if not _under_prefix(setuptools_location): + log.warn('Not patching, --root or --prefix is installing Distribute' + ' in another location') + return + + # let's see if its an egg + if not setuptools_location.endswith('.egg'): + log.warn('Non-egg installation') + res = _remove_flat_installation(setuptools_location) + if not res: + return + else: + log.warn('Egg installation') + pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO') + if (os.path.exists(pkg_info) and + _same_content(pkg_info, SETUPTOOLS_PKG_INFO)): + log.warn('Already patched.') + return + log.warn('Patching...') + # let's create a fake egg replacing setuptools one + res = _patch_egg_dir(setuptools_location) + if not res: + return + log.warn('Patched done.') + _relaunch() + + +def _relaunch(): + log.warn('Relaunching...') + # we have to relaunch the process + # pip marker to avoid a relaunch bug + _cmd = ['-c', 'install', '--single-version-externally-managed'] + if sys.argv[:3] == _cmd: + sys.argv[0] = 'setup.py' + args = [sys.executable] + sys.argv + sys.exit(subprocess.call(args)) + + +def _extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). 
+ """ + import copy + import operator + from tarfile import ExtractError + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 448 # decimal for oct 0700 + self.extract(tarinfo, path) + + # Reverse sort directories. + if sys.version_info < (2, 4): + def sorter(dir1, dir2): + return cmp(dir1.name, dir2.name) + directories.sort(sorter) + directories.reverse() + else: + directories.sort(key=operator.attrgetter('name'), reverse=True) + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError: + e = sys.exc_info()[1] + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + +def _build_install_args(argv): + install_args = [] + user_install = '--user' in argv + if user_install and sys.version_info < (2, 6): + log.warn("--user requires Python 2.6 or later") + raise SystemExit(1) + if user_install: + install_args.append('--user') + return install_args + + +def main(argv, version=DEFAULT_VERSION): + """Install or upgrade setuptools and EasyInstall""" + tarball = download_setuptools() + _install(tarball, _build_install_args(argv)) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/doc/faq/installing_faq.rst b/doc/faq/installing_faq.rst index 6374d94875cd..ab2c90c7c92d 100644 --- a/doc/faq/installing_faq.rst +++ b/doc/faq/installing_faq.rst @@ -139,7 +139,7 @@ If you want to be able to follow the development branch as it changes just replace the last step with (make sure you have **setuptools** installed):: - > python setupegg.py develop + > python setup.py develop This creates links in the right places and installs the command line script to the appropriate places. diff --git a/lib/dateutil_py2/LICENSE b/lib/dateutil_py2/LICENSE deleted file mode 100644 index c5b5923c55e5..000000000000 --- a/lib/dateutil_py2/LICENSE +++ /dev/null @@ -1,259 +0,0 @@ -A. HISTORY OF THE SOFTWARE -========================== - -Python was created in the early 1990s by Guido van Rossum at Stichting -Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands -as a successor of a language called ABC. Guido remains Python's -principal author, although it includes many contributions from others. - -In 1995, Guido continued his work on Python at the Corporation for -National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) -in Reston, Virginia where he released several versions of the -software. - -In May 2000, Guido and the Python core development team moved to -BeOpen.com to form the BeOpen PythonLabs team. In October of the same -year, the PythonLabs team moved to Digital Creations (now Zope -Corporation, see http://www.zope.com). In 2001, the Python Software -Foundation (PSF, see http://www.python.org/psf/) was formed, a -non-profit organization created specifically to own Python-related -Intellectual Property. Zope Corporation is a sponsoring member of -the PSF. - -All Python releases are Open Source (see http://www.opensource.org for -the Open Source Definition). Historically, most, but not all, Python -releases have also been GPL-compatible; the table below summarizes -the various releases. - - Release Derived Year Owner GPL- - from compatible? 
(1) - - 0.9.0 thru 1.2 1991-1995 CWI yes - 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes - 1.6 1.5.2 2000 CNRI no - 2.0 1.6 2000 BeOpen.com no - 1.6.1 1.6 2001 CNRI yes (2) - 2.1 2.0+1.6.1 2001 PSF no - 2.0.1 2.0+1.6.1 2001 PSF yes - 2.1.1 2.1+2.0.1 2001 PSF yes - 2.2 2.1.1 2001 PSF yes - 2.1.2 2.1.1 2002 PSF yes - 2.1.3 2.1.2 2002 PSF yes - 2.2.1 2.2 2002 PSF yes - 2.2.2 2.2.1 2002 PSF yes - 2.2.3 2.2.2 2003 PSF yes - 2.3 2.2.2 2002-2003 PSF yes - -Footnotes: - -(1) GPL-compatible doesn't mean that we're distributing Python under - the GPL. All Python licenses, unlike the GPL, let you distribute - a modified version without making your changes open source. The - GPL-compatible licenses make it possible to combine Python with - other software that is released under the GPL; the others don't. - -(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, - because its license has a choice of law clause. According to - CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 - is "not incompatible" with the GPL. - -Thanks to the many outside volunteers who have worked under Guido's -direction to make these releases possible. - - -B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON -=============================================================== - -PSF LICENSE AGREEMENT FOR PYTHON 2.3 ------------------------------------- - -1. This LICENSE AGREEMENT is between the Python Software Foundation -("PSF"), and the Individual or Organization ("Licensee") accessing and -otherwise using Python 2.3 software in source or binary form and its -associated documentation. - -2. Subject to the terms and conditions of this License Agreement, PSF -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python 2.3 -alone or in any derivative version, provided, however, that PSF's -License Agreement and PSF's notice of copyright, i.e., "Copyright (c) -2001, 2002, 2003 Python Software Foundation; All Rights Reserved" are -retained in Python 2.3 alone or in any derivative version prepared by -Licensee. - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python 2.3 or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python 2.3. - -4. PSF is making Python 2.3 available to Licensee on an "AS IS" -basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 2.3 WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -2.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.3, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any -relationship of agency, partnership, or joint venture between PSF and -Licensee. 
This License Agreement does not grant permission to use PSF -trademarks or trade name in a trademark sense to endorse or promote -products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using Python 2.3, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 -------------------------------------------- - -BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 - -1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an -office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the -Individual or Organization ("Licensee") accessing and otherwise using -this software in source or binary form and its associated -documentation ("the Software"). - -2. Subject to the terms and conditions of this BeOpen Python License -Agreement, BeOpen hereby grants Licensee a non-exclusive, -royalty-free, world-wide license to reproduce, analyze, test, perform -and/or display publicly, prepare derivative works, distribute, and -otherwise use the Software alone or in any derivative version, -provided, however, that the BeOpen Python License is retained in the -Software, alone or in any derivative version prepared by Licensee. - -3. BeOpen is making the Software available to Licensee on an "AS IS" -basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE -SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS -AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY -DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -5. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -6. This License Agreement shall be governed by and interpreted in all -respects by the law of the State of California, excluding conflict of -law provisions. Nothing in this License Agreement shall be deemed to -create any relationship of agency, partnership, or joint venture -between BeOpen and Licensee. This License Agreement does not grant -permission to use BeOpen trademarks or trade names in a trademark -sense to endorse or promote products or services of Licensee, or any -third party. As an exception, the "BeOpen Python" logos available at -http://www.pythonlabs.com/logos.html may be used according to the -permissions granted on that web page. - -7. By copying, installing or otherwise using the software, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 ---------------------------------------- - -1. This LICENSE AGREEMENT is between the Corporation for National -Research Initiatives, having an office at 1895 Preston White Drive, -Reston, VA 20191 ("CNRI"), and the Individual or Organization -("Licensee") accessing and otherwise using Python 1.6.1 software in -source or binary form and its associated documentation. - -2. 
Subject to the terms and conditions of this License Agreement, CNRI -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python 1.6.1 -alone or in any derivative version, provided, however, that CNRI's -License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) -1995-2001 Corporation for National Research Initiatives; All Rights -Reserved" are retained in Python 1.6.1 alone or in any derivative -version prepared by Licensee. Alternately, in lieu of CNRI's License -Agreement, Licensee may substitute the following text (omitting the -quotes): "Python 1.6.1 is made available subject to the terms and -conditions in CNRI's License Agreement. This Agreement together with -Python 1.6.1 may be located on the Internet using the following -unique, persistent identifier (known as a handle): 1895.22/1013. This -Agreement may also be obtained from a proxy server on the Internet -using the following URL: http://hdl.handle.net/1895.22/1013". - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python 1.6.1 or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python 1.6.1. - -4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" -basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. This License Agreement shall be governed by the federal -intellectual property law of the United States, including without -limitation the federal copyright law, and, to the extent such -U.S. federal law does not apply, by the law of the Commonwealth of -Virginia, excluding Virginia's conflict of law provisions. -Notwithstanding the foregoing, with regard to derivative works based -on Python 1.6.1 that incorporate non-separable material that was -previously distributed under the GNU General Public License (GPL), the -law of the Commonwealth of Virginia shall govern this License -Agreement only as to issues arising under or with respect to -Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this -License Agreement shall be deemed to create any relationship of -agency, partnership, or joint venture between CNRI and Licensee. This -License Agreement does not grant permission to use CNRI trademarks or -trade name in a trademark sense to endorse or promote products or -services of Licensee, or any third party. - -8. By clicking on the "ACCEPT" button where indicated, or by copying, -installing or otherwise using Python 1.6.1, Licensee agrees to be -bound by the terms and conditions of this License Agreement. 
- - ACCEPT - - -CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 --------------------------------------------------- - -Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, -The Netherlands. All rights reserved. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appear in all copies and that -both that copyright notice and this permission notice appear in -supporting documentation, and that the name of Stichting Mathematisch -Centrum or CWI not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. - -STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO -THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE -FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/lib/dateutil_py2/NEWS b/lib/dateutil_py2/NEWS deleted file mode 100644 index 8738e849e29c..000000000000 --- a/lib/dateutil_py2/NEWS +++ /dev/null @@ -1,143 +0,0 @@ - -Version 1.5 ------------ - -- As reported by Mathieu Bridon, rrules were matching the bysecond rules - incorrectly against byminute in some circumstances when the SECONDLY - frequency was in use, due to a copy & paste bug. The problem has been - unittested and corrected. - -- Adam Ryan reported a problem in the relativedelta implementation which - affected the yearday parameter in the month of January specifically. - This has been unittested and fixed. - -- Updated timezone information. - - -Version 1.4.1 -------------- - -- Updated timezone information. - - -Version 1.4 ------------ - -- Fixed another parser precision problem on conversion of decimal seconds - to microseconds, as reported by Erik Brown. Now these issues are gone - for real since it's not using floating point arithmetic anymore. - -- Fixed case where tzrange.utcoffset and tzrange.dst() might fail due - to a date being used where a datetime was expected (reported and fixed - by Lennart Regebro). - -- Prevent tzstr from introducing daylight timings in strings that didn't - specify them (reported by Lennart Regebro). - -- Calls like gettz("GMT+3") and gettz("UTC-2") will now return the - expected values, instead of the TZ variable behavior. - -- Fixed DST signal handling in zoneinfo files. Reported by - Nicholas F. Fabry and John-Mark Gurney. - - -Version 1.3 ------------ - -- Fixed precision problem on conversion of decimal seconds to - microseconds, as reported by Skip Montanaro. - -- Fixed bug in constructor of parser, and converted parser classes to - new-style classes. Original report and patch by Michael Elsdörfer. - -- Initialize tzid and comps in tz.py, to prevent the code from ever - raising a NameError (even with broken files). Johan Dahlin suggested - the fix after a pyflakes run. - -- Version is now published in dateutil.__version__, as requested - by Darren Dale. - -- All code is compatible with new-style division. - - -Version 1.2 ------------ - -- Now tzfile will round timezones to full-minutes if necessary, - since Python's datetime doesn't support sub-minute offsets. - Thanks to Ilpo Nyyssönen for reporting the issue. 
- -- Removed bare string exceptions, as reported and fixed by - Wilfredo Sánchez Vega. - -- Fix bug in leap count parsing (reported and fixed by Eugene Oden). - - -Version 1.1 ------------ - -- Fixed rrule byyearday handling. Abramo Bagnara pointed out that - RFC2445 allows negative numbers. - -- Fixed --prefix handling in setup.py (by Sidnei da Silva). - -- Now tz.gettz() returns a tzlocal instance when not given any - arguments and no other timezone information is found. - -- Updating timezone information to version 2005q. - - -Version 1.0 ------------ - -- Fixed parsing of XXhXXm formatted time after day/month/year - has been parsed. - -- Added patch by Jeffrey Harris optimizing rrule.__contains__. - - -Version 0.9 ------------ - -- Fixed pickling of timezone types, as reported by - Andreas Köhler. - -- Implemented internal timezone information with binary - timezone files [1]. datautil.tz.gettz() function will now - try to use the system timezone files, and fallback to - the internal versions. It's also possible to ask for - the internal versions directly by using - dateutil.zoneinfo.gettz(). - -- New tzwin timezone type, allowing access to Windows - internal timezones (contributed by Jeffrey Harris). - -- Fixed parsing of unicode date strings. - -- Accept parserinfo instances as the parser constructor - parameter, besides parserinfo (sub)classes. - -- Changed weekday to spell the not-set n value as None - instead of 0. - -- Fixed other reported bugs. - -[1] http://www.twinsun.com/tz/tz-link.htm - - -Version 0.5 ------------ - -- Removed FREQ_ prefix from rrule frequency constants - WARNING: this breaks compatibility with previous versions. - -- Fixed rrule.between() for cases where "after" is achieved - before even starting, as reported by Andreas Köhler. - -- Fixed two digit zero-year parsing (such as 31-Dec-00), as - reported by Jim Abramson, and included test case for this. - -- Sort exdate and rdate before iterating over them, so that - it's not necessary to sort them before adding to the rruleset, - as reported by Nicholas Piper. - diff --git a/lib/dateutil_py2/README b/lib/dateutil_py2/README deleted file mode 100644 index dbe7988ce499..000000000000 --- a/lib/dateutil_py2/README +++ /dev/null @@ -1,1970 +0,0 @@ -## This file is in the moin format. The latest version is found -## at https://moin.conectiva.com.br/DateUtil - -== Contents == -[[TableOfContents]] - -== Description == -The '''dateutil''' module provides powerful extensions to -the standard '''datetime''' module, available in Python 2.3+. - -== Features == - - * Computing of relative deltas (next month, next year, - next monday, last week of month, etc); - - * Computing of relative deltas between two given - date and/or datetime objects; - - * Computing of dates based on very flexible recurrence rules, - using a superset of the - [ftp://ftp.rfc-editor.org/in-notes/rfc2445.txt iCalendar] - specification. Parsing of RFC strings is supported as well. - - * Generic parsing of dates in almost any string format; - - * Timezone (tzinfo) implementations for tzfile(5) format - files (/etc/localtime, /usr/share/zoneinfo, etc), TZ - environment string (in all known formats), iCalendar - format files, given ranges (with help from relative deltas), - local machine timezone, fixed offset timezone, UTC timezone, - and Windows registry-based time zones. - - * Internal up-to-date world timezone information based on - Olson's database. 
- - * Computing of Easter Sunday dates for any given year, - using Western, Orthodox or Julian algorithms; - - * More than 400 test cases. - -== Quick example == -Here's a snapshot, just to give an idea about the power of the -package. For more examples, look at the documentation below. - -Suppose you want to know how much time is left, in -years/months/days/etc, before the next easter happening on a -year with a Friday 13th in August, and you want to get today's -date out of the "date" unix system command. Here is the code: -{{{ -from dateutil.relativedelta import * -from dateutil.easter import * -from dateutil.rrule import * -from dateutil.parser import * -from datetime import * -import commands -import os -now = parse(commands.getoutput("date")) -today = now.date() -year = rrule(YEARLY,bymonth=8,bymonthday=13,byweekday=FR)[0].year -rdelta = relativedelta(easter(year), today) -print "Today is:", today -print "Year with next Aug 13th on a Friday is:", year -print "How far is the Easter of that year:", rdelta -print "And the Easter of that year is:", today+rdelta -}}} - -And here's the output: -{{{ -Today is: 2003-10-11 -Year with next Aug 13th on a Friday is: 2004 -How far is the Easter of that year: relativedelta(months=+6) -And the Easter of that year is: 2004-04-11 -}}} - -{i} Being exactly 6 months ahead was '''really''' a coincidence :) - -== Download == -The following files are available. - * attachment:python-dateutil-1.0.tar.bz2 - * attachment:python-dateutil-1.0-1.noarch.rpm - -== Author == -The dateutil module was written by GustavoNiemeyer . - -== Documentation == -The following modules are available. - -=== relativedelta === -This module offers the '''relativedelta''' type, which is based -on the specification of the excelent work done by M.-A. Lemburg in his -[http://www.egenix.com/files/python/mxDateTime.html mxDateTime] -extension. However, notice that this type '''does not''' implement the -same algorithm as his work. Do not expect it to behave like -{{{mxDateTime}}}'s counterpart. - -==== relativedelta type ==== - -There's two different ways to build a relativedelta instance. The -first one is passing it two {{{date}}}/{{{datetime}}} instances: -{{{ -relativedelta(datetime1, datetime2) -}}} - -This will build the relative difference between {{{datetime1}}} and -{{{datetime2}}}, so that the following constraint is always true: -{{{ -datetime2+relativedelta(datetime1, datetime2) == datetime1 -}}} - -Notice that instead of {{{datetime}}} instances, you may use -{{{date}}} instances, or a mix of both. - -And the other way is to use any of the following keyword arguments: - - year, month, day, hour, minute, second, microsecond:: - Absolute information. - - years, months, weeks, days, hours, minutes, seconds, microseconds:: - Relative information, may be negative. - - weekday:: - One of the weekday instances ({{{MO}}}, {{{TU}}}, etc). These - instances may receive a parameter {{{n}}}, specifying the {{{n}}}th - weekday, which could be positive or negative (like {{{MO(+2)}}} or - {{{MO(-3)}}}. Not specifying it is the same as specifying {{{+1}}}. - You can also use an integer, where {{{0=MO}}}. Notice that, - for example, if the calculated date is already Monday, using - {{{MO}}} or {{{MO(+1)}}} (which is the same thing in this context), - won't change the day. - - leapdays:: - Will add given days to the date found, but only if the computed - year is a leap year and the computed date is post 28 of february. 
- - yearday, nlyearday:: - Set the yearday or the non-leap year day (jump leap days). - These are converted to {{{day}}}/{{{month}}}/{{{leapdays}}} - information. - -==== Behavior of operations ==== -If you're curious about exactly how the relative delta will act -on operations, here is a description of its behavior. - - 1. Calculate the absolute year, using the {{{year}}} argument, or the - original datetime year, if the argument is not present. - 1. Add the relative {{{years}}} argument to the absolute year. - 1. Do steps 1 and 2 for {{{month}}}/{{{months}}}. - 1. Calculate the absolute day, using the {{{day}}} argument, or the - original datetime day, if the argument is not present. Then, subtract - from the day until it fits in the year and month found after their - operations. - 1. Add the relative {{{days}}} argument to the absolute day. Notice - that the {{{weeks}}} argument is multiplied by 7 and added to {{{days}}}. - 1. If {{{leapdays}}} is present, the computed year is a leap year, and - the computed month is after february, remove one day from the found date. - 1. Do steps 1 and 2 for {{{hour}}}/{{{hours}}}, {{{minute}}}/{{{minutes}}}, - {{{second}}}/{{{seconds}}}, {{{microsecond}}}/{{{microseconds}}}. - 1. If the {{{weekday}}} argument is present, calculate the {{{n}}}th - occurrence of the given weekday. - -==== Examples ==== - -Let's begin our trip. -{{{ ->>> from datetime import *; from dateutil.relativedelta import * ->>> import calendar -}}} - -Store some values. -{{{ ->>> NOW = datetime.now() ->>> TODAY = date.today() ->>> NOW -datetime.datetime(2003, 9, 17, 20, 54, 47, 282310) ->>> TODAY -datetime.date(2003, 9, 17) -}}} - -Next month. -{{{ ->>> NOW+relativedelta(months=+1) -datetime.datetime(2003, 10, 17, 20, 54, 47, 282310) -}}} - -Next month, plus one week. -{{{ ->>> NOW+relativedelta(months=+1, weeks=+1) -datetime.datetime(2003, 10, 24, 20, 54, 47, 282310) -}}} - -Next month, plus one week, at 10am. -{{{ ->>> TODAY+relativedelta(months=+1, weeks=+1, hour=10) -datetime.datetime(2003, 10, 24, 10, 0) -}}} - -Let's try the other way around. Notice that the -hour setting we get in the relativedelta is relative, -since it's a difference, and the weeks parameter -has gone. -{{{ ->>> relativedelta(datetime(2003, 10, 24, 10, 0), TODAY) -relativedelta(months=+1, days=+7, hours=+10) -}}} - -One month before one year. -{{{ ->>> NOW+relativedelta(years=+1, months=-1) -datetime.datetime(2004, 8, 17, 20, 54, 47, 282310) -}}} - -How does it handle months with different numbers of days? -Notice that adding one month will never cross the month -boundary. -{{{ ->>> date(2003,1,27)+relativedelta(months=+1) -datetime.date(2003, 2, 27) ->>> date(2003,1,31)+relativedelta(months=+1) -datetime.date(2003, 2, 28) ->>> date(2003,1,31)+relativedelta(months=+2) -datetime.date(2003, 3, 31) -}}} - -The logic for years is the same, even on leap years. -{{{ ->>> date(2000,2,28)+relativedelta(years=+1) -datetime.date(2001, 2, 28) ->>> date(2000,2,29)+relativedelta(years=+1) -datetime.date(2001, 2, 28) - ->>> date(1999,2,28)+relativedelta(years=+1) -datetime.date(2000, 2, 28) ->>> date(1999,3,1)+relativedelta(years=+1) -datetime.date(2000, 3, 1) - ->>> date(2001,2,28)+relativedelta(years=-1) -datetime.date(2000, 2, 28) ->>> date(2001,3,1)+relativedelta(years=-1) -datetime.date(2000, 3, 1) -}}} - -Next friday. -{{{ ->>> TODAY+relativedelta(weekday=FR) -datetime.date(2003, 9, 19) - ->>> TODAY+relativedelta(weekday=calendar.FRIDAY) -datetime.date(2003, 9, 19) -}}} - -Last friday in this month. 
-{{{ ->>> TODAY+relativedelta(day=31, weekday=FR(-1)) -datetime.date(2003, 9, 26) -}}} - -Next wednesday (it's today!). -{{{ ->>> TODAY+relativedelta(weekday=WE(+1)) -datetime.date(2003, 9, 17) -}}} - -Next wednesday, but not today. -{{{ ->>> TODAY+relativedelta(days=+1, weekday=WE(+1)) -datetime.date(2003, 9, 24) -}}} - -Following -[http://www.cl.cam.ac.uk/~mgk25/iso-time.html ISO year week number notation] -find the first day of the 15th week of 1997. -{{{ ->>> datetime(1997,1,1)+relativedelta(day=4, weekday=MO(-1), weeks=+14) -datetime.datetime(1997, 4, 7, 0, 0) -}}} - -How long ago has the millennium changed? -{{{ ->>> relativedelta(NOW, date(2001,1,1)) -relativedelta(years=+2, months=+8, days=+16, - hours=+20, minutes=+54, seconds=+47, microseconds=+282310) -}}} - -How old is John? -{{{ ->>> johnbirthday = datetime(1978, 4, 5, 12, 0) ->>> relativedelta(NOW, johnbirthday) -relativedelta(years=+25, months=+5, days=+12, - hours=+8, minutes=+54, seconds=+47, microseconds=+282310) -}}} - -It works with dates too. -{{{ ->>> relativedelta(TODAY, johnbirthday) -relativedelta(years=+25, months=+5, days=+11, hours=+12) -}}} - -Obtain today's date using the yearday: -{{{ ->>> date(2003, 1, 1)+relativedelta(yearday=260) -datetime.date(2003, 9, 17) -}}} - -We can use today's date, since yearday should be absolute -in the given year: -{{{ ->>> TODAY+relativedelta(yearday=260) -datetime.date(2003, 9, 17) -}}} - -Last year it should be in the same day: -{{{ ->>> date(2002, 1, 1)+relativedelta(yearday=260) -datetime.date(2002, 9, 17) -}}} - -But not in a leap year: -{{{ ->>> date(2000, 1, 1)+relativedelta(yearday=260) -datetime.date(2000, 9, 16) -}}} - -We can use the non-leap year day to ignore this: -{{{ ->>> date(2000, 1, 1)+relativedelta(nlyearday=260) -datetime.date(2000, 9, 17) -}}} - -=== rrule === -The rrule module offers a small, complete, and very fast, implementation -of the recurrence rules documented in the -[ftp://ftp.rfc-editor.org/in-notes/rfc2445.txt iCalendar RFC], including -support for caching of results. - -==== rrule type ==== -That's the base of the rrule operation. It accepts all the keywords -defined in the RFC as its constructor parameters (except {{{byday}}}, -which was renamed to {{{byweekday}}}) and more. The constructor -prototype is: -{{{ -rrule(freq) -}}} - -Where {{{freq}}} must be one of {{{YEARLY}}}, {{{MONTHLY}}}, -{{{WEEKLY}}}, {{{DAILY}}}, {{{HOURLY}}}, {{{MINUTELY}}}, -or {{{SECONDLY}}}. - -Additionally, it supports the following keyword arguments: - - cache:: - If given, it must be a boolean value specifying to enable - or disable caching of results. If you will use the same - {{{rrule}}} instance multiple times, enabling caching will - improve the performance considerably. - - dtstart:: - The recurrence start. Besides being the base for the - recurrence, missing parameters in the final recurrence - instances will also be extracted from this date. If not - given, {{{datetime.now()}}} will be used instead. - - interval:: - The interval between each {{{freq}}} iteration. For example, - when using {{{YEARLY}}}, an interval of {{{2}}} means - once every two years, but with {{{HOURLY}}}, it means - once every two hours. The default interval is {{{1}}}. - - wkst:: - The week start day. Must be one of the {{{MO}}}, {{{TU}}}, - {{{WE}}} constants, or an integer, specifying the first day - of the week. This will affect recurrences based on weekly - periods. 
The default week start is got from - {{{calendar.firstweekday()}}}, and may be modified by - {{{calendar.setfirstweekday()}}}. - - count:: - How many occurrences will be generated. - - until:: - If given, this must be a {{{datetime}}} instance, that will - specify the limit of the recurrence. If a recurrence instance - happens to be the same as the {{{datetime}}} instance given - in the {{{until}}} keyword, this will be the last occurrence. - - bysetpos:: - If given, it must be either an integer, or a sequence of - integers, positive or negative. Each given integer will - specify an occurrence number, corresponding to the nth - occurrence of the rule inside the frequency period. For - example, a {{{bysetpos}}} of {{{-1}}} if combined with a - {{{MONTHLY}}} frequency, and a {{{byweekday}}} of - {{{(MO, TU, WE, TH, FR)}}}, will result in the last work - day of every month. - - bymonth:: - If given, it must be either an integer, or a sequence of - integers, meaning the months to apply the recurrence to. - - bymonthday:: - If given, it must be either an integer, or a sequence of - integers, meaning the month days to apply the recurrence to. - - byyearday:: - If given, it must be either an integer, or a sequence of - integers, meaning the year days to apply the recurrence to. - - byweekno:: - If given, it must be either an integer, or a sequence of - integers, meaning the week numbers to apply the recurrence - to. Week numbers have the meaning described in ISO8601, - that is, the first week of the year is that containing at - least four days of the new year. - - byweekday:: - If given, it must be either an integer ({{{0 == MO}}}), a - sequence of integers, one of the weekday constants - ({{{MO}}}, {{{TU}}}, etc), or a sequence of these constants. - When given, these variables will define the weekdays where - the recurrence will be applied. It's also possible to use - an argument {{{n}}} for the weekday instances, which will - mean the {{{n}}}''th'' occurrence of this weekday in the - period. For example, with {{{MONTHLY}}}, or with - {{{YEARLY}}} and {{{BYMONTH}}}, using {{{FR(+1)}}} - in {{{byweekday}}} will specify the first friday of the - month where the recurrence happens. Notice that in the RFC - documentation, this is specified as {{{BYDAY}}}, but was - renamed to avoid the ambiguity of that keyword. - - byhour:: - If given, it must be either an integer, or a sequence of - integers, meaning the hours to apply the recurrence to. - - byminute:: - If given, it must be either an integer, or a sequence of - integers, meaning the minutes to apply the recurrence to. - - bysecond:: - If given, it must be either an integer, or a sequence of - integers, meaning the seconds to apply the recurrence to. - - byeaster:: - If given, it must be either an integer, or a sequence of - integers, positive or negative. Each integer will define - an offset from the Easter Sunday. Passing the offset - {{{0}}} to {{{byeaster}}} will yield the Easter Sunday - itself. This is an extension to the RFC specification. - -==== rrule methods ==== -The following methods are available in {{{rrule}}} instances: - - rrule.before(dt, inc=False):: - Returns the last recurrence before the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rrule.after(dt, inc=False):: - Returns the first recurrence after the given {{{datetime}}} - instance. 
The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rrule.between(after, before, inc=False):: - Returns all the occurrences of the rrule between {{{after}}} - and {{{before}}}. The {{{inc}}} keyword defines what happens - if {{{after}}} and/or {{{before}}} are themselves occurrences. - With {{{inc == True}}}, they will be included in the list, - if they are found in the recurrence set. - - rrule.count():: - Returns the number of recurrences in this set. It will have - go trough the whole recurrence, if this hasn't been done - before. - -Besides these methods, {{{rrule}}} instances also support -the {{{__getitem__()}}} and {{{__contains__()}}} special methods, -meaning that these are valid expressions: -{{{ -rr = rrule(...) -if datetime(...) in rr: - ... -print rr[0] -print rr[-1] -print rr[1:2] -print rr[::-2] -}}} - -The getitem/slicing mechanism is smart enough to avoid getting the whole -recurrence set, if possible. - -==== Notes ==== - - * The rrule type has no {{{byday}}} keyword. The equivalent keyword - has been replaced by the {{{byweekday}}} keyword, to remove the - ambiguity present in the original keyword. - - * Unlike documented in the RFC, the starting datetime ({{{dtstart}}}) - is not the first recurrence instance, unless it does fit in the - specified rules. In a python module context, this behavior makes more - sense than otherwise. Notice that you can easily get the original - behavior by using a rruleset and adding the {{{dtstart}}} as an - {{{rdate}}} recurrence. - - * Unlike documented in the RFC, every keyword is valid on every - frequency (the RFC documents that {{{byweekno}}} is only valid - on yearly frequencies, for example). - - * In addition to the documented keywords, a {{{byeaster}}} keyword - was introduced, making it easy to compute recurrent events relative - to the Easter Sunday. - -==== rrule examples ==== -These examples were converted from the RFC. - -Prepare the environment. -{{{ ->>> from dateutil.rrule import * ->>> from dateutil.parser import * ->>> from datetime import * - ->>> import pprint ->>> import sys ->>> sys.displayhook = pprint.pprint -}}} - -Daily, for 10 occurrences. -{{{ ->>> list(rrule(DAILY, count=10, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 5, 9, 0), - datetime.datetime(1997, 9, 6, 9, 0), - datetime.datetime(1997, 9, 7, 9, 0), - datetime.datetime(1997, 9, 8, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 10, 9, 0), - datetime.datetime(1997, 9, 11, 9, 0)] -}}} - -Daily until December 24, 1997 -{{{ ->>> list(rrule(DAILY, - dtstart=parse("19970902T090000"), - until=parse("19971224T000000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - (...) - datetime.datetime(1997, 12, 21, 9, 0), - datetime.datetime(1997, 12, 22, 9, 0), - datetime.datetime(1997, 12, 23, 9, 0)] -}}} - -Every other day, 5 occurrences. -{{{ ->>> list(rrule(DAILY, interval=2, count=5, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 6, 9, 0), - datetime.datetime(1997, 9, 8, 9, 0), - datetime.datetime(1997, 9, 10, 9, 0)] -}}} - -Every 10 days, 5 occurrences. 
-{{{ ->>> list(rrule(DAILY, interval=10, count=5, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 12, 9, 0)] -}}} - -Everyday in January, for 3 years. -{{{ ->>> list(rrule(YEARLY, bymonth=1, byweekday=range(7), - dtstart=parse("19980101T090000"), - until=parse("20000131T090000"))) -[datetime.datetime(1998, 1, 1, 9, 0), - datetime.datetime(1998, 1, 2, 9, 0), - (...) - datetime.datetime(1998, 1, 30, 9, 0), - datetime.datetime(1998, 1, 31, 9, 0), - datetime.datetime(1999, 1, 1, 9, 0), - datetime.datetime(1999, 1, 2, 9, 0), - (...) - datetime.datetime(1999, 1, 30, 9, 0), - datetime.datetime(1999, 1, 31, 9, 0), - datetime.datetime(2000, 1, 1, 9, 0), - datetime.datetime(2000, 1, 2, 9, 0), - (...) - datetime.datetime(2000, 1, 29, 9, 0), - datetime.datetime(2000, 1, 31, 9, 0)] -}}} - -Same thing, in another way. -{{{ ->>> list(rrule(DAILY, bymonth=1, - dtstart=parse("19980101T090000"), - until=parse("20000131T090000"))) -(...) -}}} - -Weekly for 10 occurrences. -{{{ ->>> list(rrule(WEEKLY, count=10, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 7, 9, 0), - datetime.datetime(1997, 10, 14, 9, 0), - datetime.datetime(1997, 10, 21, 9, 0), - datetime.datetime(1997, 10, 28, 9, 0), - datetime.datetime(1997, 11, 4, 9, 0)] -}}} - -Every other week, 6 occurrences. -{{{ ->>> list(rrule(WEEKLY, interval=2, count=6, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 14, 9, 0), - datetime.datetime(1997, 10, 28, 9, 0), - datetime.datetime(1997, 11, 11, 9, 0)] -}}} - -Weekly on Tuesday and Thursday for 5 weeks. -{{{ ->>> list(rrule(WEEKLY, count=10, wkst=SU, byweekday=(TU,TH), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 11, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 18, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0), - datetime.datetime(1997, 9, 25, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0)] -}}} - -Every other week on Tuesday and Thursday, for 8 occurrences. -{{{ ->>> list(rrule(WEEKLY, interval=2, count=8, - wkst=SU, byweekday=(TU,TH), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 18, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 14, 9, 0), - datetime.datetime(1997, 10, 16, 9, 0)] -}}} - -Monthly on the 1st Friday for ten occurrences. 
-{{{ ->>> list(rrule(MONTHLY, count=10, byweekday=FR(1), - dtstart=parse("19970905T090000"))) -[datetime.datetime(1997, 9, 5, 9, 0), - datetime.datetime(1997, 10, 3, 9, 0), - datetime.datetime(1997, 11, 7, 9, 0), - datetime.datetime(1997, 12, 5, 9, 0), - datetime.datetime(1998, 1, 2, 9, 0), - datetime.datetime(1998, 2, 6, 9, 0), - datetime.datetime(1998, 3, 6, 9, 0), - datetime.datetime(1998, 4, 3, 9, 0), - datetime.datetime(1998, 5, 1, 9, 0), - datetime.datetime(1998, 6, 5, 9, 0)] -}}} - -Every other month on the 1st and last Sunday of the month for 10 occurrences. -{{{ ->>> list(rrule(MONTHLY, interval=2, count=10, - byweekday=(SU(1), SU(-1)), - dtstart=parse("19970907T090000"))) -[datetime.datetime(1997, 9, 7, 9, 0), - datetime.datetime(1997, 9, 28, 9, 0), - datetime.datetime(1997, 11, 2, 9, 0), - datetime.datetime(1997, 11, 30, 9, 0), - datetime.datetime(1998, 1, 4, 9, 0), - datetime.datetime(1998, 1, 25, 9, 0), - datetime.datetime(1998, 3, 1, 9, 0), - datetime.datetime(1998, 3, 29, 9, 0), - datetime.datetime(1998, 5, 3, 9, 0), - datetime.datetime(1998, 5, 31, 9, 0)] -}}} - -Monthly on the second to last Monday of the month for 6 months. -{{{ ->>> list(rrule(MONTHLY, count=6, byweekday=MO(-2), - dtstart=parse("19970922T090000"))) -[datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 20, 9, 0), - datetime.datetime(1997, 11, 17, 9, 0), - datetime.datetime(1997, 12, 22, 9, 0), - datetime.datetime(1998, 1, 19, 9, 0), - datetime.datetime(1998, 2, 16, 9, 0)] -}}} - -Monthly on the third to the last day of the month, for 6 months. -{{{ ->>> list(rrule(MONTHLY, count=6, bymonthday=-3, - dtstart=parse("19970928T090000"))) -[datetime.datetime(1997, 9, 28, 9, 0), - datetime.datetime(1997, 10, 29, 9, 0), - datetime.datetime(1997, 11, 28, 9, 0), - datetime.datetime(1997, 12, 29, 9, 0), - datetime.datetime(1998, 1, 29, 9, 0), - datetime.datetime(1998, 2, 26, 9, 0)] -}}} - -Monthly on the 2nd and 15th of the month for 5 occurrences. -{{{ ->>> list(rrule(MONTHLY, count=5, bymonthday=(2,15), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 15, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 15, 9, 0), - datetime.datetime(1997, 11, 2, 9, 0)] -}}} - -Monthly on the first and last day of the month for 3 occurrences. -{{{ ->>> list(rrule(MONTHLY, count=5, bymonthday=(-1,1,), - dtstart=parse("1997090 -2T090000"))) -[datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 1, 9, 0), - datetime.datetime(1997, 10, 31, 9, 0), - datetime.datetime(1997, 11, 1, 9, 0), - datetime.datetime(1997, 11, 30, 9, 0)] -}}} - -Every 18 months on the 10th thru 15th of the month for 10 occurrences. -{{{ ->>> list(rrule(MONTHLY, interval=18, count=10, - bymonthday=range(10,16), - dtstart=parse("19970910T090000"))) -[datetime.datetime(1997, 9, 10, 9, 0), - datetime.datetime(1997, 9, 11, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 13, 9, 0), - datetime.datetime(1997, 9, 14, 9, 0), - datetime.datetime(1997, 9, 15, 9, 0), - datetime.datetime(1999, 3, 10, 9, 0), - datetime.datetime(1999, 3, 11, 9, 0), - datetime.datetime(1999, 3, 12, 9, 0), - datetime.datetime(1999, 3, 13, 9, 0)] -}}} - -Every Tuesday, every other month, 6 occurences. 
-{{{ ->>> list(rrule(MONTHLY, interval=2, count=6, byweekday=TU, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 11, 4, 9, 0)] -}}} - -Yearly in June and July for 10 occurrences. -{{{ ->>> list(rrule(YEARLY, count=4, bymonth=(6,7), - dtstart=parse("19970610T0900 -00"))) -[datetime.datetime(1997, 6, 10, 9, 0), - datetime.datetime(1997, 7, 10, 9, 0), - datetime.datetime(1998, 6, 10, 9, 0), - datetime.datetime(1998, 7, 10, 9, 0)] -}}} - -Every 3rd year on the 1st, 100th and 200th day for 4 occurrences. -{{{ ->>> list(rrule(YEARLY, count=4, interval=3, byyearday=(1,100,200), - dtstart=parse("19970101T090000"))) -[datetime.datetime(1997, 1, 1, 9, 0), - datetime.datetime(1997, 4, 10, 9, 0), - datetime.datetime(1997, 7, 19, 9, 0), - datetime.datetime(2000, 1, 1, 9, 0)] -}}} - -Every 20th Monday of the year, 3 occurrences. -{{{ ->>> list(rrule(YEARLY, count=3, byweekday=MO(20), - dtstart=parse("19970519T090000"))) -[datetime.datetime(1997, 5, 19, 9, 0), - datetime.datetime(1998, 5, 18, 9, 0), - datetime.datetime(1999, 5, 17, 9, 0)] -}}} - -Monday of week number 20 (where the default start of the week is Monday), -3 occurrences. -{{{ ->>> list(rrule(YEARLY, count=3, byweekno=20, byweekday=MO, - dtstart=parse("19970512T090000"))) -[datetime.datetime(1997, 5, 12, 9, 0), - datetime.datetime(1998, 5, 11, 9, 0), - datetime.datetime(1999, 5, 17, 9, 0)] -}}} - -The week number 1 may be in the last year. -{{{ ->>> list(rrule(WEEKLY, count=3, byweekno=1, byweekday=MO, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 12, 29, 9, 0), - datetime.datetime(1999, 1, 4, 9, 0), - datetime.datetime(2000, 1, 3, 9, 0)] -}}} - -And the week numbers greater than 51 may be in the next year. -{{{ ->>> list(rrule(WEEKLY, count=3, byweekno=52, byweekday=SU, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 12, 28, 9, 0), - datetime.datetime(1998, 12, 27, 9, 0), - datetime.datetime(2000, 1, 2, 9, 0)] -}}} - -Only some years have week number 53: -{{{ ->>> list(rrule(WEEKLY, count=3, byweekno=53, byweekday=MO, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1998, 12, 28, 9, 0), - datetime.datetime(2004, 12, 27, 9, 0), - datetime.datetime(2009, 12, 28, 9, 0)] -}}} - -Every Friday the 13th, 4 occurrences. -{{{ ->>> list(rrule(YEARLY, count=4, byweekday=FR, bymonthday=13, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1998, 2, 13, 9, 0), - datetime.datetime(1998, 3, 13, 9, 0), - datetime.datetime(1998, 11, 13, 9, 0), - datetime.datetime(1999, 8, 13, 9, 0)] -}}} - -Every four years, the first Tuesday after a Monday in November, -3 occurrences (U.S. Presidential Election day): -{{{ ->>> list(rrule(YEARLY, interval=4, count=3, bymonth=11, - byweekday=TU, bymonthday=(2,3,4,5,6,7,8), - dtstart=parse("19961105T090000"))) -[datetime.datetime(1996, 11, 5, 9, 0), - datetime.datetime(2000, 11, 7, 9, 0), - datetime.datetime(2004, 11, 2, 9, 0)] -}}} - -The 3rd instance into the month of one of Tuesday, Wednesday or -Thursday, for the next 3 months: -{{{ ->>> list(rrule(MONTHLY, count=3, byweekday=(TU,WE,TH), - bysetpos=3, dtstart=parse("19970904T090000"))) -[datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 10, 7, 9, 0), - datetime.datetime(1997, 11, 6, 9, 0)] -}}} - -The 2nd to last weekday of the month, 3 occurrences. 
-{{{ ->>> list(rrule(MONTHLY, count=3, byweekday=(MO,TU,WE,TH,FR), - bysetpos=-2, dtstart=parse("19970929T090000"))) -[datetime.datetime(1997, 9, 29, 9, 0), - datetime.datetime(1997, 10, 30, 9, 0), - datetime.datetime(1997, 11, 27, 9, 0)] -}}} - -Every 3 hours from 9:00 AM to 5:00 PM on a specific day. -{{{ ->>> list(rrule(HOURLY, interval=3, - dtstart=parse("19970902T090000"), - until=parse("19970902T170000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 12, 0), - datetime.datetime(1997, 9, 2, 15, 0)] -}}} - -Every 15 minutes for 6 occurrences. -{{{ ->>> list(rrule(MINUTELY, interval=15, count=6, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 9, 15), - datetime.datetime(1997, 9, 2, 9, 30), - datetime.datetime(1997, 9, 2, 9, 45), - datetime.datetime(1997, 9, 2, 10, 0), - datetime.datetime(1997, 9, 2, 10, 15)] -}}} - -Every hour and a half for 4 occurrences. -{{{ ->>> list(rrule(MINUTELY, interval=90, count=4, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 10, 30), - datetime.datetime(1997, 9, 2, 12, 0), - datetime.datetime(1997, 9, 2, 13, 30)] -}}} - -Every 20 minutes from 9:00 AM to 4:40 PM for two days. -{{{ ->>> list(rrule(MINUTELY, interval=20, count=48, - byhour=range(9,17), byminute=(0,20,40), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 9, 20), - (...) - datetime.datetime(1997, 9, 2, 16, 20), - datetime.datetime(1997, 9, 2, 16, 40), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 3, 9, 20), - (...) - datetime.datetime(1997, 9, 3, 16, 20), - datetime.datetime(1997, 9, 3, 16, 40)] -}}} - -An example where the days generated makes a difference because of {{{wkst}}}. -{{{ ->>> list(rrule(WEEKLY, interval=2, count=4, - byweekday=(TU,SU), wkst=MO, - dtstart=parse("19970805T090000"))) -[datetime.datetime(1997, 8, 5, 9, 0), - datetime.datetime(1997, 8, 10, 9, 0), - datetime.datetime(1997, 8, 19, 9, 0), - datetime.datetime(1997, 8, 24, 9, 0)] - ->>> list(rrule(WEEKLY, interval=2, count=4, - byweekday=(TU,SU), wkst=SU, - dtstart=parse("19970805T090000"))) -[datetime.datetime(1997, 8, 5, 9, 0), - datetime.datetime(1997, 8, 17, 9, 0), - datetime.datetime(1997, 8, 19, 9, 0), - datetime.datetime(1997, 8, 31, 9, 0)] -}}} - -==== rruleset type ==== -The {{{rruleset}}} type allows more complex recurrence setups, mixing -multiple rules, dates, exclusion rules, and exclusion dates. -The type constructor takes the following keyword arguments: - - cache:: - If True, caching of results will be enabled, improving performance - of multiple queries considerably. - -==== rruleset methods ==== -The following methods are available: - - rruleset.rrule(rrule):: - Include the given {{{rrule}}} instance in the recurrence set - generation. - - rruleset.rdate(dt):: - Include the given {{{datetime}}} instance in the recurrence - set generation. - - rruleset.exrule(rrule):: - Include the given {{{rrule}}} instance in the recurrence set - exclusion list. Dates which are part of the given recurrence - rules will not be generated, even if some inclusive {{{rrule}}} - or {{{rdate}}} matches them. - - rruleset.exdate(dt):: - Include the given {{{datetime}}} instance in the recurrence set - exclusion list. Dates included that way will not be generated, - even if some inclusive {{{rrule}}} or {{{rdate}}} matches them. 
- - rruleset.before(dt, inc=False):: - Returns the last recurrence before the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rruleset.after(dt, inc=False):: - Returns the first recurrence after the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rruleset.between(after, before, inc=False):: - Returns all the occurrences of the rrule between {{{after}}} - and {{{before}}}. The {{{inc}}} keyword defines what happens - if {{{after}}} and/or {{{before}}} are themselves occurrences. - With {{{inc == True}}}, they will be included in the list, - if they are found in the recurrence set. - - rruleset.count():: - Returns the number of recurrences in this set. It will have - go trough the whole recurrence, if this hasn't been done - before. - -Besides these methods, {{{rruleset}}} instances also support -the {{{__getitem__()}}} and {{{__contains__()}}} special methods, -meaning that these are valid expressions: -{{{ -set = rruleset(...) -if datetime(...) in set: - ... -print set[0] -print set[-1] -print set[1:2] -print set[::-2] -}}} - -The getitem/slicing mechanism is smart enough to avoid getting the whole -recurrence set, if possible. - -==== rruleset examples ==== -Daily, for 7 days, jumping Saturday and Sunday occurrences. -{{{ ->>> set = rruleset() ->>> set.rrule(rrule(DAILY, count=7, - dtstart=parse("19970902T090000"))) ->>> set.exrule(rrule(YEARLY, byweekday=(SA,SU), - dtstart=parse("19970902T090000"))) ->>> list(set) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 5, 9, 0), - datetime.datetime(1997, 9, 8, 9, 0)] -}}} - -Weekly, for 4 weeks, plus one time on day 7, and not on day 16. -{{{ ->>> set = rruleset() ->>> set.rrule(rrule(WEEKLY, count=4, - dtstart=parse("19970902T090000"))) ->>> set.rdate(datetime.datetime(1997, 9, 7, 9, 0)) ->>> set.exdate(datetime.datetime(1997, 9, 16, 9, 0)) ->>> list(set) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 7, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0)] -}}} - -==== rrulestr() function ==== -The {{{rrulestr()}}} function is a parser for ''RFC-like'' syntaxes. -The function prototype is: -{{{ -rrulestr(str) -}}} - -The string passed as parameter may be a multiple line string, a -single line string, or just the {{{RRULE}}} property value. - -Additionally, it accepts the following keyword arguments: - - cache:: - If {{{True}}}, the {{{rruleset}}} or {{{rrule}}} created instance - will cache its results. Default is not to cache. - - dtstart:: - If given, it must be a {{{datetime}}} instance that will be used - when no {{{DTSTART}}} property is found in the parsed string. If - it is not given, and the property is not found, {{{datetime.now()}}} - will be used instead. - - unfold:: - If set to {{{True}}}, lines will be unfolded following the RFC - specification. It defaults to {{{False}}}, meaning that spaces - before every line will be stripped. - - forceset:: - If set to {{{True}}} a {{{rruleset}}} instance will be returned, - even if only a single rule is found. The default is to return an - {{{rrule}}} if possible, and an {{{rruleset}}} if necessary. 
- - compatible:: - If set to {{{True}}}, the parser will operate in RFC-compatible - mode. Right now it means that {{{unfold}}} will be turned on, - and if a {{{DTSTART}}} is found, it will be considered the first - recurrence instance, as documented in the RFC. - - ignoretz:: - If set to {{{True}}}, the date parser will ignore timezone - information available in the {{{DTSTART}}} property, or the - {{{UNTIL}}} attribute. - - tzinfos:: - If set, it will be passed to the datetime string parser to - resolve unknown timezone settings. For more information about - what could be used here, check the parser documentation. - -==== rrulestr() examples ==== - -Every 10 days, 5 occurrences. -{{{ ->>> list(rrulestr(""" -... DTSTART:19970902T090000 -... RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5 -... """)) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 12, 9, 0)] -}}} - -Same thing, but passing only the {{{RRULE}}} value. -{{{ ->>> list(rrulestr("FREQ=DAILY;INTERVAL=10;COUNT=5", - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 12, 9, 0)] -}}} - -Notice that when using a single rule, it returns an -{{{rrule}}} instance, unless {{{forceset}}} was used. -{{{ ->>> rrulestr("FREQ=DAILY;INTERVAL=10;COUNT=5") -<dateutil.rrule.rrule instance at 0x...> - ->>> rrulestr(""" -... DTSTART:19970902T090000 -... RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5 -... """) -<dateutil.rrule.rrule instance at 0x...> - ->>> rrulestr("FREQ=DAILY;INTERVAL=10;COUNT=5", forceset=True) -<dateutil.rrule.rruleset instance at 0x...> -}}} - -But when an {{{rruleset}}} is needed, it is automatically used. -{{{ ->>> rrulestr(""" -... DTSTART:19970902T090000 -... RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5 -... RRULE:FREQ=DAILY;INTERVAL=5;COUNT=3 -... """) -<dateutil.rrule.rruleset instance at 0x...> -}}} - -=== parser === -This module offers a generic date/time string parser which is -able to parse most known formats to represent a date and/or -time. - -==== parse() function ==== -That's probably the only function you'll need from this module. -It offers you an interface to access the parser functionality and -extract a {{{datetime}}} type out of a string. - -The prototype of this function is: -{{{ -parse(timestr) -}}} - -Additionally, the following keyword arguments are available: - - default:: - If given, this must be a {{{datetime}}} instance. Any fields - missing in the parsed date will be copied from this instance. - The default value is the current date, at 00:00:00am. - - ignoretz:: - If this is true, even if a timezone is found in the string, - the parser will not use it. - - tzinfos:: - Using this keyword argument you may provide custom timezones - to the parser. If given, it must be either a dictionary with - the timezone abbreviation as key, or a function accepting a - timezone abbreviation and offset as argument. The dictionary - values and the function return must be a timezone offset - in seconds, a tzinfo subclass, or a string defining the - timezone (in the TZ environment variable format). - - dayfirst:: - This option allows one to change the precedence in which - days are parsed in date strings. The default is given in the - parserinfo instance (the default parserinfo has it set to - False). If {{{dayfirst}}} is False, the {{{MM-DD-YYYY}}} - format will have precedence over {{{DD-MM-YYYY}}} in an - ambiguous date.
- - yearfirst:: - This option allow one to change the precedence in which - years are parsed in date strings. The default is given in - the parserinfo instance (the default parserinfo has it set - to False). If {{{yearfirst}}} is false, the {{{MM-DD-YY}}} - format will have precedence over {{{YY-MM-DD}}} in an - ambiguous date. - - fuzzy:: - If {{{fuzzy}}} is set to True, unknown tokens in the string - will be ignored. - - parserinfo:: - This parameter allows one to change how the string is parsed, - by using a different parserinfo class instance. Using it you - may, for example, intenationalize the parser strings, or make - it ignore additional words. - -==== Format precedence ==== -Whenever an ambiguous date is found, the {{{dayfirst}}} and -{{{yearfirst}}} parameters will control how the information -is processed. Here is the precedence in each case: - -If {{{dayfirst}}} is {{{False}}} and {{{yearfirst}}} is {{{False}}}, -(default, if no parameter is given): - - * {{{MM-DD-YY}}} - * {{{DD-MM-YY}}} - * {{{YY-MM-DD}}} - -If {{{dayfirst}}} is {{{True}}} and {{{yearfirst}}} is {{{False}}}: - - * {{{DD-MM-YY}}} - * {{{MM-DD-YY}}} - * {{{YY-MM-DD}}} - -If {{{dayfirst}}} is {{{False}}} and {{{yearfirst}}} is {{{True}}}: - - * {{{YY-MM-DD}}} - * {{{MM-DD-YY}}} - * {{{DD-MM-YY}}} - -If {{{dayfirst}}} is {{{True}}} and {{{yearfirst}}} is {{{True}}}: - - * {{{YY-MM-DD}}} - * {{{DD-MM-YY}}} - * {{{MM-DD-YY}}} - -==== Converting two digit years ==== -When a two digit year is found, it is processed considering -the current year, so that the computed year is never more -than 49 years after the current year, nor 50 years before the -current year. In other words, if we are in year 2003, and the -year 30 is found, it will be considered as 2030, but if the -year 60 is found, it will be considered 1960. - -==== Examples ==== -The following code will prepare the environment: -{{{ ->>> from dateutil.parser import * ->>> from dateutil.tz import * ->>> from datetime import * ->>> TZOFFSETS = {"BRST": -10800} ->>> BRSTTZ = tzoffset(-10800, "BRST") ->>> DEFAULT = datetime(2003, 9, 25) -}}} - -Some simple examples based on the {{{date}}} command, using the -{{{TZOFFSET}}} dictionary to provide the BRST timezone offset. -{{{ ->>> parse("Thu Sep 25 10:36:28 BRST 2003", tzinfos=TZOFFSETS) -datetime.datetime(2003, 9, 25, 10, 36, 28, - tzinfo=tzoffset('BRST', -10800)) - ->>> parse("2003 10:36:28 BRST 25 Sep Thu", tzinfos=TZOFFSETS) -datetime.datetime(2003, 9, 25, 10, 36, 28, - tzinfo=tzoffset('BRST', -10800)) -}}} - -Notice that since BRST is my local timezone, parsing it without -further timezone settings will yield a {{{tzlocal}}} timezone. 
-{{{ ->>> parse("Thu Sep 25 10:36:28 BRST 2003") -datetime.datetime(2003, 9, 25, 10, 36, 28, tzinfo=tzlocal()) -}}} - -We can also ask to ignore the timezone explicitly: -{{{ ->>> parse("Thu Sep 25 10:36:28 BRST 2003", ignoretz=True) -datetime.datetime(2003, 9, 25, 10, 36, 28) -}}} - -That's the same as processing a string without timezone: -{{{ ->>> parse("Thu Sep 25 10:36:28 2003") -datetime.datetime(2003, 9, 25, 10, 36, 28) -}}} - -Without the year, but passing our {{{DEFAULT}}} datetime to return -the same year, no mattering what year we currently are in: -{{{ ->>> parse("Thu Sep 25 10:36:28", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28) -}}} - -Strip it further: -{{{ ->>> parse("Thu Sep 10:36:28", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28) - ->>> parse("Thu 10:36:28", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28) - ->>> parse("Thu 10:36", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36) - ->>> parse("10:36", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36) ->>> -}}} - -Strip in a different way: -{{{ ->>> parse("Thu Sep 25 2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep 25 2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep 2003", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Another format, based on {{{date -R}}} (RFC822): -{{{ ->>> parse("Thu, 25 Sep 2003 10:49:41 -0300") -datetime.datetime(2003, 9, 25, 10, 49, 41, - tzinfo=tzoffset(None, -10800)) -}}} - -ISO format: -{{{ ->>> parse("2003-09-25T10:49:41.5-03:00") -datetime.datetime(2003, 9, 25, 10, 49, 41, 500000, - tzinfo=tzoffset(None, -10800)) -}}} - -Some variations: -{{{ ->>> parse("2003-09-25T10:49:41") -datetime.datetime(2003, 9, 25, 10, 49, 41) - ->>> parse("2003-09-25T10:49") -datetime.datetime(2003, 9, 25, 10, 49) - ->>> parse("2003-09-25T10") -datetime.datetime(2003, 9, 25, 10, 0) - ->>> parse("2003-09-25") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -ISO format, without separators: -{{{ ->>> parse("20030925T104941.5-0300") -datetime.datetime(2003, 9, 25, 10, 49, 41, 500000, - tzinfo=tzinfo=tzoffset(None, -10800)) - ->>> parse("20030925T104941-0300") -datetime.datetime(2003, 9, 25, 10, 49, 41, - tzinfo=tzoffset(None, -10800)) - ->>> parse("20030925T104941") -datetime.datetime(2003, 9, 25, 10, 49, 41) - ->>> parse("20030925T1049") -datetime.datetime(2003, 9, 25, 10, 49) - ->>> parse("20030925T10") -datetime.datetime(2003, 9, 25, 10, 0) - ->>> parse("20030925") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Everything together. 
-{{{ ->>> parse("199709020900") -datetime.datetime(1997, 9, 2, 9, 0) ->>> parse("19970902090059") -datetime.datetime(1997, 9, 2, 9, 0, 59) -}}} - -Different date orderings: -{{{ ->>> parse("2003-09-25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003-Sep-25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("25-Sep-2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep-25-2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("09-25-2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("25-09-2003") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Check some ambiguous dates: -{{{ ->>> parse("10-09-2003") -datetime.datetime(2003, 10, 9, 0, 0) - ->>> parse("10-09-2003", dayfirst=True) -datetime.datetime(2003, 9, 10, 0, 0) - ->>> parse("10-09-03") -datetime.datetime(2003, 10, 9, 0, 0) - ->>> parse("10-09-03", yearfirst=True) -datetime.datetime(2010, 9, 3, 0, 0) -}}} - -Other date separators are allowed: -{{{ ->>> parse("2003.Sep.25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003/09/25") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Even with spaces: -{{{ ->>> parse("2003 Sep 25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003 09 25") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Hours with letters work: -{{{ ->>> parse("10h36m28.5s", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28, 500000) - ->>> parse("01s02h03m", default=DEFAULT) -datetime.datetime(2003, 9, 25, 2, 3, 1) - ->>> parse("01h02m03", default=DEFAULT) -datetime.datetime(2003, 9, 3, 1, 2) - ->>> parse("01h02", default=DEFAULT) -datetime.datetime(2003, 9, 2, 1, 0) - ->>> parse("01h02s", default=DEFAULT) -datetime.datetime(2003, 9, 25, 1, 0, 2) -}}} - -With AM/PM: -{{{ ->>> parse("10h am", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 0) - ->>> parse("10pm", default=DEFAULT) -datetime.datetime(2003, 9, 25, 22, 0) - ->>> parse("12:00am", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("12pm", default=DEFAULT) -datetime.datetime(2003, 9, 25, 12, 0) -}}} - -Some special treating for ''pertain'' relations: -{{{ ->>> parse("Sep 03", default=DEFAULT) -datetime.datetime(2003, 9, 3, 0, 0) - ->>> parse("Sep of 03", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Fuzzy parsing: -{{{ ->>> s = "Today is 25 of September of 2003, exactly " \ -... "at 10:49:41 with timezone -03:00." ->>> parse(s, fuzzy=True) -datetime.datetime(2003, 9, 25, 10, 49, 41, - tzinfo=tzoffset(None, -10800)) -}}} - -Other random formats: -{{{ ->>> parse("Wed, July 10, '96") -datetime.datetime(1996, 7, 10, 0, 0) - ->>> parse("1996.07.10 AD at 15:08:56 PDT", ignoretz=True) -datetime.datetime(1996, 7, 10, 15, 8, 56) - ->>> parse("Tuesday, April 12, 1952 AD 3:30:42pm PST", ignoretz=True) -datetime.datetime(1952, 4, 12, 15, 30, 42) - ->>> parse("November 5, 1994, 8:15:30 am EST", ignoretz=True) -datetime.datetime(1994, 11, 5, 8, 15, 30) - ->>> parse("3rd of May 2001") -datetime.datetime(2001, 5, 3, 0, 0) - ->>> parse("5:50 A.M. on June 13, 1990") -datetime.datetime(1990, 6, 13, 5, 50) -}}} - -=== easter === -This module offers a generic easter computing method for -any given year, using Western, Orthodox or Julian algorithms. - -==== easter() function ==== -This method was ported from the work done by -[http://users.chariot.net.au/~gmarts/eastalg.htm GM Arts], -on top of the algorithm by -[http://www.tondering.dk/claus/calendar.html Claus Tondering], -which was based in part on the algorithm of Ouding (1940), -as quoted in "Explanatory Supplement to the Astronomical -Almanac", P. 
Kenneth Seidelmann, editor. - -This algorithm implements three different easter -calculation methods: - - 1. Original calculation in Julian calendar, valid in - dates after 326 AD - 1. Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 1. Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - -These methods are represented by the constants: -{{{ -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 -}}} - -The default method is method 3. - -=== tz === -This module offers timezone implementations subclassing -the abstract {{{datetime.tzinfo}}} type. There are -classes to handle [http://www.twinsun.com/tz/tz-link.htm tzfile] -format files (usually are in /etc/localtime, -/usr/share/zoneinfo, etc), TZ environment string (in all -known formats), given ranges (with help from relative -deltas), local machine timezone, fixed offset timezone, -and UTC timezone. - -==== tzutc type ==== -This type implements a basic UTC timezone. The constructor of this -type accepts no parameters. - -==== tzutc examples ==== -{{{ ->>> from datetime import * ->>> from dateutil.tz import * - ->>> datetime.now() -datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) - ->>> datetime.now(tzutc()) -datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) - ->>> datetime.now(tzutc()).tzname() -'UTC' -}}} - -==== tzoffset type ==== -This type implements a fixed offset timezone, with no -support to daylight saving times. Here is the prototype of the -type constructor: -{{{ -tzoffset(name, offset) -}}} - -The {{{name}}} parameter may be optionally set to {{{None}}}, and -{{{offset}}} must be given in seconds. - -==== tzoffset examples ==== -{{{ ->>> from datetime import * ->>> from dateutil.tz import * - ->>> datetime.now(tzoffset("BRST", -10800)) -datetime.datetime(2003, 9, 27, 9, 52, 43, 624904, - tzinfo=tzinfo=tzoffset('BRST', -10800)) - ->>> datetime.now(tzoffset("BRST", -10800)).tzname() -'BRST' - ->>> datetime.now(tzoffset("BRST", -10800)).astimezone(tzutc()) -datetime.datetime(2003, 9, 27, 12, 53, 11, 446419, - tzinfo=tzutc()) -}}} - -==== tzlocal type ==== -This type implements timezone settings as known by the -operating system. The constructor of this type accepts no -parameters. - -==== tzlocal examples ==== -{{{ ->>> from datetime import * ->>> from dateutil.tz import * - ->>> datetime.now(tzlocal()) -datetime.datetime(2003, 9, 27, 10, 1, 43, 673605, - tzinfo=tzlocal()) - ->>> datetime.now(tzlocal()).tzname() -'BRST' - ->>> datetime.now(tzlocal()).astimezone(tzoffset(None, 0)) -datetime.datetime(2003, 9, 27, 13, 3, 0, 11493, - tzinfo=tzoffset(None, 0)) -}}} - -==== tzstr type ==== -This type implements timezone settings extracted from a -string in known TZ environment variable formats. Here is the prototype -of the constructor: -{{{ -tzstr(str) -}}} - -==== tzstr examples ==== -Here are examples of the recognized formats: - - * {{{EST5EDT}}} - * {{{EST5EDT,4,0,6,7200,10,0,26,7200,3600}}} - * {{{EST5EDT,4,1,0,7200,10,-1,0,7200,3600}}} - * {{{EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00}}} - * {{{EST5EDT4,95/02:00:00,298/02:00}}} - * {{{EST5EDT4,J96/02:00:00,J299/02:00}}} - -Notice that if daylight information is not present, but a -daylight abbreviation was provided, {{{tzstr}}} will follow the -convention of using the first sunday of April to start daylight -saving, and the last sunday of October to end it. 
If start or -end time is not present, 2AM will be used, and if the daylight -offset is not present, the standard offset plus one hour will -be used. This convention is the same as used in the GNU libc. - -This also means that some of the above examples are exactly -equivalent, and all of these examples are equivalent -in the year of 2003. - -Here is the example mentioned in the -[http://www.python.org/doc/current/lib/module-time.html time module documentation]. -{{{ ->>> os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0' ->>> time.tzset() ->>> time.strftime('%X %x %Z') -'02:07:36 05/08/03 EDT' ->>> os.environ['TZ'] = 'AEST-10AEDT-11,M10.5.0,M3.5.0' ->>> time.tzset() ->>> time.strftime('%X %x %Z') -'16:08:12 05/08/03 AEST' -}}} - -And here is an example showing the same information using {{{tzstr}}}, -without touching system settings. -{{{ ->>> tz1 = tzstr('EST+05EDT,M4.1.0,M10.5.0') ->>> tz2 = tzstr('AEST-10AEDT-11,M10.5.0,M3.5.0') ->>> dt = datetime(2003, 5, 8, 2, 7, 36, tzinfo=tz1) ->>> dt.strftime('%X %x %Z') -'02:07:36 05/08/03 EDT' ->>> dt.astimezone(tz2).strftime('%X %x %Z') -'16:07:36 05/08/03 AEST' -}}} - -Are these really equivalent? -{{{ ->>> tzstr('EST5EDT') == tzstr('EST5EDT,4,1,0,7200,10,-1,0,7200,3600') -True -}}} - -Check the daylight limit. -{{{ ->>> datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname() -'EST' ->>> datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname() -'EDT' ->>> datetime(2003, 10, 26, 0, 59, tzinfo=tz).tzname() -'EDT' ->>> datetime(2003, 10, 26, 1, 00, tzinfo=tz).tzname() -'EST' -}}} - -==== tzrange type ==== -This type offers the same functionality as the {{{tzstr}}} type, but -instead of timezone strings, information is passed using -{{{relativedelta}}}s which are applied to a datetime set to the first -day of the year. Here is the prototype of this type's constructor: -{{{ -tzrange(stdabbr, stdoffset=None, dstabbr=None, dstoffset=None, - start=None, end=None): -}}} - -Offsets must be given in seconds. Information not provided will be -set to the defaults, as explained in the {{{tzstr}}} section above. - -==== tzrange examples ==== -{{{ ->>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") -True - ->>> from dateutil.relativedelta import * ->>> range1 = tzrange("EST", -18000, "EDT") ->>> range2 = tzrange("EST", -18000, "EDT", -14400, -... relativedelta(hours=+2, month=4, day=1, - weekday=SU(+1)), -... relativedelta(hours=+1, month=10, day=31, - weekday=SU(-1))) ->>> tzstr('EST5EDT') == range1 == range2 -True -}}} - -Notice a minor detail in the last example: while the DST should end -at 2AM, the delta will catch 1AM. That's because the daylight saving -time should end at 2AM standard time (the difference between STD and -DST is 1h in the given example) instead of the DST time. That's how -the {{{tzinfo}}} subtypes should deal with the extra hour that happens -when going back to the standard time. Check -[http://www.python.org/doc/current/lib/datetime-tzinfo.html tzinfo documentation] -for more information. - -==== tzfile type ==== -This type allows one to use tzfile(5) format timezone files to extract -current and historical zone information. Here is the type constructor -prototype: -{{{ -tzfile(fileobj) -}}} - -Where {{{fileobj}}} is either a filename or a file-like object with -a {{{read()}}} method. 
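Since {{{fileobj}}} may be an already-open file rather than a path, here is a minimal sketch of that variant (the zoneinfo path is an assumption about the host; the expected {{{'EDT'}}} result simply mirrors the EST5EDT daylight-limit example below):
{{{
>>> from dateutil.tz import tzfile
>>> from datetime import datetime
>>> f = open("/usr/share/zoneinfo/EST5EDT", "rb")  # assumed path; any tzfile(5) format file works
>>> tz = tzfile(f)                                 # file-like objects with a read() method are accepted
>>> datetime(2003, 4, 6, 2, 0, tzinfo=tz).tzname()
'EDT'
}}}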
-
-==== tzfile examples ====
-{{{
->>> tz = tzfile("/etc/localtime")
->>> datetime.now(tz)
-datetime.datetime(2003, 9, 27, 12, 3, 48, 392138,
-                  tzinfo=tzfile('/etc/localtime'))
-
->>> datetime.now(tz).astimezone(tzutc())
-datetime.datetime(2003, 9, 27, 15, 3, 53, 70863,
-                  tzinfo=tzutc())
-
->>> datetime.now(tz).tzname()
-'BRST'
->>> datetime(2003, 1, 1, tzinfo=tz).tzname()
-'BRDT'
-}}}
-
-Check the daylight limit.
-{{{
->>> tz = tzfile('/usr/share/zoneinfo/EST5EDT')
->>> datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname()
-'EST'
->>> datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname()
-'EDT'
->>> datetime(2003, 10, 26, 0, 59, tzinfo=tz).tzname()
-'EDT'
->>> datetime(2003, 10, 26, 1, 00, tzinfo=tz).tzname()
-'EST'
-}}}
-
-==== tzical type ====
-This type is able to parse
-[ftp://ftp.rfc-editor.org/in-notes/rfc2445.txt iCalendar]
-style {{{VTIMEZONE}}} sections into a Python timezone object.
-The constructor prototype is:
-{{{
-tzical(fileobj)
-}}}
-
-Where {{{fileobj}}} is either a filename or a file-like object with
-a {{{read()}}} method.
-
-==== tzical methods ====
-
- tzical.get(tzid=None)::
-    Since a single iCalendar file may contain more than one timezone,
-    you must ask for the timezone you want with this method. If there's
-    more than one timezone in the parsed file, you'll need to pass the
-    {{{tzid}}} parameter. Otherwise, leaving it empty will yield the only
-    available timezone.
-
-==== tzical examples ====
-Here is a sample file extracted from the RFC. This file defines
-the {{{EST5EDT}}} timezone, and will be used in the following example.
-{{{
-BEGIN:VTIMEZONE
-TZID:US-Eastern
-LAST-MODIFIED:19870101T000000Z
-TZURL:http://zones.stds_r_us.net/tz/US-Eastern
-BEGIN:STANDARD
-DTSTART:19671029T020000
-RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
-TZOFFSETFROM:-0400
-TZOFFSETTO:-0500
-TZNAME:EST
-END:STANDARD
-BEGIN:DAYLIGHT
-DTSTART:19870405T020000
-RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
-TZOFFSETFROM:-0500
-TZOFFSETTO:-0400
-TZNAME:EDT
-END:DAYLIGHT
-END:VTIMEZONE
-}}}
-
-And here is an example exploring a {{{tzical}}} type:
-{{{
->>> from dateutil.tz import *; from datetime import *
-
->>> tz = tzical('EST5EDT.ics')
->>> tz.keys()
-['US-Eastern']
-
->>> est = tz.get('US-Eastern')
->>> est
-<tzicalvtz 'US-Eastern'>
-
->>> datetime.now(est)
-datetime.datetime(2003, 10, 6, 19, 44, 18, 667987,
-                  tzinfo=<tzicalvtz 'US-Eastern'>)
-
->>> est == tz.get()
-True
-}}}
-
-Let's check the daylight ranges, as usual:
-{{{
->>> datetime(2003, 4, 6, 1, 59, tzinfo=est).tzname()
-'EST'
->>> datetime(2003, 4, 6, 2, 00, tzinfo=est).tzname()
-'EDT'
-
->>> datetime(2003, 10, 26, 0, 59, tzinfo=est).tzname()
-'EDT'
->>> datetime(2003, 10, 26, 1, 00, tzinfo=est).tzname()
-'EST'
-}}}
-
-==== tzwin type ====
-This type offers access to internal registry-based Windows timezones.
-The constructor prototype is:
-{{{
-tzwin(name)
-}}}
-
-Where {{{name}}} is the timezone name. There's a static {{{tzwin.list()}}}
-method to check the available names.
-
-==== tzwin methods ====
-
- tzwin.display()::
-    This method returns the timezone extended name.
-
- tzwin.list()::
-    This static method lists all available timezone names.
-
-==== tzwin examples ====
-{{{
->>> tz = tzwin("E. South America Standard Time")
-}}}
-
-==== tzwinlocal type ====
-This type offers access to the registry-based Windows timezone
-currently set on the local machine. The constructor accepts no
-parameters, so the prototype is:
-{{{
-tzwinlocal()
-}}}
-
-==== tzwinlocal methods ====
-
- tzwinlocal.display()::
-    This method returns the timezone extended name, and returns
-    {{{None}}} if one is not available.
- -==== tzwinlocal examples ==== -{{{ ->>> tz = tzwinlocal() -}}} - -==== gettz() function ==== -This function is a helper that will try its best to get the right -timezone for your environment, or for the given string. The prototype -is as follows: -{{{ -gettz(name=None) -}}} - -If given, the parameter may be a filename, a path relative to the base -of the timezone information path (the base could be -{{{/usr/share/zoneinfo}}}, for example), a string timezone -specification, or a timezone abbreviation. If {{{name}}} is not given, -and the {{{TZ}}} environment variable is set, it's used instead. If the -parameter is not given, and {{{TZ}}} is not set, the default tzfile -paths will be tried. Then, if no timezone information is found, -an internal compiled database of timezones is used. When running -on Windows, the internal registry-based Windows timezones are also -considered. - -Example: -{{{ ->>> from dateutil.tz import * ->>> gettz() -tzfile('/etc/localtime') - ->>> gettz("America/Sao Paulo") -tzfile('/usr/share/zoneinfo/America/Sao_Paulo') - ->>> gettz("EST5EDT") -tzfile('/usr/share/zoneinfo/EST5EDT') - ->>> gettz("EST5") -tzstr('EST5') - ->>> gettz('BRST') -tzlocal() - ->>> os.environ["TZ"] = "America/Sao Paulo" ->>> gettz() -tzfile('/usr/share/zoneinfo/America/Sao_Paulo') - ->>> os.environ["TZ"] = "BRST" ->>> gettz() -tzlocal() - ->>> gettz("Unavailable") ->>> -}}} - -=== zoneinfo === -This module provides direct access to the internal compiled -database of timezones. The timezone data and the compiling tools -are obtained from the following project: - - http://www.twinsun.com/tz/tz-link.htm - -==== gettz() function ==== -This function will try to retrieve the given timezone information -from the internal compiled database, and will cache its results. - -Example: -{{{ ->>> from dateutil import zoneinfo ->>> zoneinfo.gettz("Brazil/East") -tzfile('Brazil/East') -}}} - -## vim:ft=moin diff --git a/lib/dateutil_py2/__init__.py b/lib/dateutil_py2/__init__.py deleted file mode 100644 index 44895a0d388e..000000000000 --- a/lib/dateutil_py2/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -Copyright (c) 2003-2010 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. -""" -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" -__version__ = "1.5-mpl" diff --git a/lib/dateutil_py2/easter.py b/lib/dateutil_py2/easter.py deleted file mode 100644 index d7944104beb1..000000000000 --- a/lib/dateutil_py2/easter.py +++ /dev/null @@ -1,92 +0,0 @@ -""" -Copyright (c) 2003-2007 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. -""" -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" - -import datetime - -__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] - -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 - -def easter(year, method=EASTER_WESTERN): - """ - This method was ported from the work done by GM Arts, - on top of the algorithm by Claus Tondering, which was - based in part on the algorithm of Ouding (1940), as - quoted in "Explanatory Supplement to the Astronomical - Almanac", P. Kenneth Seidelmann, editor. 
- - This algorithm implements three different easter - calculation methods: - - 1 - Original calculation in Julian calendar, valid in - dates after 326 AD - 2 - Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 3 - Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - - These methods are represented by the constants: - - EASTER_JULIAN = 1 - EASTER_ORTHODOX = 2 - EASTER_WESTERN = 3 - - The default method is method 3. - - More about the algorithm may be found at: - - http://users.chariot.net.au/~gmarts/eastalg.htm - - and - - http://www.tondering.dk/claus/calendar.html - - """ - - if not (1 <= method <= 3): - raise ValueError, "invalid method" - - # g - Golden year - 1 - # c - Century - # h - (23 - Epact) mod 30 - # i - Number of days from March 21 to Paschal Full Moon - # j - Weekday for PFM (0=Sunday, etc) - # p - Number of days from March 21 to Sunday on or before PFM - # (-6 to 28 methods 1 & 3, to 56 for method 2) - # e - Extra days to add for method 2 (converting Julian - # date to Gregorian date) - - y = year - g = y % 19 - e = 0 - if method < 3: - # Old method - i = (19*g+15)%30 - j = (y+y//4+i)%7 - if method == 2: - # Extra dates to convert Julian to Gregorian date - e = 10 - if y > 1600: - e = e+y//100-16-(y//100-16)//4 - else: - # New method - c = y//100 - h = (c-c//4-(8*c+13)//25+19*g+15)%30 - i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11)) - j = (y+y//4+i+2-c+c//4)%7 - - # p can be from -6 to 56 corresponding to dates 22 March to 23 May - # (later dates apply to method 2, although 23 May never actually occurs) - p = i-j+e - d = 1+(p+27+(p+6)//40)%31 - m = 3+(p+26)//30 - return datetime.date(int(y),int(m),int(d)) - diff --git a/lib/dateutil_py2/parser.py b/lib/dateutil_py2/parser.py deleted file mode 100644 index 5d824e411f32..000000000000 --- a/lib/dateutil_py2/parser.py +++ /dev/null @@ -1,886 +0,0 @@ -# -*- coding:iso-8859-1 -*- -""" -Copyright (c) 2003-2007 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. 
-""" -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" - -import datetime -import string -import time -import sys -import os - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -import relativedelta -import tz - - -__all__ = ["parse", "parserinfo"] - - -# Some pointers: -# -# http://www.cl.cam.ac.uk/~mgk25/iso-time.html -# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html -# http://www.w3.org/TR/NOTE-datetime -# http://ringmaster.arc.nasa.gov/tools/time_formats.html -# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm -# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html - - -class _timelex(object): - - def __init__(self, instream): - if isinstance(instream, basestring): - instream = StringIO(instream) - self.instream = instream - self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' - 'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' - 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') - self.numchars = '0123456789' - self.whitespace = ' \t\r\n' - self.charstack = [] - self.tokenstack = [] - self.eof = False - - def get_token(self): - if self.tokenstack: - return self.tokenstack.pop(0) - seenletters = False - token = None - state = None - wordchars = self.wordchars - numchars = self.numchars - whitespace = self.whitespace - while not self.eof: - if self.charstack: - nextchar = self.charstack.pop(0) - else: - nextchar = self.instream.read(1) - while nextchar == '\x00': - nextchar = self.instream.read(1) - if not nextchar: - self.eof = True - break - elif not state: - token = nextchar - if nextchar in wordchars: - state = 'a' - elif nextchar in numchars: - state = '0' - elif nextchar in whitespace: - token = ' ' - break # emit token - else: - break # emit token - elif state == 'a': - seenletters = True - if nextchar in wordchars: - token += nextchar - elif nextchar == '.': - token += nextchar - state = 'a.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0': - if nextchar in numchars: - token += nextchar - elif nextchar == '.': - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == 'a.': - seenletters = True - if nextchar == '.' or nextchar in wordchars: - token += nextchar - elif nextchar in numchars and token[-1] == '.': - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0.': - if nextchar == '.' or nextchar in numchars: - token += nextchar - elif nextchar in wordchars and token[-1] == '.': - token += nextchar - state = 'a.' 
- else: - self.charstack.append(nextchar) - break # emit token - if (state in ('a.', '0.') and - (seenletters or token.count('.') > 1 or token[-1] == '.')): - l = token.split('.') - token = l[0] - for tok in l[1:]: - self.tokenstack.append('.') - if tok: - self.tokenstack.append(tok) - return token - - def __iter__(self): - return self - - def next(self): - token = self.get_token() - if token is None: - raise StopIteration - return token - - def split(cls, s): - return list(cls(s)) - split = classmethod(split) - - -class _resultbase(object): - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def _repr(self, classname): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, `value`)) - return "%s(%s)" % (classname, ", ".join(l)) - - def __repr__(self): - return self._repr(self.__class__.__name__) - - -class parserinfo(object): - - # m from a.m/p.m, t from ISO T separator - JUMP = [" ", ".", ",", ";", "-", "/", "'", - "at", "on", "and", "ad", "m", "t", "of", - "st", "nd", "rd", "th"] - - WEEKDAYS = [("Mon", "Monday"), - ("Tue", "Tuesday"), - ("Wed", "Wednesday"), - ("Thu", "Thursday"), - ("Fri", "Friday"), - ("Sat", "Saturday"), - ("Sun", "Sunday")] - MONTHS = [("Jan", "January"), - ("Feb", "February"), - ("Mar", "March"), - ("Apr", "April"), - ("May", "May"), - ("Jun", "June"), - ("Jul", "July"), - ("Aug", "August"), - ("Sep", "September"), - ("Oct", "October"), - ("Nov", "November"), - ("Dec", "December")] - HMS = [("h", "hour", "hours"), - ("m", "minute", "minutes"), - ("s", "second", "seconds")] - AMPM = [("am", "a"), - ("pm", "p")] - UTCZONE = ["UTC", "GMT", "Z"] - PERTAIN = ["of"] - TZOFFSET = {} - - def __init__(self, dayfirst=False, yearfirst=False): - self._jump = self._convert(self.JUMP) - self._weekdays = self._convert(self.WEEKDAYS) - self._months = self._convert(self.MONTHS) - self._hms = self._convert(self.HMS) - self._ampm = self._convert(self.AMPM) - self._utczone = self._convert(self.UTCZONE) - self._pertain = self._convert(self.PERTAIN) - - self.dayfirst = dayfirst - self.yearfirst = yearfirst - - self._year = time.localtime().tm_year - self._century = self._year//100*100 - - def _convert(self, lst): - dct = {} - for i in range(len(lst)): - v = lst[i] - if isinstance(v, tuple): - for v in v: - dct[v.lower()] = i - else: - dct[v.lower()] = i - return dct - - def jump(self, name): - return name.lower() in self._jump - - def weekday(self, name): - if len(name) >= 3: - try: - return self._weekdays[name.lower()] - except KeyError: - pass - return None - - def month(self, name): - if len(name) >= 3: - try: - return self._months[name.lower()]+1 - except KeyError: - pass - return None - - def hms(self, name): - try: - return self._hms[name.lower()] - except KeyError: - return None - - def ampm(self, name): - try: - return self._ampm[name.lower()] - except KeyError: - return None - - def pertain(self, name): - return name.lower() in self._pertain - - def utczone(self, name): - return name.lower() in self._utczone - - def tzoffset(self, name): - if name in self._utczone: - return 0 - return self.TZOFFSET.get(name) - - def convertyear(self, year): - if year < 100: - year += self._century - if abs(year-self._year) >= 50: - if year < self._year: - year += 100 - else: - year -= 100 - return year - - def validate(self, res): - # move to info - if res.year is not None: - res.year = self.convertyear(res.year) - if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': - res.tzname = 
"UTC" - res.tzoffset = 0 - elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): - res.tzoffset = 0 - return True - - -class parser(object): - - def __init__(self, info=None): - self.info = info or parserinfo() - - def parse(self, timestr, default=None, - ignoretz=False, tzinfos=None, - **kwargs): - if not default: - default = datetime.datetime.now().replace(hour=0, minute=0, - second=0, microsecond=0) - res = self._parse(timestr, **kwargs) - if res is None: - raise ValueError, "unknown string format" - repl = {} - for attr in ["year", "month", "day", "hour", - "minute", "second", "microsecond"]: - value = getattr(res, attr) - if value is not None: - repl[attr] = value - ret = default.replace(**repl) - if res.weekday is not None and not res.day: - ret = ret+relativedelta.relativedelta(weekday=res.weekday) - if not ignoretz: - if callable(tzinfos) or tzinfos and res.tzname in tzinfos: - if callable(tzinfos): - tzdata = tzinfos(res.tzname, res.tzoffset) - else: - tzdata = tzinfos.get(res.tzname) - if isinstance(tzdata, datetime.tzinfo): - tzinfo = tzdata - elif isinstance(tzdata, basestring): - tzinfo = tz.tzstr(tzdata) - elif isinstance(tzdata, int): - tzinfo = tz.tzoffset(res.tzname, tzdata) - else: - raise ValueError, "offset must be tzinfo subclass, " \ - "tz string, or int offset" - ret = ret.replace(tzinfo=tzinfo) - elif res.tzname and res.tzname in time.tzname: - ret = ret.replace(tzinfo=tz.tzlocal()) - elif res.tzoffset == 0: - ret = ret.replace(tzinfo=tz.tzutc()) - elif res.tzoffset: - ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) - return ret - - class _result(_resultbase): - __slots__ = ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond", - "tzname", "tzoffset"] - - def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False): - info = self.info - if dayfirst is None: - dayfirst = info.dayfirst - if yearfirst is None: - yearfirst = info.yearfirst - res = self._result() - l = _timelex.split(timestr) - try: - - # year/month/day list - ymd = [] - - # Index of the month string in ymd - mstridx = -1 - - len_l = len(l) - i = 0 - while i < len_l: - - # Check if it's a number - try: - value_repr = l[i] - value = float(value_repr) - except ValueError: - value = None - - if value is not None: - # Token is a number - len_li = len(l[i]) - i += 1 - if (len(ymd) == 3 and len_li in (2, 4) - and (i >= len_l or (l[i] != ':' and - info.hms(l[i]) is None))): - # 19990101T23[59] - s = l[i-1] - res.hour = int(s[:2]) - if len_li == 4: - res.minute = int(s[2:]) - elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): - # YYMMDD or HHMMSS[.ss] - s = l[i-1] - if not ymd and l[i-1].find('.') == -1: - ymd.append(info.convertyear(int(s[:2]))) - ymd.append(int(s[2:4])) - ymd.append(int(s[4:])) - else: - # 19990101T235959[.59] - res.hour = int(s[:2]) - res.minute = int(s[2:4]) - res.second, res.microsecond = _parsems(s[4:]) - elif len_li == 8: - # YYYYMMDD - s = l[i-1] - ymd.append(int(s[:4])) - ymd.append(int(s[4:6])) - ymd.append(int(s[6:])) - elif len_li in (12, 14): - # YYYYMMDDhhmm[ss] - s = l[i-1] - ymd.append(int(s[:4])) - ymd.append(int(s[4:6])) - ymd.append(int(s[6:8])) - res.hour = int(s[8:10]) - res.minute = int(s[10:12]) - if len_li == 14: - res.second = int(s[12:]) - elif ((i < len_l and info.hms(l[i]) is not None) or - (i+1 < len_l and l[i] == ' ' and - info.hms(l[i+1]) is not None)): - # HH[ ]h or MM[ ]m or SS[.ss][ ]s - if l[i] == ' ': - i += 1 - idx = info.hms(l[i]) - while True: - if idx == 0: - res.hour = int(value) - 
if value%1: - res.minute = int(60*(value%1)) - elif idx == 1: - res.minute = int(value) - if value%1: - res.second = int(60*(value%1)) - elif idx == 2: - res.second, res.microsecond = \ - _parsems(value_repr) - i += 1 - if i >= len_l or idx == 2: - break - # 12h00 - try: - value_repr = l[i] - value = float(value_repr) - except ValueError: - break - else: - i += 1 - idx += 1 - if i < len_l: - newidx = info.hms(l[i]) - if newidx is not None: - idx = newidx - elif i+1 < len_l and l[i] == ':': - # HH:MM[:SS[.ss]] - res.hour = int(value) - i += 1 - value = float(l[i]) - res.minute = int(value) - if value%1: - res.second = int(60*(value%1)) - i += 1 - if i < len_l and l[i] == ':': - res.second, res.microsecond = _parsems(l[i+1]) - i += 2 - elif i < len_l and l[i] in ('-', '/', '.'): - sep = l[i] - ymd.append(int(value)) - i += 1 - if i < len_l and not info.jump(l[i]): - try: - # 01-01[-01] - ymd.append(int(l[i])) - except ValueError: - # 01-Jan[-01] - value = info.month(l[i]) - if value is not None: - ymd.append(value) - assert mstridx == -1 - mstridx = len(ymd)-1 - else: - return None - i += 1 - if i < len_l and l[i] == sep: - # We have three members - i += 1 - value = info.month(l[i]) - if value is not None: - ymd.append(value) - mstridx = len(ymd)-1 - assert mstridx == -1 - else: - ymd.append(int(l[i])) - i += 1 - elif i >= len_l or info.jump(l[i]): - if i+1 < len_l and info.ampm(l[i+1]) is not None: - # 12 am - res.hour = int(value) - if res.hour < 12 and info.ampm(l[i+1]) == 1: - res.hour += 12 - elif res.hour == 12 and info.ampm(l[i+1]) == 0: - res.hour = 0 - i += 1 - else: - # Year, month or day - ymd.append(int(value)) - i += 1 - elif info.ampm(l[i]) is not None: - # 12am - res.hour = int(value) - if res.hour < 12 and info.ampm(l[i]) == 1: - res.hour += 12 - elif res.hour == 12 and info.ampm(l[i]) == 0: - res.hour = 0 - i += 1 - elif not fuzzy: - return None - else: - i += 1 - continue - - # Check weekday - value = info.weekday(l[i]) - if value is not None: - res.weekday = value - i += 1 - continue - - # Check month name - value = info.month(l[i]) - if value is not None: - ymd.append(value) - assert mstridx == -1 - mstridx = len(ymd)-1 - i += 1 - if i < len_l: - if l[i] in ('-', '/'): - # Jan-01[-99] - sep = l[i] - i += 1 - ymd.append(int(l[i])) - i += 1 - if i < len_l and l[i] == sep: - # Jan-01-99 - i += 1 - ymd.append(int(l[i])) - i += 1 - elif (i+3 < len_l and l[i] == l[i+2] == ' ' - and info.pertain(l[i+1])): - # Jan of 01 - # In this case, 01 is clearly year - try: - value = int(l[i+3]) - except ValueError: - # Wrong guess - pass - else: - # Convert it here to become unambiguous - ymd.append(info.convertyear(value)) - i += 4 - continue - - # Check am/pm - value = info.ampm(l[i]) - if value is not None: - if value == 1 and res.hour < 12: - res.hour += 12 - elif value == 0 and res.hour == 12: - res.hour = 0 - i += 1 - continue - - # Check for a timezone name - if (res.hour is not None and len(l[i]) <= 5 and - res.tzname is None and res.tzoffset is None and - not [x for x in l[i] if x not in string.ascii_uppercase]): - res.tzname = l[i] - res.tzoffset = info.tzoffset(res.tzname) - i += 1 - - # Check for something like GMT+3, or BRST+3. Notice - # that it doesn't mean "I am 3 hours after GMT", but - # "my time +3 is GMT". If found, we reverse the - # logic so that timezone parsing code will get it - # right. 
- if i < len_l and l[i] in ('+', '-'): - l[i] = ('+', '-')[l[i] == '+'] - res.tzoffset = None - if info.utczone(res.tzname): - # With something like GMT+3, the timezone - # is *not* GMT. - res.tzname = None - - continue - - # Check for a numbered timezone - if res.hour is not None and l[i] in ('+', '-'): - signal = (-1,1)[l[i] == '+'] - i += 1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60 - elif i+1 < len_l and l[i+1] == ':': - # -03:00 - res.tzoffset = int(l[i])*3600+int(l[i+2])*60 - i += 2 - elif len_li <= 2: - # -[0]3 - res.tzoffset = int(l[i][:2])*3600 - else: - return None - i += 1 - res.tzoffset *= signal - - # Look for a timezone name between parenthesis - if (i+3 < len_l and - info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and - 3 <= len(l[i+2]) <= 5 and - not [x for x in l[i+2] - if x not in string.ascii_uppercase]): - # -0300 (BRST) - res.tzname = l[i+2] - i += 4 - continue - - # Check jumps - if not (info.jump(l[i]) or fuzzy): - return None - - i += 1 - - # Process year/month/day - len_ymd = len(ymd) - if len_ymd > 3: - # More than three members!? - return None - elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): - # One member, or two members with a month string - if mstridx != -1: - res.month = ymd[mstridx] - del ymd[mstridx] - if len_ymd > 1 or mstridx == -1: - if ymd[0] > 31: - res.year = ymd[0] - else: - res.day = ymd[0] - elif len_ymd == 2: - # Two members with numbers - if ymd[0] > 31: - # 99-01 - res.year, res.month = ymd - elif ymd[1] > 31: - # 01-99 - res.month, res.year = ymd - elif dayfirst and ymd[1] <= 12: - # 13-01 - res.day, res.month = ymd - else: - # 01-13 - res.month, res.day = ymd - if len_ymd == 3: - # Three members - if mstridx == 0: - res.month, res.day, res.year = ymd - elif mstridx == 1: - if ymd[0] > 31 or (yearfirst and ymd[2] <= 31): - # 99-Jan-01 - res.year, res.month, res.day = ymd - else: - # 01-Jan-01 - # Give precendence to day-first, since - # two-digit years is usually hand-written. - res.day, res.month, res.year = ymd - elif mstridx == 2: - # WTF!? 
- if ymd[1] > 31: - # 01-99-Jan - res.day, res.year, res.month = ymd - else: - # 99-01-Jan - res.year, res.day, res.month = ymd - else: - if ymd[0] > 31 or \ - (yearfirst and ymd[1] <= 12 and ymd[2] <= 31): - # 99-01-01 - res.year, res.month, res.day = ymd - elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12): - # 13-01-01 - res.day, res.month, res.year = ymd - else: - # 01-13-01 - res.month, res.day, res.year = ymd - - except (IndexError, ValueError, AssertionError): - return None - - if not info.validate(res): - return None - return res - -DEFAULTPARSER = parser() -def parse(timestr, parserinfo=None, **kwargs): - if parserinfo: - return parser(parserinfo).parse(timestr, **kwargs) - else: - return DEFAULTPARSER.parse(timestr, **kwargs) - - -class _tzparser(object): - - class _result(_resultbase): - - __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", - "start", "end"] - - class _attr(_resultbase): - __slots__ = ["month", "week", "weekday", - "yday", "jyday", "day", "time"] - - def __repr__(self): - return self._repr("") - - def __init__(self): - _resultbase.__init__(self) - self.start = self._attr() - self.end = self._attr() - - def parse(self, tzstr): - res = self._result() - l = _timelex.split(tzstr) - try: - - len_l = len(l) - - i = 0 - while i < len_l: - # BRST+3[BRDT[+2]] - j = i - while j < len_l and not [x for x in l[j] - if x in "0123456789:,-+"]: - j += 1 - if j != i: - if not res.stdabbr: - offattr = "stdoffset" - res.stdabbr = "".join(l[i:j]) - else: - offattr = "dstoffset" - res.dstabbr = "".join(l[i:j]) - i = j - if (i < len_l and - (l[i] in ('+', '-') or l[i][0] in "0123456789")): - if l[i] in ('+', '-'): - # Yes, that's right. See the TZ variable - # documentation. - signal = (1,-1)[l[i] == '+'] - i += 1 - else: - signal = -1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - setattr(res, offattr, - (int(l[i][:2])*3600+int(l[i][2:])*60)*signal) - elif i+1 < len_l and l[i+1] == ':': - # -03:00 - setattr(res, offattr, - (int(l[i])*3600+int(l[i+2])*60)*signal) - i += 2 - elif len_li <= 2: - # -[0]3 - setattr(res, offattr, - int(l[i][:2])*3600*signal) - else: - return None - i += 1 - if res.dstabbr: - break - else: - break - - if i < len_l: - for j in range(i, len_l): - if l[j] == ';': l[j] = ',' - - assert l[i] == ',' - - i += 1 - - if i >= len_l: - pass - elif (8 <= l.count(',') <= 9 and - not [y for x in l[i:] if x != ',' - for y in x if y not in "0123456789"]): - # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] - for x in (res.start, res.end): - x.month = int(l[i]) - i += 2 - if l[i] == '-': - value = int(l[i+1])*-1 - i += 1 - else: - value = int(l[i]) - i += 2 - if value: - x.week = value - x.weekday = (int(l[i])-1)%7 - else: - x.day = int(l[i]) - i += 2 - x.time = int(l[i]) - i += 2 - if i < len_l: - if l[i] in ('-','+'): - signal = (-1,1)[l[i] == "+"] - i += 1 - else: - signal = 1 - res.dstoffset = (res.stdoffset+int(l[i]))*signal - elif (l.count(',') == 2 and l[i:].count('/') <= 2 and - not [y for x in l[i:] if x not in (',','/','J','M', - '.','-',':') - for y in x if y not in "0123456789"]): - for x in (res.start, res.end): - if l[i] == 'J': - # non-leap year day (1 based) - i += 1 - x.jyday = int(l[i]) - elif l[i] == 'M': - # month[-.]week[-.]weekday - i += 1 - x.month = int(l[i]) - i += 1 - assert l[i] in ('-', '.') - i += 1 - x.week = int(l[i]) - if x.week == 5: - x.week = -1 - i += 1 - assert l[i] in ('-', '.') - i += 1 - x.weekday = (int(l[i])-1)%7 - else: - # year day (zero based) - x.yday = int(l[i])+1 - - i += 1 - - if i < len_l and l[i] == '/': - i += 1 - # 
start time - len_li = len(l[i]) - if len_li == 4: - # -0300 - x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) - elif i+1 < len_l and l[i+1] == ':': - # -03:00 - x.time = int(l[i])*3600+int(l[i+2])*60 - i += 2 - if i+1 < len_l and l[i+1] == ':': - i += 2 - x.time += int(l[i]) - elif len_li <= 2: - # -[0]3 - x.time = (int(l[i][:2])*3600) - else: - return None - i += 1 - - assert i == len_l or l[i] == ',' - - i += 1 - - assert i >= len_l - - except (IndexError, ValueError, AssertionError): - return None - - return res - - -DEFAULTTZPARSER = _tzparser() -def _parsetz(tzstr): - return DEFAULTTZPARSER.parse(tzstr) - - -def _parsems(value): - """Parse a I[.F] seconds value into (seconds, microseconds).""" - if "." not in value: - return int(value), 0 - else: - i, f = value.split(".") - return int(i), int(f.ljust(6, "0")[:6]) - - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py2/relativedelta.py b/lib/dateutil_py2/relativedelta.py deleted file mode 100644 index 0c72a8180fb7..000000000000 --- a/lib/dateutil_py2/relativedelta.py +++ /dev/null @@ -1,432 +0,0 @@ -""" -Copyright (c) 2003-2010 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. -""" -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" - -import datetime -import calendar - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) - -class relativedelta: - """ -The relativedelta type is based on the specification of the excelent -work done by M.-A. Lemburg in his mx.DateTime extension. However, -notice that this type does *NOT* implement the same algorithm as -his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - -There's two different ways to build a relativedelta instance. The -first one is passing it two date/datetime classes: - - relativedelta(datetime1, datetime2) - -And the other way is to use the following keyword arguments: - - year, month, day, hour, minute, second, microsecond: - Absolute information. - - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative. - - weekday: - One of the weekday instances (MO, TU, etc). These instances may - receive a parameter N, specifying the Nth weekday, which could - be positive or negative (like MO(+1) or MO(-2). Not specifying - it is the same as specifying +1. You can also use an integer, - where 0=MO. - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - -Here is the behavior of operations with relativedelta: - -1) Calculate the absolute year, using the 'year' argument, or the - original datetime year, if the argument is not present. - -2) Add the relative 'years' argument to the absolute year. 
- -3) Do steps 1 and 2 for month/months. - -4) Calculate the absolute day, using the 'day' argument, or the - original datetime day, if the argument is not present. Then, - subtract from the day until it fits in the year and month - found after their operations. - -5) Add the relative 'days' argument to the absolute day. Notice - that the 'weeks' argument is multiplied by 7 and added to - 'days'. - -6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, - microsecond/microseconds. - -7) If the 'weekday' argument is present, calculate the weekday, - with the given (wday, nth) tuple. wday is the index of the - weekday (0-6, 0=Mon), and nth is the number of weeks to add - forward or backward, depending on its signal. Notice that if - the calculated date is already Monday, for example, using - (0, 1) or (0, -1) won't change the day. - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - if dt1 and dt2: - if not isinstance(dt1, datetime.date) or \ - not isinstance(dt2, datetime.date): - raise TypeError, "relativedelta only diffs datetime/date" - if type(dt1) is not type(dt2): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month) - self._set_months(months) - dtm = self.__radd__(dt2) - if dt1 < dt2: - while dt1 > dtm: - months += 1 - self._set_months(months) - dtm = self.__radd__(dt2) - else: - while dt1 < dtm: - months -= 1 - self._set_months(months) - dtm = self.__radd__(dt2) - delta = dt1 - dtm - self.seconds = delta.seconds+delta.days*86400 - self.microseconds = delta.microseconds - else: - self.years = years - self.months = months - self.days = days+weeks*7 - self.leapdays = leapdays - self.hours = hours - self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if type(weekday) is int: - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError, "invalid year day (%d)" % yday - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = self.microseconds//abs(self.microseconds) - div, mod = divmod(self.microseconds*s, 1000000) - self.microseconds = mod*s - self.seconds += div*s - if abs(self.seconds) > 59: - s = self.seconds//abs(self.seconds) - div, mod = divmod(self.seconds*s, 60) - self.seconds = mod*s - self.minutes += div*s - 
if abs(self.minutes) > 59: - s = self.minutes//abs(self.minutes) - div, mod = divmod(self.minutes*s, 60) - self.minutes = mod*s - self.hours += div*s - if abs(self.hours) > 23: - s = self.hours//abs(self.hours) - div, mod = divmod(self.hours*s, 24) - self.hours = mod*s - self.days += div*s - if abs(self.months) > 11: - s = self.months//abs(self.months) - div, mod = divmod(self.months*s, 12) - self.months = mod*s - self.years += div*s - if (self.hours or self.minutes or self.seconds or self.microseconds or - self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = self.months//abs(self.months) - div, mod = divmod(self.months*s, 12) - self.months = mod*s - self.years = div*s - else: - self.years = 0 - - def __radd__(self, other): - if not isinstance(other, datetime.date): - raise TypeError, "unsupported type for add operation" - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth)-1)*7 - if nth > 0: - jumpdays += (7-ret.weekday()+weekday)%7 - else: - jumpdays += (ret.weekday()-weekday)%7 - jumpdays *= -1 - ret += datetime.timedelta(days=jumpdays) - return ret - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __add__(self, other): - if not isinstance(other, relativedelta): - raise TypeError, "unsupported type for add operation" - return relativedelta(years=other.years+self.years, - months=other.months+self.months, - days=other.days+self.days, - hours=other.hours+self.hours, - minutes=other.minutes+self.minutes, - seconds=other.seconds+self.seconds, - microseconds=other.microseconds+self.microseconds, - leapdays=other.leapdays or self.leapdays, - year=other.year or self.year, - month=other.month or self.month, - day=other.day or self.day, - weekday=other.weekday or self.weekday, - hour=other.hour or self.hour, - minute=other.minute or self.minute, - second=other.second or self.second, - microsecond=other.second or self.microsecond) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - raise TypeError, "unsupported type for sub operation" - return relativedelta(years=other.years-self.years, - months=other.months-self.months, - days=other.days-self.days, - hours=other.hours-self.hours, - minutes=other.minutes-self.minutes, - seconds=other.seconds-self.seconds, - microseconds=other.microseconds-self.microseconds, - leapdays=other.leapdays or self.leapdays, - year=other.year or self.year, - month=other.month or self.month, 
- day=other.day or self.day, - weekday=other.weekday or self.weekday, - hour=other.hour or self.hour, - minute=other.minute or self.minute, - second=other.second or self.second, - microsecond=other.second or self.microsecond) - - def __neg__(self): - return relativedelta(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __nonzero__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - - def __mul__(self, other): - f = float(other) - return relativedelta(years=self.years*f, - months=self.months*f, - days=self.days*f, - hours=self.hours*f, - minutes=self.minutes*f, - seconds=self.seconds*f, - microseconds=self.microseconds*f, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return False - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - self.months == other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - return self.__mul__(1/float(other)) - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("%s=%+d" % (attr, value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, `value`)) - return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py2/rrule.py b/lib/dateutil_py2/rrule.py deleted file mode 100644 index 6bd83cad3722..000000000000 --- a/lib/dateutil_py2/rrule.py +++ /dev/null @@ -1,1097 +0,0 @@ -""" -Copyright (c) 2003-2010 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. 
-""" -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" - -import itertools -import datetime -import calendar -import thread -import sys - -__all__ = ["rrule", "rruleset", "rrulestr", - "YEARLY", "MONTHLY", "WEEKLY", "DAILY", - "HOURLY", "MINUTELY", "SECONDLY", - "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -# Every mask is 7 days longer to handle cross-year weekly periods. -M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+ - [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) -M365MASK = list(M366MASK) -M29, M30, M31 = range(1,30), range(1,31), range(1,32) -MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -MDAY365MASK = list(MDAY366MASK) -M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0) -NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -NMDAY365MASK = list(NMDAY366MASK) -M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366) -M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365) -WDAYMASK = [0,1,2,3,4,5,6]*55 -del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] -MDAY365MASK = tuple(MDAY365MASK) -M365MASK = tuple(M365MASK) - -(YEARLY, - MONTHLY, - WEEKLY, - DAILY, - HOURLY, - MINUTELY, - SECONDLY) = range(7) - -# Imported on demand. -easter = None -parser = None - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - if n == 0: - raise ValueError, "Can't create weekday with n == 0" - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) - -class rrulebase: - def __init__(self, cache=False): - if cache: - self._cache = [] - self._cache_lock = thread.allocate_lock() - self._cache_gen = self._iter() - self._cache_complete = False - else: - self._cache = None - self._cache_complete = False - self._len = None - - def __iter__(self): - if self._cache_complete: - return iter(self._cache) - elif self._cache is None: - return self._iter() - else: - return self._iter_cached() - - def _iter_cached(self): - i = 0 - gen = self._cache_gen - cache = self._cache - acquire = self._cache_lock.acquire - release = self._cache_lock.release - while gen: - if i == len(cache): - acquire() - if self._cache_complete: - break - try: - for j in range(10): - cache.append(gen.next()) - except StopIteration: - self._cache_gen = gen = None - self._cache_complete = True - break - release() - yield cache[i] - i += 1 - while i < self._len: - yield cache[i] - i += 1 - - def __getitem__(self, item): - if self._cache_complete: - return self._cache[item] - elif isinstance(item, slice): - if item.step and item.step < 0: - return list(iter(self))[item] - else: - return list(itertools.islice(self, - item.start or 0, - item.stop or sys.maxint, - item.step or 1)) - elif item >= 0: - gen = iter(self) - try: - for i in range(item+1): - res = gen.next() - except StopIteration: - raise IndexError - return res - else: - return list(iter(self))[item] - - def __contains__(self, item): - if self._cache_complete: - return item in self._cache - else: - for i in self: - if i == item: - return True - elif i > 
item: - return False - return False - - # __len__() introduces a large performance penality. - def count(self): - if self._len is None: - for x in self: pass - return self._len - - def before(self, dt, inc=False): - if self._cache_complete: - gen = self._cache - else: - gen = self - last = None - if inc: - for i in gen: - if i > dt: - break - last = i - else: - for i in gen: - if i >= dt: - break - last = i - return last - - def after(self, dt, inc=False): - if self._cache_complete: - gen = self._cache - else: - gen = self - if inc: - for i in gen: - if i >= dt: - return i - else: - for i in gen: - if i > dt: - return i - return None - - def between(self, after, before, inc=False): - if self._cache_complete: - gen = self._cache - else: - gen = self - started = False - l = [] - if inc: - for i in gen: - if i > before: - break - elif not started: - if i >= after: - started = True - l.append(i) - else: - l.append(i) - else: - for i in gen: - if i >= before: - break - elif not started: - if i > after: - started = True - l.append(i) - else: - l.append(i) - return l - -class rrule(rrulebase): - def __init__(self, freq, dtstart=None, - interval=1, wkst=None, count=None, until=None, bysetpos=None, - bymonth=None, bymonthday=None, byyearday=None, byeaster=None, - byweekno=None, byweekday=None, - byhour=None, byminute=None, bysecond=None, - cache=False): - rrulebase.__init__(self, cache) - global easter - if not dtstart: - dtstart = datetime.datetime.now().replace(microsecond=0) - elif not isinstance(dtstart, datetime.datetime): - dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) - else: - dtstart = dtstart.replace(microsecond=0) - self._dtstart = dtstart - self._tzinfo = dtstart.tzinfo - self._freq = freq - self._interval = interval - self._count = count - if until and not isinstance(until, datetime.datetime): - until = datetime.datetime.fromordinal(until.toordinal()) - self._until = until - if wkst is None: - self._wkst = calendar.firstweekday() - elif type(wkst) is int: - self._wkst = wkst - else: - self._wkst = wkst.weekday - if bysetpos is None: - self._bysetpos = None - elif type(bysetpos) is int: - if bysetpos == 0 or not (-366 <= bysetpos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - self._bysetpos = (bysetpos,) - else: - self._bysetpos = tuple(bysetpos) - for pos in self._bysetpos: - if pos == 0 or not (-366 <= pos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - if not (byweekno or byyearday or bymonthday or - byweekday is not None or byeaster is not None): - if freq == YEARLY: - if not bymonth: - bymonth = dtstart.month - bymonthday = dtstart.day - elif freq == MONTHLY: - bymonthday = dtstart.day - elif freq == WEEKLY: - byweekday = dtstart.weekday() - # bymonth - if not bymonth: - self._bymonth = None - elif type(bymonth) is int: - self._bymonth = (bymonth,) - else: - self._bymonth = tuple(bymonth) - # byyearday - if not byyearday: - self._byyearday = None - elif type(byyearday) is int: - self._byyearday = (byyearday,) - else: - self._byyearday = tuple(byyearday) - # byeaster - if byeaster is not None: - if not easter: - from dateutil import easter - if type(byeaster) is int: - self._byeaster = (byeaster,) - else: - self._byeaster = tuple(byeaster) - else: - self._byeaster = None - # bymonthay - if not bymonthday: - self._bymonthday = () - self._bynmonthday = () - elif type(bymonthday) is int: - if bymonthday < 0: - self._bynmonthday = (bymonthday,) - self._bymonthday = () - 
else: - self._bymonthday = (bymonthday,) - self._bynmonthday = () - else: - self._bymonthday = tuple([x for x in bymonthday if x > 0]) - self._bynmonthday = tuple([x for x in bymonthday if x < 0]) - # byweekno - if byweekno is None: - self._byweekno = None - elif type(byweekno) is int: - self._byweekno = (byweekno,) - else: - self._byweekno = tuple(byweekno) - # byweekday / bynweekday - if byweekday is None: - self._byweekday = None - self._bynweekday = None - elif type(byweekday) is int: - self._byweekday = (byweekday,) - self._bynweekday = None - elif hasattr(byweekday, "n"): - if not byweekday.n or freq > MONTHLY: - self._byweekday = (byweekday.weekday,) - self._bynweekday = None - else: - self._bynweekday = ((byweekday.weekday, byweekday.n),) - self._byweekday = None - else: - self._byweekday = [] - self._bynweekday = [] - for wday in byweekday: - if type(wday) is int: - self._byweekday.append(wday) - elif not wday.n or freq > MONTHLY: - self._byweekday.append(wday.weekday) - else: - self._bynweekday.append((wday.weekday, wday.n)) - self._byweekday = tuple(self._byweekday) - self._bynweekday = tuple(self._bynweekday) - if not self._byweekday: - self._byweekday = None - elif not self._bynweekday: - self._bynweekday = None - # byhour - if byhour is None: - if freq < HOURLY: - self._byhour = (dtstart.hour,) - else: - self._byhour = None - elif type(byhour) is int: - self._byhour = (byhour,) - else: - self._byhour = tuple(byhour) - # byminute - if byminute is None: - if freq < MINUTELY: - self._byminute = (dtstart.minute,) - else: - self._byminute = None - elif type(byminute) is int: - self._byminute = (byminute,) - else: - self._byminute = tuple(byminute) - # bysecond - if bysecond is None: - if freq < SECONDLY: - self._bysecond = (dtstart.second,) - else: - self._bysecond = None - elif type(bysecond) is int: - self._bysecond = (bysecond,) - else: - self._bysecond = tuple(bysecond) - - if self._freq >= HOURLY: - self._timeset = None - else: - self._timeset = [] - for hour in self._byhour: - for minute in self._byminute: - for second in self._bysecond: - self._timeset.append( - datetime.time(hour, minute, second, - tzinfo=self._tzinfo)) - self._timeset.sort() - self._timeset = tuple(self._timeset) - - def _iter(self): - year, month, day, hour, minute, second, weekday, yearday, _ = \ - self._dtstart.timetuple() - - # Some local variables to speed things up a bit - freq = self._freq - interval = self._interval - wkst = self._wkst - until = self._until - bymonth = self._bymonth - byweekno = self._byweekno - byyearday = self._byyearday - byweekday = self._byweekday - byeaster = self._byeaster - bymonthday = self._bymonthday - bynmonthday = self._bynmonthday - bysetpos = self._bysetpos - byhour = self._byhour - byminute = self._byminute - bysecond = self._bysecond - - ii = _iterinfo(self) - ii.rebuild(year, month) - - getdayset = {YEARLY:ii.ydayset, - MONTHLY:ii.mdayset, - WEEKLY:ii.wdayset, - DAILY:ii.ddayset, - HOURLY:ii.ddayset, - MINUTELY:ii.ddayset, - SECONDLY:ii.ddayset}[freq] - - if freq < HOURLY: - timeset = self._timeset - else: - gettimeset = {HOURLY:ii.htimeset, - MINUTELY:ii.mtimeset, - SECONDLY:ii.stimeset}[freq] - if ((freq >= HOURLY and - self._byhour and hour not in self._byhour) or - (freq >= MINUTELY and - self._byminute and minute not in self._byminute) or - (freq >= SECONDLY and - self._bysecond and second not in self._bysecond)): - timeset = () - else: - timeset = gettimeset(hour, minute, second) - - total = 0 - count = self._count - while True: - # Get dayset with the 
right frequency - dayset, start, end = getdayset(year, month, day) - - # Do the "hard" work ;-) - filtered = False - for i in dayset[start:end]: - if ((bymonth and ii.mmask[i] not in bymonth) or - (byweekno and not ii.wnomask[i]) or - (byweekday and ii.wdaymask[i] not in byweekday) or - (ii.nwdaymask and not ii.nwdaymask[i]) or - (byeaster and not ii.eastermask[i]) or - ((bymonthday or bynmonthday) and - ii.mdaymask[i] not in bymonthday and - ii.nmdaymask[i] not in bynmonthday) or - (byyearday and - ((i < ii.yearlen and i+1 not in byyearday - and -ii.yearlen+i not in byyearday) or - (i >= ii.yearlen and i+1-ii.yearlen not in byyearday - and -ii.nextyearlen+i-ii.yearlen - not in byyearday)))): - dayset[i] = None - filtered = True - - # Output results - if bysetpos and timeset: - poslist = [] - for pos in bysetpos: - if pos < 0: - daypos, timepos = divmod(pos, len(timeset)) - else: - daypos, timepos = divmod(pos-1, len(timeset)) - try: - i = [x for x in dayset[start:end] - if x is not None][daypos] - time = timeset[timepos] - except IndexError: - pass - else: - date = datetime.date.fromordinal(ii.yearordinal+i) - res = datetime.datetime.combine(date, time) - if res not in poslist: - poslist.append(res) - poslist.sort() - for res in poslist: - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - total += 1 - yield res - if count: - count -= 1 - if not count: - self._len = total - return - else: - for i in dayset[start:end]: - if i is not None: - date = datetime.date.fromordinal(ii.yearordinal+i) - for time in timeset: - res = datetime.datetime.combine(date, time) - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - total += 1 - yield res - if count: - count -= 1 - if not count: - self._len = total - return - - # Handle frequency and interval - fixday = False - if freq == YEARLY: - year += interval - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == MONTHLY: - month += interval - if month > 12: - div, mod = divmod(month, 12) - month = mod - year += div - if month == 0: - month = 12 - year -= 1 - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == WEEKLY: - if wkst > weekday: - day += -(weekday+1+(6-wkst))+self._interval*7 - else: - day += -(weekday-wkst)+self._interval*7 - weekday = wkst - fixday = True - elif freq == DAILY: - day += interval - fixday = True - elif freq == HOURLY: - if filtered: - # Jump to one iteration before next day - hour += ((23-hour)//interval)*interval - while True: - hour += interval - div, mod = divmod(hour, 24) - if div: - hour = mod - day += div - fixday = True - if not byhour or hour in byhour: - break - timeset = gettimeset(hour, minute, second) - elif freq == MINUTELY: - if filtered: - # Jump to one iteration before next day - minute += ((1439-(hour*60+minute))//interval)*interval - while True: - minute += interval - div, mod = divmod(minute, 60) - if div: - minute = mod - hour += div - div, mod = divmod(hour, 24) - if div: - hour = mod - day += div - fixday = True - filtered = False - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute)): - break - timeset = gettimeset(hour, minute, second) - elif freq == SECONDLY: - if filtered: - # Jump to one iteration before next day - second += (((86399-(hour*3600+minute*60+second)) - //interval)*interval) - while True: - second += self._interval - div, mod = divmod(second, 60) - if div: - second = mod - minute += div - div, mod = 
divmod(minute, 60) - if div: - minute = mod - hour += div - div, mod = divmod(hour, 24) - if div: - hour = mod - day += div - fixday = True - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute) and - (not bysecond or second in bysecond)): - break - timeset = gettimeset(hour, minute, second) - - if fixday and day > 28: - daysinmonth = calendar.monthrange(year, month)[1] - if day > daysinmonth: - while day > daysinmonth: - day -= daysinmonth - month += 1 - if month == 13: - month = 1 - year += 1 - if year > datetime.MAXYEAR: - self._len = total - return - daysinmonth = calendar.monthrange(year, month)[1] - ii.rebuild(year, month) - -class _iterinfo(object): - __slots__ = ["rrule", "lastyear", "lastmonth", - "yearlen", "nextyearlen", "yearordinal", "yearweekday", - "mmask", "mrange", "mdaymask", "nmdaymask", - "wdaymask", "wnomask", "nwdaymask", "eastermask"] - - def __init__(self, rrule): - for attr in self.__slots__: - setattr(self, attr, None) - self.rrule = rrule - - def rebuild(self, year, month): - # Every mask is 7 days longer to handle cross-year weekly periods. - rr = self.rrule - if year != self.lastyear: - self.yearlen = 365+calendar.isleap(year) - self.nextyearlen = 365+calendar.isleap(year+1) - firstyday = datetime.date(year, 1, 1) - self.yearordinal = firstyday.toordinal() - self.yearweekday = firstyday.weekday() - - wday = datetime.date(year, 1, 1).weekday() - if self.yearlen == 365: - self.mmask = M365MASK - self.mdaymask = MDAY365MASK - self.nmdaymask = NMDAY365MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M365RANGE - else: - self.mmask = M366MASK - self.mdaymask = MDAY366MASK - self.nmdaymask = NMDAY366MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M366RANGE - - if not rr._byweekno: - self.wnomask = None - else: - self.wnomask = [0]*(self.yearlen+7) - #no1wkst = firstwkst = self.wdaymask.index(rr._wkst) - no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7 - if no1wkst >= 4: - no1wkst = 0 - # Number of days in the year, plus the days we got - # from last year. - wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7 - else: - # Number of days in the year, minus the days we - # left in last year. - wyearlen = self.yearlen-no1wkst - div, mod = divmod(wyearlen, 7) - numweeks = div+mod//4 - for n in rr._byweekno: - if n < 0: - n += numweeks+1 - if not (0 < n <= numweeks): - continue - if n > 1: - i = no1wkst+(n-1)*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - else: - i = no1wkst - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if 1 in rr._byweekno: - # Check week number 1 of next year as well - # TODO: Check -numweeks for next year. - i = no1wkst+numweeks*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - if i < self.yearlen: - # If week starts in next year, we - # don't care about it. - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if no1wkst: - # Check last week number of last year as - # well. If no1wkst is 0, either the year - # started on week start, or week number 1 - # got days from last year, so there are no - # days from last year's last week number in - # this year. 
- if -1 not in rr._byweekno: - lyearweekday = datetime.date(year-1,1,1).weekday() - lno1wkst = (7-lyearweekday+rr._wkst)%7 - lyearlen = 365+calendar.isleap(year-1) - if lno1wkst >= 4: - lno1wkst = 0 - lnumweeks = 52+(lyearlen+ - (lyearweekday-rr._wkst)%7)%7//4 - else: - lnumweeks = 52+(self.yearlen-no1wkst)%7//4 - else: - lnumweeks = -1 - if lnumweeks in rr._byweekno: - for i in range(no1wkst): - self.wnomask[i] = 1 - - if (rr._bynweekday and - (month != self.lastmonth or year != self.lastyear)): - ranges = [] - if rr._freq == YEARLY: - if rr._bymonth: - for month in rr._bymonth: - ranges.append(self.mrange[month-1:month+1]) - else: - ranges = [(0, self.yearlen)] - elif rr._freq == MONTHLY: - ranges = [self.mrange[month-1:month+1]] - if ranges: - # Weekly frequency won't get here, so we may not - # care about cross-year weekly periods. - self.nwdaymask = [0]*self.yearlen - for first, last in ranges: - last -= 1 - for wday, n in rr._bynweekday: - if n < 0: - i = last+(n+1)*7 - i -= (self.wdaymask[i]-wday)%7 - else: - i = first+(n-1)*7 - i += (7-self.wdaymask[i]+wday)%7 - if first <= i <= last: - self.nwdaymask[i] = 1 - - if rr._byeaster: - self.eastermask = [0]*(self.yearlen+7) - eyday = easter.easter(year).toordinal()-self.yearordinal - for offset in rr._byeaster: - self.eastermask[eyday+offset] = 1 - - self.lastyear = year - self.lastmonth = month - - def ydayset(self, year, month, day): - return range(self.yearlen), 0, self.yearlen - - def mdayset(self, year, month, day): - set = [None]*self.yearlen - start, end = self.mrange[month-1:month+1] - for i in range(start, end): - set[i] = i - return set, start, end - - def wdayset(self, year, month, day): - # We need to handle cross-year weeks here. - set = [None]*(self.yearlen+7) - i = datetime.date(year, month, day).toordinal()-self.yearordinal - start = i - for j in range(7): - set[i] = i - i += 1 - #if (not (0 <= i < self.yearlen) or - # self.wdaymask[i] == self.rrule._wkst): - # This will cross the year boundary, if necessary. 
- if self.wdaymask[i] == self.rrule._wkst: - break - return set, start, i - - def ddayset(self, year, month, day): - set = [None]*self.yearlen - i = datetime.date(year, month, day).toordinal()-self.yearordinal - set[i] = i - return set, i, i+1 - - def htimeset(self, hour, minute, second): - set = [] - rr = self.rrule - for minute in rr._byminute: - for second in rr._bysecond: - set.append(datetime.time(hour, minute, second, - tzinfo=rr._tzinfo)) - set.sort() - return set - - def mtimeset(self, hour, minute, second): - set = [] - rr = self.rrule - for second in rr._bysecond: - set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) - set.sort() - return set - - def stimeset(self, hour, minute, second): - return (datetime.time(hour, minute, second, - tzinfo=self.rrule._tzinfo),) - - -class rruleset(rrulebase): - - class _genitem: - def __init__(self, genlist, gen): - try: - self.dt = gen() - genlist.append(self) - except StopIteration: - pass - self.genlist = genlist - self.gen = gen - - def next(self): - try: - self.dt = self.gen() - except StopIteration: - self.genlist.remove(self) - - def __cmp__(self, other): - return cmp(self.dt, other.dt) - - def __init__(self, cache=False): - rrulebase.__init__(self, cache) - self._rrule = [] - self._rdate = [] - self._exrule = [] - self._exdate = [] - - def rrule(self, rrule): - self._rrule.append(rrule) - - def rdate(self, rdate): - self._rdate.append(rdate) - - def exrule(self, exrule): - self._exrule.append(exrule) - - def exdate(self, exdate): - self._exdate.append(exdate) - - def _iter(self): - rlist = [] - self._rdate.sort() - self._genitem(rlist, iter(self._rdate).next) - for gen in [iter(x).next for x in self._rrule]: - self._genitem(rlist, gen) - rlist.sort() - exlist = [] - self._exdate.sort() - self._genitem(exlist, iter(self._exdate).next) - for gen in [iter(x).next for x in self._exrule]: - self._genitem(exlist, gen) - exlist.sort() - lastdt = None - total = 0 - while rlist: - ritem = rlist[0] - if not lastdt or lastdt != ritem.dt: - while exlist and exlist[0] < ritem: - exlist[0].next() - exlist.sort() - if not exlist or ritem != exlist[0]: - total += 1 - yield ritem.dt - lastdt = ritem.dt - ritem.next() - rlist.sort() - self._len = total - -class _rrulestr: - - _freq_map = {"YEARLY": YEARLY, - "MONTHLY": MONTHLY, - "WEEKLY": WEEKLY, - "DAILY": DAILY, - "HOURLY": HOURLY, - "MINUTELY": MINUTELY, - "SECONDLY": SECONDLY} - - _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6} - - def _handle_int(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = int(value) - - def _handle_int_list(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = [int(x) for x in value.split(',')] - - _handle_INTERVAL = _handle_int - _handle_COUNT = _handle_int - _handle_BYSETPOS = _handle_int_list - _handle_BYMONTH = _handle_int_list - _handle_BYMONTHDAY = _handle_int_list - _handle_BYYEARDAY = _handle_int_list - _handle_BYEASTER = _handle_int_list - _handle_BYWEEKNO = _handle_int_list - _handle_BYHOUR = _handle_int_list - _handle_BYMINUTE = _handle_int_list - _handle_BYSECOND = _handle_int_list - - def _handle_FREQ(self, rrkwargs, name, value, **kwargs): - rrkwargs["freq"] = self._freq_map[value] - - def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): - global parser - if not parser: - from dateutil import parser - try: - rrkwargs["until"] = parser.parse(value, - ignoretz=kwargs.get("ignoretz"), - tzinfos=kwargs.get("tzinfos")) - except ValueError: - raise ValueError, "invalid until date" - - def 
_handle_WKST(self, rrkwargs, name, value, **kwargs): - rrkwargs["wkst"] = self._weekday_map[value] - - def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg): - l = [] - for wday in value.split(','): - for i in range(len(wday)): - if wday[i] not in '+-0123456789': - break - n = wday[:i] or None - w = wday[i:] - if n: n = int(n) - l.append(weekdays[self._weekday_map[w]](n)) - rrkwargs["byweekday"] = l - - _handle_BYDAY = _handle_BYWEEKDAY - - def _parse_rfc_rrule(self, line, - dtstart=None, - cache=False, - ignoretz=False, - tzinfos=None): - if line.find(':') != -1: - name, value = line.split(':') - if name != "RRULE": - raise ValueError, "unknown parameter name" - else: - value = line - rrkwargs = {} - for pair in value.split(';'): - name, value = pair.split('=') - name = name.upper() - value = value.upper() - try: - getattr(self, "_handle_"+name)(rrkwargs, name, value, - ignoretz=ignoretz, - tzinfos=tzinfos) - except AttributeError: - raise ValueError, "unknown parameter '%s'" % name - except (KeyError, ValueError): - raise ValueError, "invalid '%s': %s" % (name, value) - return rrule(dtstart=dtstart, cache=cache, **rrkwargs) - - def _parse_rfc(self, s, - dtstart=None, - cache=False, - unfold=False, - forceset=False, - compatible=False, - ignoretz=False, - tzinfos=None): - global parser - if compatible: - forceset = True - unfold = True - s = s.upper() - if not s.strip(): - raise ValueError, "empty string" - if unfold: - lines = s.splitlines() - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - else: - lines = s.split() - if (not forceset and len(lines) == 1 and - (s.find(':') == -1 or s.startswith('RRULE:'))): - return self._parse_rfc_rrule(lines[0], cache=cache, - dtstart=dtstart, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - rrulevals = [] - rdatevals = [] - exrulevals = [] - exdatevals = [] - for line in lines: - if not line: - continue - if line.find(':') == -1: - name = "RRULE" - value = line - else: - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError, "empty property name" - name = parms[0] - parms = parms[1:] - if name == "RRULE": - for parm in parms: - raise ValueError, "unsupported RRULE parm: "+parm - rrulevals.append(value) - elif name == "RDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError, "unsupported RDATE parm: "+parm - rdatevals.append(value) - elif name == "EXRULE": - for parm in parms: - raise ValueError, "unsupported EXRULE parm: "+parm - exrulevals.append(value) - elif name == "EXDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError, "unsupported RDATE parm: "+parm - exdatevals.append(value) - elif name == "DTSTART": - for parm in parms: - raise ValueError, "unsupported DTSTART parm: "+parm - if not parser: - from dateutil import parser - dtstart = parser.parse(value, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - raise ValueError, "unsupported property: "+name - if (forceset or len(rrulevals) > 1 or - rdatevals or exrulevals or exdatevals): - if not parser and (rdatevals or exdatevals): - from dateutil import parser - set = rruleset(cache=cache) - for value in rrulevals: - set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in rdatevals: - for datestr in value.split(','): - set.rdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exrulevals: - 
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exdatevals: - for datestr in value.split(','): - set.exdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - if compatible and dtstart: - set.rdate(dtstart) - return set - else: - return self._parse_rfc_rrule(rrulevals[0], - dtstart=dtstart, - cache=cache, - ignoretz=ignoretz, - tzinfos=tzinfos) - - def __call__(self, s, **kwargs): - return self._parse_rfc(s, **kwargs) - -rrulestr = _rrulestr() - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py2/tz.py b/lib/dateutil_py2/tz.py deleted file mode 100644 index 0e28d6b33209..000000000000 --- a/lib/dateutil_py2/tz.py +++ /dev/null @@ -1,951 +0,0 @@ -""" -Copyright (c) 2003-2007 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. -""" -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" - -import datetime -import struct -import time -import sys -import os - -relativedelta = None -parser = None -rrule = None - -__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", - "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"] - -try: - from dateutil.tzwin import tzwin, tzwinlocal -except (ImportError, OSError): - tzwin, tzwinlocal = None, None - -ZERO = datetime.timedelta(0) -EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal() - -class tzutc(datetime.tzinfo): - - def utcoffset(self, dt): - return ZERO - - def dst(self, dt): - return ZERO - - def tzname(self, dt): - return "UTC" - - def __eq__(self, other): - return (isinstance(other, tzutc) or - (isinstance(other, tzoffset) and other._offset == ZERO)) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - -class tzoffset(datetime.tzinfo): - - def __init__(self, name, offset): - self._name = name - self._offset = datetime.timedelta(seconds=offset) - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return ZERO - - def tzname(self, dt): - return self._name - - def __eq__(self, other): - return (isinstance(other, tzoffset) and - self._offset == other._offset) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(%s, %s)" % (self.__class__.__name__, - `self._name`, - self._offset.days*86400+self._offset.seconds) - - __reduce__ = object.__reduce__ - -class tzlocal(datetime.tzinfo): - - _std_offset = datetime.timedelta(seconds=-time.timezone) - if time.daylight: - _dst_offset = datetime.timedelta(seconds=-time.altzone) - else: - _dst_offset = _std_offset - - def utcoffset(self, dt): - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if self._isdst(dt): - return self._dst_offset-self._std_offset - else: - return ZERO - - def tzname(self, dt): - return time.tzname[self._isdst(dt)] - - def _isdst(self, dt): - # We can't use mktime here. It is unstable when deciding if - # the hour near to a change is DST or not. 
- # - # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, - # dt.minute, dt.second, dt.weekday(), 0, -1)) - # return time.localtime(timestamp).tm_isdst - # - # The code above yields the following result: - # - #>>> import tz, datetime - #>>> t = tz.tzlocal() - #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - #'BRDT' - #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() - #'BRST' - #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - #'BRST' - #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() - #'BRDT' - #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - #'BRDT' - # - # Here is a more stable implementation: - # - timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 - + dt.hour * 3600 - + dt.minute * 60 - + dt.second) - return time.localtime(timestamp+time.timezone).tm_isdst - - def __eq__(self, other): - if not isinstance(other, tzlocal): - return False - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset) - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - -class _ttinfo(object): - __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"] - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def __repr__(self): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, `value`)) - return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - - def __eq__(self, other): - if not isinstance(other, _ttinfo): - return False - return (self.offset == other.offset and - self.delta == other.delta and - self.isdst == other.isdst and - self.abbr == other.abbr and - self.isstd == other.isstd and - self.isgmt == other.isgmt) - - def __ne__(self, other): - return not self.__eq__(other) - - def __getstate__(self): - state = {} - for name in self.__slots__: - state[name] = getattr(self, name, None) - return state - - def __setstate__(self, state): - for name in self.__slots__: - if name in state: - setattr(self, name, state[name]) - -class tzfile(datetime.tzinfo): - - # http://www.twinsun.com/tz/tz-link.htm - # ftp://elsie.nci.nih.gov/pub/tz*.tar.gz - - def __init__(self, fileobj): - if isinstance(fileobj, basestring): - self._filename = fileobj - fileobj = open(fileobj) - elif hasattr(fileobj, "name"): - self._filename = fileobj.name - else: - self._filename = `fileobj` - - # From tzfile(5): - # - # The time zone information files used by tzset(3) - # begin with the magic characters "TZif" to identify - # them as time zone information files, followed by - # sixteen bytes reserved for future use, followed by - # six four-byte values of type long, written in a - # ``standard'' byte order (the high-order byte - # of the value is written first). - - if fileobj.read(4) != "TZif": - raise ValueError, "magic not found" - - fileobj.read(16) - - ( - # The number of UTC/local indicators stored in the file. - ttisgmtcnt, - - # The number of standard/wall indicators stored in the file. - ttisstdcnt, - - # The number of leap seconds for which data is - # stored in the file. - leapcnt, - - # The number of "transition times" for which data - # is stored in the file. - timecnt, - - # The number of "local time types" for which data - # is stored in the file (must not be zero). - typecnt, - - # The number of characters of "time zone - # abbreviation strings" stored in the file. 
- charcnt, - - ) = struct.unpack(">6l", fileobj.read(24)) - - # The above header is followed by tzh_timecnt four-byte - # values of type long, sorted in ascending order. - # These values are written in ``standard'' byte order. - # Each is used as a transition time (as returned by - # time(2)) at which the rules for computing local time - # change. - - if timecnt: - self._trans_list = struct.unpack(">%dl" % timecnt, - fileobj.read(timecnt*4)) - else: - self._trans_list = [] - - # Next come tzh_timecnt one-byte values of type unsigned - # char; each one tells which of the different types of - # ``local time'' types described in the file is associated - # with the same-indexed transition time. These values - # serve as indices into an array of ttinfo structures that - # appears next in the file. - - if timecnt: - self._trans_idx = struct.unpack(">%dB" % timecnt, - fileobj.read(timecnt)) - else: - self._trans_idx = [] - - # Each ttinfo structure is written as a four-byte value - # for tt_gmtoff of type long, in a standard byte - # order, followed by a one-byte value for tt_isdst - # and a one-byte value for tt_abbrind. In each - # structure, tt_gmtoff gives the number of - # seconds to be added to UTC, tt_isdst tells whether - # tm_isdst should be set by localtime(3), and - # tt_abbrind serves as an index into the array of - # time zone abbreviation characters that follow the - # ttinfo structure(s) in the file. - - ttinfo = [] - - for i in range(typecnt): - ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) - - abbr = fileobj.read(charcnt) - - # Then there are tzh_leapcnt pairs of four-byte - # values, written in standard byte order; the - # first value of each pair gives the time (as - # returned by time(2)) at which a leap second - # occurs; the second gives the total number of - # leap seconds to be applied after the given time. - # The pairs of values are sorted in ascending order - # by time. - - # Not used, for now - if leapcnt: - leap = struct.unpack(">%dl" % (leapcnt*2), - fileobj.read(leapcnt*8)) - - # Then there are tzh_ttisstdcnt standard/wall - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as standard - # time or wall clock time, and are used when - # a time zone file is used in handling POSIX-style - # time zone environment variables. - - if ttisstdcnt: - isstd = struct.unpack(">%db" % ttisstdcnt, - fileobj.read(ttisstdcnt)) - - # Finally, there are tzh_ttisgmtcnt UTC/local - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as UTC or - # local time, and are used when a time zone file - # is used in handling POSIX-style time zone envi- - # ronment variables. - - if ttisgmtcnt: - isgmt = struct.unpack(">%db" % ttisgmtcnt, - fileobj.read(ttisgmtcnt)) - - # ** Everything has been read ** - - # Build ttinfo list - self._ttinfo_list = [] - for i in range(typecnt): - gmtoff, isdst, abbrind = ttinfo[i] - # Round to full-minutes if that's not the case. Python's - # datetime doesn't accept sub-minute timezones. Check - # http://python.org/sf/1447945 for some information. 
- gmtoff = (gmtoff+30)//60*60 - tti = _ttinfo() - tti.offset = gmtoff - tti.delta = datetime.timedelta(seconds=gmtoff) - tti.isdst = isdst - tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] - tti.isstd = (ttisstdcnt > i and isstd[i] != 0) - tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) - self._ttinfo_list.append(tti) - - # Replace ttinfo indexes for ttinfo objects. - trans_idx = [] - for idx in self._trans_idx: - trans_idx.append(self._ttinfo_list[idx]) - self._trans_idx = tuple(trans_idx) - - # Set standard, dst, and before ttinfos. before will be - # used when a given time is before any transitions, - # and will be set to the first non-dst ttinfo, or to - # the first dst, if all of them are dst. - self._ttinfo_std = None - self._ttinfo_dst = None - self._ttinfo_before = None - if self._ttinfo_list: - if not self._trans_list: - self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0] - else: - for i in range(timecnt-1,-1,-1): - tti = self._trans_idx[i] - if not self._ttinfo_std and not tti.isdst: - self._ttinfo_std = tti - elif not self._ttinfo_dst and tti.isdst: - self._ttinfo_dst = tti - if self._ttinfo_std and self._ttinfo_dst: - break - else: - if self._ttinfo_dst and not self._ttinfo_std: - self._ttinfo_std = self._ttinfo_dst - - for tti in self._ttinfo_list: - if not tti.isdst: - self._ttinfo_before = tti - break - else: - self._ttinfo_before = self._ttinfo_list[0] - - # Now fix transition times to become relative to wall time. - # - # I'm not sure about this. In my tests, the tz source file - # is setup to wall time, and in the binary file isstd and - # isgmt are off, so it should be in wall time. OTOH, it's - # always in gmt time. Let me know if you have comments - # about this. - laststdoffset = 0 - self._trans_list = list(self._trans_list) - for i in range(len(self._trans_list)): - tti = self._trans_idx[i] - if not tti.isdst: - # This is std time. - self._trans_list[i] += tti.offset - laststdoffset = tti.offset - else: - # This is dst time. Convert to std. - self._trans_list[i] += laststdoffset - self._trans_list = tuple(self._trans_list) - - def _find_ttinfo(self, dt, laststd=0): - timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 - + dt.hour * 3600 - + dt.minute * 60 - + dt.second) - idx = 0 - for trans in self._trans_list: - if timestamp < trans: - break - idx += 1 - else: - return self._ttinfo_std - if idx == 0: - return self._ttinfo_before - if laststd: - while idx > 0: - tti = self._trans_idx[idx-1] - if not tti.isdst: - return tti - idx -= 1 - else: - return self._ttinfo_std - else: - return self._trans_idx[idx-1] - - def utcoffset(self, dt): - if not self._ttinfo_std: - return ZERO - return self._find_ttinfo(dt).delta - - def dst(self, dt): - if not self._ttinfo_dst: - return ZERO - tti = self._find_ttinfo(dt) - if not tti.isdst: - return ZERO - - # The documentation says that utcoffset()-dst() must - # be constant for every dt. - return tti.delta-self._find_ttinfo(dt, laststd=1).delta - - # An alternative for that would be: - # - # return self._ttinfo_dst.offset-self._ttinfo_std.offset - # - # However, this class stores historical changes in the - # dst offset, so I belive that this wouldn't be the right - # way to implement this. 
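The ttinfo records built above are what make utcoffset(), dst() and tzname() cheap lookups at runtime, with utcoffset() - dst() held constant as the datetime documentation requires. A minimal sketch of how this is typically exercised, assuming dateutil is installed and a system zoneinfo database is available to the gettz() helper defined further down in this module:

{{{
# Illustrative only: resolve a named zone and query its offsets.
import datetime
from dateutil import tz

eastern = tz.gettz("America/New_York")   # a tzfile instance on most Unix systems

summer = datetime.datetime(2003, 7, 1, 12, 0, tzinfo=eastern)
winter = datetime.datetime(2003, 1, 1, 12, 0, tzinfo=eastern)

print(summer.utcoffset(), summer.dst(), summer.tzname())  # UTC-4, 1h of DST, 'EDT'
print(winter.utcoffset(), winter.dst(), winter.tzname())  # UTC-5, no DST, 'EST'
}}}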
- - def tzname(self, dt): - if not self._ttinfo_std: - return None - return self._find_ttinfo(dt).abbr - - def __eq__(self, other): - if not isinstance(other, tzfile): - return False - return (self._trans_list == other._trans_list and - self._trans_idx == other._trans_idx and - self._ttinfo_list == other._ttinfo_list) - - def __ne__(self, other): - return not self.__eq__(other) - - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, `self._filename`) - - def __reduce__(self): - if not os.path.isfile(self._filename): - raise ValueError, "Unpickable %s class" % self.__class__.__name__ - return (self.__class__, (self._filename,)) - -class tzrange(datetime.tzinfo): - - def __init__(self, stdabbr, stdoffset=None, - dstabbr=None, dstoffset=None, - start=None, end=None): - global relativedelta - if not relativedelta: - from dateutil import relativedelta - self._std_abbr = stdabbr - self._dst_abbr = dstabbr - if stdoffset is not None: - self._std_offset = datetime.timedelta(seconds=stdoffset) - else: - self._std_offset = ZERO - if dstoffset is not None: - self._dst_offset = datetime.timedelta(seconds=dstoffset) - elif dstabbr and stdoffset is not None: - self._dst_offset = self._std_offset+datetime.timedelta(hours=+1) - else: - self._dst_offset = ZERO - if dstabbr and start is None: - self._start_delta = relativedelta.relativedelta( - hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) - else: - self._start_delta = start - if dstabbr and end is None: - self._end_delta = relativedelta.relativedelta( - hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) - else: - self._end_delta = end - - def utcoffset(self, dt): - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if self._isdst(dt): - return self._dst_offset-self._std_offset - else: - return ZERO - - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def _isdst(self, dt): - if not self._start_delta: - return False - year = datetime.datetime(dt.year,1,1) - start = year+self._start_delta - end = year+self._end_delta - dt = dt.replace(tzinfo=None) - if start < end: - return dt >= start and dt < end - else: - return dt >= start or dt < end - - def __eq__(self, other): - if not isinstance(other, tzrange): - return False - return (self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr and - self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._start_delta == other._start_delta and - self._end_delta == other._end_delta) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - -class tzstr(tzrange): - - def __init__(self, s): - global parser - if not parser: - from dateutil import parser - self._s = s - - res = parser._parsetz(s) - if res is None: - raise ValueError, "unknown string format" - - # Here we break the compatibility with the TZ variable handling. - # GMT-3 actually *means* the timezone -3. - if res.stdabbr in ("GMT", "UTC"): - res.stdoffset *= -1 - - # We must initialize it first, since _delta() needs - # _std_offset and _dst_offset set. Use False in start/end - # to avoid building it two times. 
- tzrange.__init__(self, res.stdabbr, res.stdoffset, - res.dstabbr, res.dstoffset, - start=False, end=False) - - if not res.dstabbr: - self._start_delta = None - self._end_delta = None - else: - self._start_delta = self._delta(res.start) - if self._start_delta: - self._end_delta = self._delta(res.end, isend=1) - - def _delta(self, x, isend=0): - kwargs = {} - if x.month is not None: - kwargs["month"] = x.month - if x.weekday is not None: - kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) - if x.week > 0: - kwargs["day"] = 1 - else: - kwargs["day"] = 31 - elif x.day: - kwargs["day"] = x.day - elif x.yday is not None: - kwargs["yearday"] = x.yday - elif x.jyday is not None: - kwargs["nlyearday"] = x.jyday - if not kwargs: - # Default is to start on first sunday of april, and end - # on last sunday of october. - if not isend: - kwargs["month"] = 4 - kwargs["day"] = 1 - kwargs["weekday"] = relativedelta.SU(+1) - else: - kwargs["month"] = 10 - kwargs["day"] = 31 - kwargs["weekday"] = relativedelta.SU(-1) - if x.time is not None: - kwargs["seconds"] = x.time - else: - # Default is 2AM. - kwargs["seconds"] = 7200 - if isend: - # Convert to standard time, to follow the documented way - # of working with the extra hour. See the documentation - # of the tzinfo class. - delta = self._dst_offset-self._std_offset - kwargs["seconds"] -= delta.seconds+delta.days*86400 - return relativedelta.relativedelta(**kwargs) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, `self._s`) - -class _tzicalvtzcomp: - def __init__(self, tzoffsetfrom, tzoffsetto, isdst, - tzname=None, rrule=None): - self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) - self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) - self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom - self.isdst = isdst - self.tzname = tzname - self.rrule = rrule - -class _tzicalvtz(datetime.tzinfo): - def __init__(self, tzid, comps=[]): - self._tzid = tzid - self._comps = comps - self._cachedate = [] - self._cachecomp = [] - - def _find_comp(self, dt): - if len(self._comps) == 1: - return self._comps[0] - dt = dt.replace(tzinfo=None) - try: - return self._cachecomp[self._cachedate.index(dt)] - except ValueError: - pass - lastcomp = None - lastcompdt = None - for comp in self._comps: - if not comp.isdst: - # Handle the extra hour in DST -> STD - compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True) - else: - compdt = comp.rrule.before(dt, inc=True) - if compdt and (not lastcompdt or lastcompdt < compdt): - lastcompdt = compdt - lastcomp = comp - if not lastcomp: - # RFC says nothing about what to do when a given - # time is before the first onset date. We'll look for the - # first standard component, or the first component, if - # none is found. 
- for comp in self._comps: - if not comp.isdst: - lastcomp = comp - break - else: - lastcomp = comp[0] - self._cachedate.insert(0, dt) - self._cachecomp.insert(0, lastcomp) - if len(self._cachedate) > 10: - self._cachedate.pop() - self._cachecomp.pop() - return lastcomp - - def utcoffset(self, dt): - return self._find_comp(dt).tzoffsetto - - def dst(self, dt): - comp = self._find_comp(dt) - if comp.isdst: - return comp.tzoffsetdiff - else: - return ZERO - - def tzname(self, dt): - return self._find_comp(dt).tzname - - def __repr__(self): - return "" % `self._tzid` - - __reduce__ = object.__reduce__ - -class tzical: - def __init__(self, fileobj): - global rrule - if not rrule: - from dateutil import rrule - - if isinstance(fileobj, basestring): - self._s = fileobj - fileobj = open(fileobj) - elif hasattr(fileobj, "name"): - self._s = fileobj.name - else: - self._s = `fileobj` - - self._vtz = {} - - self._parse_rfc(fileobj.read()) - - def keys(self): - return self._vtz.keys() - - def get(self, tzid=None): - if tzid is None: - keys = self._vtz.keys() - if len(keys) == 0: - raise ValueError, "no timezones defined" - elif len(keys) > 1: - raise ValueError, "more than one timezone available" - tzid = keys[0] - return self._vtz.get(tzid) - - def _parse_offset(self, s): - s = s.strip() - if not s: - raise ValueError, "empty offset" - if s[0] in ('+', '-'): - signal = (-1,+1)[s[0]=='+'] - s = s[1:] - else: - signal = +1 - if len(s) == 4: - return (int(s[:2])*3600+int(s[2:])*60)*signal - elif len(s) == 6: - return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal - else: - raise ValueError, "invalid offset: "+s - - def _parse_rfc(self, s): - lines = s.splitlines() - if not lines: - raise ValueError, "empty string" - - # Unfold - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - - tzid = None - comps = [] - invtz = False - comptype = None - for line in lines: - if not line: - continue - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError, "empty property name" - name = parms[0].upper() - parms = parms[1:] - if invtz: - if name == "BEGIN": - if value in ("STANDARD", "DAYLIGHT"): - # Process component - pass - else: - raise ValueError, "unknown component: "+value - comptype = value - founddtstart = False - tzoffsetfrom = None - tzoffsetto = None - rrulelines = [] - tzname = None - elif name == "END": - if value == "VTIMEZONE": - if comptype: - raise ValueError, \ - "component not closed: "+comptype - if not tzid: - raise ValueError, \ - "mandatory TZID not found" - if not comps: - raise ValueError, \ - "at least one component is needed" - # Process vtimezone - self._vtz[tzid] = _tzicalvtz(tzid, comps) - invtz = False - elif value == comptype: - if not founddtstart: - raise ValueError, \ - "mandatory DTSTART not found" - if tzoffsetfrom is None: - raise ValueError, \ - "mandatory TZOFFSETFROM not found" - if tzoffsetto is None: - raise ValueError, \ - "mandatory TZOFFSETFROM not found" - # Process component - rr = None - if rrulelines: - rr = rrule.rrulestr("\n".join(rrulelines), - compatible=True, - ignoretz=True, - cache=True) - comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, - (comptype == "DAYLIGHT"), - tzname, rr) - comps.append(comp) - comptype = None - else: - raise ValueError, \ - "invalid component end: "+value - elif comptype: - if name == "DTSTART": - rrulelines.append(line) - founddtstart = True - elif name in ("RRULE", 
"RDATE", "EXRULE", "EXDATE"): - rrulelines.append(line) - elif name == "TZOFFSETFROM": - if parms: - raise ValueError, \ - "unsupported %s parm: %s "%(name, parms[0]) - tzoffsetfrom = self._parse_offset(value) - elif name == "TZOFFSETTO": - if parms: - raise ValueError, \ - "unsupported TZOFFSETTO parm: "+parms[0] - tzoffsetto = self._parse_offset(value) - elif name == "TZNAME": - if parms: - raise ValueError, \ - "unsupported TZNAME parm: "+parms[0] - tzname = value - elif name == "COMMENT": - pass - else: - raise ValueError, "unsupported property: "+name - else: - if name == "TZID": - if parms: - raise ValueError, \ - "unsupported TZID parm: "+parms[0] - tzid = value - elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): - pass - else: - raise ValueError, "unsupported property: "+name - elif name == "BEGIN" and value == "VTIMEZONE": - tzid = None - comps = [] - invtz = True - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, `self._s`) - -if sys.platform != "win32": - TZFILES = ["/etc/localtime", "localtime"] - TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"] -else: - TZFILES = [] - TZPATHS = [] - -def gettz(name=None): - tz = None - if not name: - try: - name = os.environ["TZ"] - except KeyError: - pass - if name is None or name == ":": - for filepath in TZFILES: - if not os.path.isabs(filepath): - filename = filepath - for path in TZPATHS: - filepath = os.path.join(path, filename) - if os.path.isfile(filepath): - break - else: - continue - if os.path.isfile(filepath): - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = tzlocal() - else: - if name.startswith(":"): - name = name[:-1] - if os.path.isabs(name): - if os.path.isfile(name): - tz = tzfile(name) - else: - tz = None - else: - for path in TZPATHS: - filepath = os.path.join(path, name) - if not os.path.isfile(filepath): - filepath = filepath.replace(' ','_') - if not os.path.isfile(filepath): - continue - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = None - if tzwin: - try: - tz = tzwin(name) - except OSError: - pass - if not tz: - from dateutil.zoneinfo import gettz - tz = gettz(name) - if not tz: - for c in name: - # name must have at least one offset to be a tzstr - if c in "0123456789": - try: - tz = tzstr(name) - except ValueError: - pass - break - else: - if name in ("GMT", "UTC"): - tz = tzutc() - elif name in time.tzname: - tz = tzlocal() - return tz - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py2/tzwin.py b/lib/dateutil_py2/tzwin.py deleted file mode 100644 index 073e0ff68e3f..000000000000 --- a/lib/dateutil_py2/tzwin.py +++ /dev/null @@ -1,180 +0,0 @@ -# This code was originally contributed by Jeffrey Harris. 
-import datetime -import struct -import _winreg - -__author__ = "Jeffrey Harris & Gustavo Niemeyer " - -__all__ = ["tzwin", "tzwinlocal"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - -def _settzkeyname(): - global TZKEYNAME - handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - try: - _winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - -_settzkeyname() - -class tzwinbase(datetime.tzinfo): - """tzinfo class based on win32's timezones available in the registry.""" - - def utcoffset(self, dt): - if self._isdst(dt): - return datetime.timedelta(minutes=self._dstoffset) - else: - return datetime.timedelta(minutes=self._stdoffset) - - def dst(self, dt): - if self._isdst(dt): - minutes = self._dstoffset - self._stdoffset - return datetime.timedelta(minutes=minutes) - else: - return datetime.timedelta(0) - - def tzname(self, dt): - if self._isdst(dt): - return self._dstname - else: - return self._stdname - - def list(): - """Return a list of all time zones known to the system.""" - handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - tzkey = _winreg.OpenKey(handle, TZKEYNAME) - result = [_winreg.EnumKey(tzkey, i) - for i in range(_winreg.QueryInfoKey(tzkey)[0])] - tzkey.Close() - handle.Close() - return result - list = staticmethod(list) - - def display(self): - return self._display - - def _isdst(self, dt): - dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - if dston < dstoff: - return dston <= dt.replace(tzinfo=None) < dstoff - else: - return not dstoff <= dt.replace(tzinfo=None) < dston - - -class tzwin(tzwinbase): - - def __init__(self, name): - self._name = name - - handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name)) - keydict = valuestodict(tzkey) - tzkey.Close() - handle.Close() - - self._stdname = keydict["Std"].encode("iso-8859-1") - self._dstname = keydict["Dlt"].encode("iso-8859-1") - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1 - - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - - def __init__(self): - - handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - - tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME) - keydict = valuestodict(tzlocalkey) - tzlocalkey.Close() - - self._stdname = keydict["StandardName"].encode("iso-8859-1") - self._dstname = keydict["DaylightName"].encode("iso-8859-1") - - try: - tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, 
self._stdname)) - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - tzkey.Close() - except OSError: - self._display = None - - handle.Close() - - self._stdoffset = -keydict["Bias"]-keydict["StandardBias"] - self._dstoffset = self._stdoffset-keydict["DaylightBias"] - - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:6] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:6] - - def __reduce__(self): - return (self.__class__, ()) - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """dayofweek == 0 means Sunday, whichweek 5 means last instance""" - first = datetime.datetime(year, month, 1, hour, minute) - weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) - for n in xrange(whichweek): - dt = weekdayone+(whichweek-n)*ONEWEEK - if dt.month == month: - return dt - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dict = {} - size = _winreg.QueryInfoKey(key)[1] - for i in range(size): - data = _winreg.EnumValue(key, i) - dict[data[0]] = data[1] - return dict diff --git a/lib/dateutil_py2/zoneinfo/__init__.py b/lib/dateutil_py2/zoneinfo/__init__.py deleted file mode 100644 index 9bed6264c8b9..000000000000 --- a/lib/dateutil_py2/zoneinfo/__init__.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Copyright (c) 2003-2005 Gustavo Niemeyer - -This module offers extensions to the standard python 2.3+ -datetime module. -""" -from dateutil.tz import tzfile -from tarfile import TarFile -import os - -__author__ = "Gustavo Niemeyer " -__license__ = "PSF License" - -__all__ = ["setcachesize", "gettz", "rebuild"] - -CACHE = [] -CACHESIZE = 10 - -class tzfile(tzfile): - def __reduce__(self): - return (gettz, (self._filename,)) - -def getzoneinfofile(): - filenames = os.listdir(os.path.join(os.path.dirname(__file__))) - filenames.sort() - filenames.reverse() - for entry in filenames: - if entry.startswith("zoneinfo") and ".tar." in entry: - return os.path.join(os.path.dirname(__file__), entry) - return None - -ZONEINFOFILE = getzoneinfofile() - -del getzoneinfofile - -def setcachesize(size): - global CACHESIZE, CACHE - CACHESIZE = size - del CACHE[size:] - -def gettz(name): - tzinfo = None - if ZONEINFOFILE: - for cachedname, tzinfo in CACHE: - if cachedname == name: - break - else: - tf = TarFile.open(ZONEINFOFILE) - try: - zonefile = tf.extractfile(name) - except KeyError: - tzinfo = None - else: - tzinfo = tzfile(zonefile) - tf.close() - CACHE.insert(0, (name, tzinfo)) - del CACHE[CACHESIZE:] - return tzinfo - -def rebuild(filename, tag=None, format="gz"): - import tempfile, shutil - tmpdir = tempfile.mkdtemp() - zonedir = os.path.join(tmpdir, "zoneinfo") - moduledir = os.path.dirname(__file__) - if tag: tag = "-"+tag - targetname = "zoneinfo%s.tar.%s" % (tag, format) - try: - tf = TarFile.open(filename) - for name in tf.getnames(): - if not (name.endswith(".sh") or - name.endswith(".tab") or - name == "leapseconds"): - tf.extract(name, tmpdir) - filepath = os.path.join(tmpdir, name) - os.system("zic -d %s %s" % (zonedir, filepath)) - tf.close() - target = os.path.join(moduledir, targetname) - for entry in os.listdir(moduledir): - if entry.startswith("zoneinfo") and ".tar." 
in entry: - os.unlink(os.path.join(moduledir, entry)) - tf = TarFile.open(target, "w:%s" % format) - for entry in os.listdir(zonedir): - entrypath = os.path.join(zonedir, entry) - tf.add(entrypath, entry) - tf.close() - finally: - shutil.rmtree(tmpdir) diff --git a/lib/dateutil_py2/zoneinfo/zoneinfo-2010g.tar.gz b/lib/dateutil_py2/zoneinfo/zoneinfo-2010g.tar.gz deleted file mode 100644 index 8bd4f96402be..000000000000 Binary files a/lib/dateutil_py2/zoneinfo/zoneinfo-2010g.tar.gz and /dev/null differ diff --git a/lib/dateutil_py3/LICENSE b/lib/dateutil_py3/LICENSE deleted file mode 100644 index 5834335bd9da..000000000000 --- a/lib/dateutil_py3/LICENSE +++ /dev/null @@ -1,30 +0,0 @@ -dateutil - Extensions to the standard Python datetime module. - -Copyright (c) 2003-2011 - Gustavo Niemeyer -Copyright (c) 2012 - Tomi Pieviläinen - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/lib/dateutil_py3/NEWS b/lib/dateutil_py3/NEWS deleted file mode 100644 index 3a0a8ed12aae..000000000000 --- a/lib/dateutil_py3/NEWS +++ /dev/null @@ -1,164 +0,0 @@ -Version 2.1 ------------ - -- New maintainer - -- Dateutil now works on Python 2.6, 2.7 and 3.2 from same codebase (with six) - -- #704047: Ismael Carnales' patch for a new time format - -- Small bug fixes, thanks for reporters! - - -Version 2.0 ------------ - -- Ported to Python 3, by Brian Jones. If you need dateutil for Python 2.X, - please continue using the 1.X series. - -- There's no such thing as a "PSF License". This source code is now - made available under the Simplified BSD license. See LICENSE for - details. - -Version 1.5 ------------ - -- As reported by Mathieu Bridon, rrules were matching the bysecond rules - incorrectly against byminute in some circumstances when the SECONDLY - frequency was in use, due to a copy & paste bug. The problem has been - unittested and corrected. - -- Adam Ryan reported a problem in the relativedelta implementation which - affected the yearday parameter in the month of January specifically. - This has been unittested and fixed. - -- Updated timezone information. 
- - -Version 1.4.1 -------------- - -- Updated timezone information. - - -Version 1.4 ------------ - -- Fixed another parser precision problem on conversion of decimal seconds - to microseconds, as reported by Erik Brown. Now these issues are gone - for real since it's not using floating point arithmetic anymore. - -- Fixed case where tzrange.utcoffset and tzrange.dst() might fail due - to a date being used where a datetime was expected (reported and fixed - by Lennart Regebro). - -- Prevent tzstr from introducing daylight timings in strings that didn't - specify them (reported by Lennart Regebro). - -- Calls like gettz("GMT+3") and gettz("UTC-2") will now return the - expected values, instead of the TZ variable behavior. - -- Fixed DST signal handling in zoneinfo files. Reported by - Nicholas F. Fabry and John-Mark Gurney. - - -Version 1.3 ------------ - -- Fixed precision problem on conversion of decimal seconds to - microseconds, as reported by Skip Montanaro. - -- Fixed bug in constructor of parser, and converted parser classes to - new-style classes. Original report and patch by Michael Elsdörfer. - -- Initialize tzid and comps in tz.py, to prevent the code from ever - raising a NameError (even with broken files). Johan Dahlin suggested - the fix after a pyflakes run. - -- Version is now published in dateutil.__version__, as requested - by Darren Dale. - -- All code is compatible with new-style division. - - -Version 1.2 ------------ - -- Now tzfile will round timezones to full-minutes if necessary, - since Python's datetime doesn't support sub-minute offsets. - Thanks to Ilpo Nyyssönen for reporting the issue. - -- Removed bare string exceptions, as reported and fixed by - Wilfredo Sánchez Vega. - -- Fix bug in leap count parsing (reported and fixed by Eugene Oden). - - -Version 1.1 ------------ - -- Fixed rrule byyearday handling. Abramo Bagnara pointed out that - RFC2445 allows negative numbers. - -- Fixed --prefix handling in setup.py (by Sidnei da Silva). - -- Now tz.gettz() returns a tzlocal instance when not given any - arguments and no other timezone information is found. - -- Updating timezone information to version 2005q. - - -Version 1.0 ------------ - -- Fixed parsing of XXhXXm formatted time after day/month/year - has been parsed. - -- Added patch by Jeffrey Harris optimizing rrule.__contains__. - - -Version 0.9 ------------ - -- Fixed pickling of timezone types, as reported by - Andreas Köhler. - -- Implemented internal timezone information with binary - timezone files [1]. datautil.tz.gettz() function will now - try to use the system timezone files, and fallback to - the internal versions. It's also possible to ask for - the internal versions directly by using - dateutil.zoneinfo.gettz(). - -- New tzwin timezone type, allowing access to Windows - internal timezones (contributed by Jeffrey Harris). - -- Fixed parsing of unicode date strings. - -- Accept parserinfo instances as the parser constructor - parameter, besides parserinfo (sub)classes. - -- Changed weekday to spell the not-set n value as None - instead of 0. - -- Fixed other reported bugs. - -[1] http://www.twinsun.com/tz/tz-link.htm - - -Version 0.5 ------------ - -- Removed FREQ_ prefix from rrule frequency constants - WARNING: this breaks compatibility with previous versions. - -- Fixed rrule.between() for cases where "after" is achieved - before even starting, as reported by Andreas Köhler. 
- -- Fixed two digit zero-year parsing (such as 31-Dec-00), as - reported by Jim Abramson, and included test case for this. - -- Sort exdate and rdate before iterating over them, so that - it's not necessary to sort them before adding to the rruleset, - as reported by Nicholas Piper. - diff --git a/lib/dateutil_py3/README b/lib/dateutil_py3/README deleted file mode 100644 index 9453699e7d54..000000000000 --- a/lib/dateutil_py3/README +++ /dev/null @@ -1,1970 +0,0 @@ -## This file is in the moin format. The latest version is found -## at https://moin.conectiva.com.br/DateUtil - -== Contents == -[[TableOfContents]] - -== Description == -The '''dateutil''' module provides powerful extensions to -the standard '''datetime''' module, available in Python. - -== Features == - - * Computing of relative deltas (next month, next year, - next monday, last week of month, etc); - - * Computing of relative deltas between two given - date and/or datetime objects; - - * Computing of dates based on very flexible recurrence rules, - using a superset of the - [ftp://ftp.rfc-editor.org/in-notes/rfc2445.txt iCalendar] - specification. Parsing of RFC strings is supported as well. - - * Generic parsing of dates in almost any string format; - - * Timezone (tzinfo) implementations for tzfile(5) format - files (/etc/localtime, /usr/share/zoneinfo, etc), TZ - environment string (in all known formats), iCalendar - format files, given ranges (with help from relative deltas), - local machine timezone, fixed offset timezone, UTC timezone, - and Windows registry-based time zones. - - * Internal up-to-date world timezone information based on - Olson's database. - - * Computing of Easter Sunday dates for any given year, - using Western, Orthodox or Julian algorithms; - - * More than 400 test cases. - -== Quick example == -Here's a snapshot, just to give an idea about the power of the -package. For more examples, look at the documentation below. - -Suppose you want to know how much time is left, in -years/months/days/etc, before the next easter happening on a -year with a Friday 13th in August, and you want to get today's -date out of the "date" unix system command. Here is the code: -{{{ -from dateutil.relativedelta import * -from dateutil.easter import * -from dateutil.rrule import * -from dateutil.parser import * -from datetime import * -import commands -import os -now = parse(commands.getoutput("date")) -today = now.date() -year = rrule(YEARLY,bymonth=8,bymonthday=13,byweekday=FR)[0].year -rdelta = relativedelta(easter(year), today) -print "Today is:", today -print "Year with next Aug 13th on a Friday is:", year -print "How far is the Easter of that year:", rdelta -print "And the Easter of that year is:", today+rdelta -}}} - -And here's the output: -{{{ -Today is: 2003-10-11 -Year with next Aug 13th on a Friday is: 2004 -How far is the Easter of that year: relativedelta(months=+6) -And the Easter of that year is: 2004-04-11 -}}} - -{i} Being exactly 6 months ahead was '''really''' a coincidence :) - -== Download == -The following files are available. - * attachment:python-dateutil-1.0.tar.bz2 - * attachment:python-dateutil-1.0-1.noarch.rpm - -== Author == -The dateutil module was written by GustavoNiemeyer . - -== Documentation == -The following modules are available. - -=== relativedelta === -This module offers the '''relativedelta''' type, which is based -on the specification of the excelent work done by M.-A. Lemburg in his -[http://www.egenix.com/files/python/mxDateTime.html mxDateTime] -extension. 
However, notice that this type '''does not''' implement the -same algorithm as his work. Do not expect it to behave like -{{{mxDateTime}}}'s counterpart. - -==== relativedelta type ==== - -There's two different ways to build a relativedelta instance. The -first one is passing it two {{{date}}}/{{{datetime}}} instances: -{{{ -relativedelta(datetime1, datetime2) -}}} - -This will build the relative difference between {{{datetime1}}} and -{{{datetime2}}}, so that the following constraint is always true: -{{{ -datetime2+relativedelta(datetime1, datetime2) == datetime1 -}}} - -Notice that instead of {{{datetime}}} instances, you may use -{{{date}}} instances, or a mix of both. - -And the other way is to use any of the following keyword arguments: - - year, month, day, hour, minute, second, microsecond:: - Absolute information. - - years, months, weeks, days, hours, minutes, seconds, microseconds:: - Relative information, may be negative. - - weekday:: - One of the weekday instances ({{{MO}}}, {{{TU}}}, etc). These - instances may receive a parameter {{{n}}}, specifying the {{{n}}}th - weekday, which could be positive or negative (like {{{MO(+2)}}} or - {{{MO(-3)}}}. Not specifying it is the same as specifying {{{+1}}}. - You can also use an integer, where {{{0=MO}}}. Notice that, - for example, if the calculated date is already Monday, using - {{{MO}}} or {{{MO(+1)}}} (which is the same thing in this context), - won't change the day. - - leapdays:: - Will add given days to the date found, but only if the computed - year is a leap year and the computed date is post 28 of february. - - yearday, nlyearday:: - Set the yearday or the non-leap year day (jump leap days). - These are converted to {{{day}}}/{{{month}}}/{{{leapdays}}} - information. - -==== Behavior of operations ==== -If you're curious about exactly how the relative delta will act -on operations, here is a description of its behavior. - - 1. Calculate the absolute year, using the {{{year}}} argument, or the - original datetime year, if the argument is not present. - 1. Add the relative {{{years}}} argument to the absolute year. - 1. Do steps 1 and 2 for {{{month}}}/{{{months}}}. - 1. Calculate the absolute day, using the {{{day}}} argument, or the - original datetime day, if the argument is not present. Then, subtract - from the day until it fits in the year and month found after their - operations. - 1. Add the relative {{{days}}} argument to the absolute day. Notice - that the {{{weeks}}} argument is multiplied by 7 and added to {{{days}}}. - 1. If {{{leapdays}}} is present, the computed year is a leap year, and - the computed month is after february, remove one day from the found date. - 1. Do steps 1 and 2 for {{{hour}}}/{{{hours}}}, {{{minute}}}/{{{minutes}}}, - {{{second}}}/{{{seconds}}}, {{{microsecond}}}/{{{microseconds}}}. - 1. If the {{{weekday}}} argument is present, calculate the {{{n}}}th - occurrence of the given weekday. - -==== Examples ==== - -Let's begin our trip. -{{{ ->>> from datetime import *; from dateutil.relativedelta import * ->>> import calendar -}}} - -Store some values. -{{{ ->>> NOW = datetime.now() ->>> TODAY = date.today() ->>> NOW -datetime.datetime(2003, 9, 17, 20, 54, 47, 282310) ->>> TODAY -datetime.date(2003, 9, 17) -}}} - -Next month. -{{{ ->>> NOW+relativedelta(months=+1) -datetime.datetime(2003, 10, 17, 20, 54, 47, 282310) -}}} - -Next month, plus one week. 
-{{{ ->>> NOW+relativedelta(months=+1, weeks=+1) -datetime.datetime(2003, 10, 24, 20, 54, 47, 282310) -}}} - -Next month, plus one week, at 10am. -{{{ ->>> TODAY+relativedelta(months=+1, weeks=+1, hour=10) -datetime.datetime(2003, 10, 24, 10, 0) -}}} - -Let's try the other way around. Notice that the -hour setting we get in the relativedelta is relative, -since it's a difference, and the weeks parameter -has gone. -{{{ ->>> relativedelta(datetime(2003, 10, 24, 10, 0), TODAY) -relativedelta(months=+1, days=+7, hours=+10) -}}} - -One month before one year. -{{{ ->>> NOW+relativedelta(years=+1, months=-1) -datetime.datetime(2004, 8, 17, 20, 54, 47, 282310) -}}} - -How does it handle months with different numbers of days? -Notice that adding one month will never cross the month -boundary. -{{{ ->>> date(2003,1,27)+relativedelta(months=+1) -datetime.date(2003, 2, 27) ->>> date(2003,1,31)+relativedelta(months=+1) -datetime.date(2003, 2, 28) ->>> date(2003,1,31)+relativedelta(months=+2) -datetime.date(2003, 3, 31) -}}} - -The logic for years is the same, even on leap years. -{{{ ->>> date(2000,2,28)+relativedelta(years=+1) -datetime.date(2001, 2, 28) ->>> date(2000,2,29)+relativedelta(years=+1) -datetime.date(2001, 2, 28) - ->>> date(1999,2,28)+relativedelta(years=+1) -datetime.date(2000, 2, 28) ->>> date(1999,3,1)+relativedelta(years=+1) -datetime.date(2000, 3, 1) - ->>> date(2001,2,28)+relativedelta(years=-1) -datetime.date(2000, 2, 28) ->>> date(2001,3,1)+relativedelta(years=-1) -datetime.date(2000, 3, 1) -}}} - -Next friday. -{{{ ->>> TODAY+relativedelta(weekday=FR) -datetime.date(2003, 9, 19) - ->>> TODAY+relativedelta(weekday=calendar.FRIDAY) -datetime.date(2003, 9, 19) -}}} - -Last friday in this month. -{{{ ->>> TODAY+relativedelta(day=31, weekday=FR(-1)) -datetime.date(2003, 9, 26) -}}} - -Next wednesday (it's today!). -{{{ ->>> TODAY+relativedelta(weekday=WE(+1)) -datetime.date(2003, 9, 17) -}}} - -Next wednesday, but not today. -{{{ ->>> TODAY+relativedelta(days=+1, weekday=WE(+1)) -datetime.date(2003, 9, 24) -}}} - -Following -[http://www.cl.cam.ac.uk/~mgk25/iso-time.html ISO year week number notation] -find the first day of the 15th week of 1997. -{{{ ->>> datetime(1997,1,1)+relativedelta(day=4, weekday=MO(-1), weeks=+14) -datetime.datetime(1997, 4, 7, 0, 0) -}}} - -How long ago has the millennium changed? -{{{ ->>> relativedelta(NOW, date(2001,1,1)) -relativedelta(years=+2, months=+8, days=+16, - hours=+20, minutes=+54, seconds=+47, microseconds=+282310) -}}} - -How old is John? -{{{ ->>> johnbirthday = datetime(1978, 4, 5, 12, 0) ->>> relativedelta(NOW, johnbirthday) -relativedelta(years=+25, months=+5, days=+12, - hours=+8, minutes=+54, seconds=+47, microseconds=+282310) -}}} - -It works with dates too. 
-{{{ ->>> relativedelta(TODAY, johnbirthday) -relativedelta(years=+25, months=+5, days=+11, hours=+12) -}}} - -Obtain today's date using the yearday: -{{{ ->>> date(2003, 1, 1)+relativedelta(yearday=260) -datetime.date(2003, 9, 17) -}}} - -We can use today's date, since yearday should be absolute -in the given year: -{{{ ->>> TODAY+relativedelta(yearday=260) -datetime.date(2003, 9, 17) -}}} - -Last year it should be in the same day: -{{{ ->>> date(2002, 1, 1)+relativedelta(yearday=260) -datetime.date(2002, 9, 17) -}}} - -But not in a leap year: -{{{ ->>> date(2000, 1, 1)+relativedelta(yearday=260) -datetime.date(2000, 9, 16) -}}} - -We can use the non-leap year day to ignore this: -{{{ ->>> date(2000, 1, 1)+relativedelta(nlyearday=260) -datetime.date(2000, 9, 17) -}}} - -=== rrule === -The rrule module offers a small, complete, and very fast, implementation -of the recurrence rules documented in the -[ftp://ftp.rfc-editor.org/in-notes/rfc2445.txt iCalendar RFC], including -support for caching of results. - -==== rrule type ==== -That's the base of the rrule operation. It accepts all the keywords -defined in the RFC as its constructor parameters (except {{{byday}}}, -which was renamed to {{{byweekday}}}) and more. The constructor -prototype is: -{{{ -rrule(freq) -}}} - -Where {{{freq}}} must be one of {{{YEARLY}}}, {{{MONTHLY}}}, -{{{WEEKLY}}}, {{{DAILY}}}, {{{HOURLY}}}, {{{MINUTELY}}}, -or {{{SECONDLY}}}. - -Additionally, it supports the following keyword arguments: - - cache:: - If given, it must be a boolean value specifying to enable - or disable caching of results. If you will use the same - {{{rrule}}} instance multiple times, enabling caching will - improve the performance considerably. - - dtstart:: - The recurrence start. Besides being the base for the - recurrence, missing parameters in the final recurrence - instances will also be extracted from this date. If not - given, {{{datetime.now()}}} will be used instead. - - interval:: - The interval between each {{{freq}}} iteration. For example, - when using {{{YEARLY}}}, an interval of {{{2}}} means - once every two years, but with {{{HOURLY}}}, it means - once every two hours. The default interval is {{{1}}}. - - wkst:: - The week start day. Must be one of the {{{MO}}}, {{{TU}}}, - {{{WE}}} constants, or an integer, specifying the first day - of the week. This will affect recurrences based on weekly - periods. The default week start is got from - {{{calendar.firstweekday()}}}, and may be modified by - {{{calendar.setfirstweekday()}}}. - - count:: - How many occurrences will be generated. - - until:: - If given, this must be a {{{datetime}}} instance, that will - specify the limit of the recurrence. If a recurrence instance - happens to be the same as the {{{datetime}}} instance given - in the {{{until}}} keyword, this will be the last occurrence. - - bysetpos:: - If given, it must be either an integer, or a sequence of - integers, positive or negative. Each given integer will - specify an occurrence number, corresponding to the nth - occurrence of the rule inside the frequency period. For - example, a {{{bysetpos}}} of {{{-1}}} if combined with a - {{{MONTHLY}}} frequency, and a {{{byweekday}}} of - {{{(MO, TU, WE, TH, FR)}}}, will result in the last work - day of every month. - - bymonth:: - If given, it must be either an integer, or a sequence of - integers, meaning the months to apply the recurrence to. 
- - bymonthday:: - If given, it must be either an integer, or a sequence of - integers, meaning the month days to apply the recurrence to. - - byyearday:: - If given, it must be either an integer, or a sequence of - integers, meaning the year days to apply the recurrence to. - - byweekno:: - If given, it must be either an integer, or a sequence of - integers, meaning the week numbers to apply the recurrence - to. Week numbers have the meaning described in ISO8601, - that is, the first week of the year is that containing at - least four days of the new year. - - byweekday:: - If given, it must be either an integer ({{{0 == MO}}}), a - sequence of integers, one of the weekday constants - ({{{MO}}}, {{{TU}}}, etc), or a sequence of these constants. - When given, these variables will define the weekdays where - the recurrence will be applied. It's also possible to use - an argument {{{n}}} for the weekday instances, which will - mean the {{{n}}}''th'' occurrence of this weekday in the - period. For example, with {{{MONTHLY}}}, or with - {{{YEARLY}}} and {{{BYMONTH}}}, using {{{FR(+1)}}} - in {{{byweekday}}} will specify the first friday of the - month where the recurrence happens. Notice that in the RFC - documentation, this is specified as {{{BYDAY}}}, but was - renamed to avoid the ambiguity of that keyword. - - byhour:: - If given, it must be either an integer, or a sequence of - integers, meaning the hours to apply the recurrence to. - - byminute:: - If given, it must be either an integer, or a sequence of - integers, meaning the minutes to apply the recurrence to. - - bysecond:: - If given, it must be either an integer, or a sequence of - integers, meaning the seconds to apply the recurrence to. - - byeaster:: - If given, it must be either an integer, or a sequence of - integers, positive or negative. Each integer will define - an offset from the Easter Sunday. Passing the offset - {{{0}}} to {{{byeaster}}} will yield the Easter Sunday - itself. This is an extension to the RFC specification. - -==== rrule methods ==== -The following methods are available in {{{rrule}}} instances: - - rrule.before(dt, inc=False):: - Returns the last recurrence before the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rrule.after(dt, inc=False):: - Returns the first recurrence after the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rrule.between(after, before, inc=False):: - Returns all the occurrences of the rrule between {{{after}}} - and {{{before}}}. The {{{inc}}} keyword defines what happens - if {{{after}}} and/or {{{before}}} are themselves occurrences. - With {{{inc == True}}}, they will be included in the list, - if they are found in the recurrence set. - - rrule.count():: - Returns the number of recurrences in this set. It will have - go trough the whole recurrence, if this hasn't been done - before. - -Besides these methods, {{{rrule}}} instances also support -the {{{__getitem__()}}} and {{{__contains__()}}} special methods, -meaning that these are valid expressions: -{{{ -rr = rrule(...) -if datetime(...) in rr: - ... -print rr[0] -print rr[-1] -print rr[1:2] -print rr[::-2] -}}} - -The getitem/slicing mechanism is smart enough to avoid getting the whole -recurrence set, if possible. 
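To make the query interface just described concrete, here is a small runnable sketch; the rule and the probe dates are arbitrary, chosen only for illustration, and everything it uses ({{{before}}}, {{{after}}}, {{{between}}}, membership tests and slicing) is exactly what is documented above.
{{{
# Illustrative only: an arbitrary weekly rule, queried with the methods
# and special methods described in the "rrule methods" section.
from datetime import datetime
from dateutil.rrule import rrule, WEEKLY, TU, TH

rr = rrule(WEEKLY, byweekday=(TU, TH), count=10,
           dtstart=datetime(1997, 9, 2, 9, 0))

print(rr.before(datetime(1997, 9, 10)))   # last occurrence before the probe
print(rr.after(datetime(1997, 9, 10)))    # first occurrence after the probe
print(rr.between(datetime(1997, 9, 1),    # every occurrence in the window
                 datetime(1997, 9, 20)))

print(datetime(1997, 9, 4, 9, 0) in rr)   # __contains__
print(rr[0], rr[-1])                      # __getitem__
print(rr[1:3])                            # slicing, without generating the whole set
}}}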
- -==== Notes ==== - - * The rrule type has no {{{byday}}} keyword. The equivalent keyword - has been replaced by the {{{byweekday}}} keyword, to remove the - ambiguity present in the original keyword. - - * Unlike documented in the RFC, the starting datetime ({{{dtstart}}}) - is not the first recurrence instance, unless it does fit in the - specified rules. In a python module context, this behavior makes more - sense than otherwise. Notice that you can easily get the original - behavior by using a rruleset and adding the {{{dtstart}}} as an - {{{rdate}}} recurrence. - - * Unlike documented in the RFC, every keyword is valid on every - frequency (the RFC documents that {{{byweekno}}} is only valid - on yearly frequencies, for example). - - * In addition to the documented keywords, a {{{byeaster}}} keyword - was introduced, making it easy to compute recurrent events relative - to the Easter Sunday. - -==== rrule examples ==== -These examples were converted from the RFC. - -Prepare the environment. -{{{ ->>> from dateutil.rrule import * ->>> from dateutil.parser import * ->>> from datetime import * - ->>> import pprint ->>> import sys ->>> sys.displayhook = pprint.pprint -}}} - -Daily, for 10 occurrences. -{{{ ->>> list(rrule(DAILY, count=10, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 5, 9, 0), - datetime.datetime(1997, 9, 6, 9, 0), - datetime.datetime(1997, 9, 7, 9, 0), - datetime.datetime(1997, 9, 8, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 10, 9, 0), - datetime.datetime(1997, 9, 11, 9, 0)] -}}} - -Daily until December 24, 1997 -{{{ ->>> list(rrule(DAILY, - dtstart=parse("19970902T090000"), - until=parse("19971224T000000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - (...) - datetime.datetime(1997, 12, 21, 9, 0), - datetime.datetime(1997, 12, 22, 9, 0), - datetime.datetime(1997, 12, 23, 9, 0)] -}}} - -Every other day, 5 occurrences. -{{{ ->>> list(rrule(DAILY, interval=2, count=5, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 6, 9, 0), - datetime.datetime(1997, 9, 8, 9, 0), - datetime.datetime(1997, 9, 10, 9, 0)] -}}} - -Every 10 days, 5 occurrences. -{{{ ->>> list(rrule(DAILY, interval=10, count=5, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 12, 9, 0)] -}}} - -Everyday in January, for 3 years. -{{{ ->>> list(rrule(YEARLY, bymonth=1, byweekday=range(7), - dtstart=parse("19980101T090000"), - until=parse("20000131T090000"))) -[datetime.datetime(1998, 1, 1, 9, 0), - datetime.datetime(1998, 1, 2, 9, 0), - (...) - datetime.datetime(1998, 1, 30, 9, 0), - datetime.datetime(1998, 1, 31, 9, 0), - datetime.datetime(1999, 1, 1, 9, 0), - datetime.datetime(1999, 1, 2, 9, 0), - (...) - datetime.datetime(1999, 1, 30, 9, 0), - datetime.datetime(1999, 1, 31, 9, 0), - datetime.datetime(2000, 1, 1, 9, 0), - datetime.datetime(2000, 1, 2, 9, 0), - (...) - datetime.datetime(2000, 1, 29, 9, 0), - datetime.datetime(2000, 1, 31, 9, 0)] -}}} - -Same thing, in another way. -{{{ ->>> list(rrule(DAILY, bymonth=1, - dtstart=parse("19980101T090000"), - until=parse("20000131T090000"))) -(...) 
-}}} - -Weekly for 10 occurrences. -{{{ ->>> list(rrule(WEEKLY, count=10, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 7, 9, 0), - datetime.datetime(1997, 10, 14, 9, 0), - datetime.datetime(1997, 10, 21, 9, 0), - datetime.datetime(1997, 10, 28, 9, 0), - datetime.datetime(1997, 11, 4, 9, 0)] -}}} - -Every other week, 6 occurrences. -{{{ ->>> list(rrule(WEEKLY, interval=2, count=6, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 14, 9, 0), - datetime.datetime(1997, 10, 28, 9, 0), - datetime.datetime(1997, 11, 11, 9, 0)] -}}} - -Weekly on Tuesday and Thursday for 5 weeks. -{{{ ->>> list(rrule(WEEKLY, count=10, wkst=SU, byweekday=(TU,TH), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 11, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 18, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0), - datetime.datetime(1997, 9, 25, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0)] -}}} - -Every other week on Tuesday and Thursday, for 8 occurrences. -{{{ ->>> list(rrule(WEEKLY, interval=2, count=8, - wkst=SU, byweekday=(TU,TH), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 18, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 14, 9, 0), - datetime.datetime(1997, 10, 16, 9, 0)] -}}} - -Monthly on the 1st Friday for ten occurrences. -{{{ ->>> list(rrule(MONTHLY, count=10, byweekday=FR(1), - dtstart=parse("19970905T090000"))) -[datetime.datetime(1997, 9, 5, 9, 0), - datetime.datetime(1997, 10, 3, 9, 0), - datetime.datetime(1997, 11, 7, 9, 0), - datetime.datetime(1997, 12, 5, 9, 0), - datetime.datetime(1998, 1, 2, 9, 0), - datetime.datetime(1998, 2, 6, 9, 0), - datetime.datetime(1998, 3, 6, 9, 0), - datetime.datetime(1998, 4, 3, 9, 0), - datetime.datetime(1998, 5, 1, 9, 0), - datetime.datetime(1998, 6, 5, 9, 0)] -}}} - -Every other month on the 1st and last Sunday of the month for 10 occurrences. -{{{ ->>> list(rrule(MONTHLY, interval=2, count=10, - byweekday=(SU(1), SU(-1)), - dtstart=parse("19970907T090000"))) -[datetime.datetime(1997, 9, 7, 9, 0), - datetime.datetime(1997, 9, 28, 9, 0), - datetime.datetime(1997, 11, 2, 9, 0), - datetime.datetime(1997, 11, 30, 9, 0), - datetime.datetime(1998, 1, 4, 9, 0), - datetime.datetime(1998, 1, 25, 9, 0), - datetime.datetime(1998, 3, 1, 9, 0), - datetime.datetime(1998, 3, 29, 9, 0), - datetime.datetime(1998, 5, 3, 9, 0), - datetime.datetime(1998, 5, 31, 9, 0)] -}}} - -Monthly on the second to last Monday of the month for 6 months. 
-{{{ ->>> list(rrule(MONTHLY, count=6, byweekday=MO(-2), - dtstart=parse("19970922T090000"))) -[datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 20, 9, 0), - datetime.datetime(1997, 11, 17, 9, 0), - datetime.datetime(1997, 12, 22, 9, 0), - datetime.datetime(1998, 1, 19, 9, 0), - datetime.datetime(1998, 2, 16, 9, 0)] -}}} - -Monthly on the third to the last day of the month, for 6 months. -{{{ ->>> list(rrule(MONTHLY, count=6, bymonthday=-3, - dtstart=parse("19970928T090000"))) -[datetime.datetime(1997, 9, 28, 9, 0), - datetime.datetime(1997, 10, 29, 9, 0), - datetime.datetime(1997, 11, 28, 9, 0), - datetime.datetime(1997, 12, 29, 9, 0), - datetime.datetime(1998, 1, 29, 9, 0), - datetime.datetime(1998, 2, 26, 9, 0)] -}}} - -Monthly on the 2nd and 15th of the month for 5 occurrences. -{{{ ->>> list(rrule(MONTHLY, count=5, bymonthday=(2,15), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 15, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 15, 9, 0), - datetime.datetime(1997, 11, 2, 9, 0)] -}}} - -Monthly on the first and last day of the month for 3 occurrences. -{{{ ->>> list(rrule(MONTHLY, count=5, bymonthday=(-1,1,), - dtstart=parse("1997090 -2T090000"))) -[datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 10, 1, 9, 0), - datetime.datetime(1997, 10, 31, 9, 0), - datetime.datetime(1997, 11, 1, 9, 0), - datetime.datetime(1997, 11, 30, 9, 0)] -}}} - -Every 18 months on the 10th thru 15th of the month for 10 occurrences. -{{{ ->>> list(rrule(MONTHLY, interval=18, count=10, - bymonthday=range(10,16), - dtstart=parse("19970910T090000"))) -[datetime.datetime(1997, 9, 10, 9, 0), - datetime.datetime(1997, 9, 11, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 13, 9, 0), - datetime.datetime(1997, 9, 14, 9, 0), - datetime.datetime(1997, 9, 15, 9, 0), - datetime.datetime(1999, 3, 10, 9, 0), - datetime.datetime(1999, 3, 11, 9, 0), - datetime.datetime(1999, 3, 12, 9, 0), - datetime.datetime(1999, 3, 13, 9, 0)] -}}} - -Every Tuesday, every other month, 6 occurences. -{{{ ->>> list(rrule(MONTHLY, interval=2, count=6, byweekday=TU, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 16, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0), - datetime.datetime(1997, 9, 30, 9, 0), - datetime.datetime(1997, 11, 4, 9, 0)] -}}} - -Yearly in June and July for 10 occurrences. -{{{ ->>> list(rrule(YEARLY, count=4, bymonth=(6,7), - dtstart=parse("19970610T0900 -00"))) -[datetime.datetime(1997, 6, 10, 9, 0), - datetime.datetime(1997, 7, 10, 9, 0), - datetime.datetime(1998, 6, 10, 9, 0), - datetime.datetime(1998, 7, 10, 9, 0)] -}}} - -Every 3rd year on the 1st, 100th and 200th day for 4 occurrences. -{{{ ->>> list(rrule(YEARLY, count=4, interval=3, byyearday=(1,100,200), - dtstart=parse("19970101T090000"))) -[datetime.datetime(1997, 1, 1, 9, 0), - datetime.datetime(1997, 4, 10, 9, 0), - datetime.datetime(1997, 7, 19, 9, 0), - datetime.datetime(2000, 1, 1, 9, 0)] -}}} - -Every 20th Monday of the year, 3 occurrences. -{{{ ->>> list(rrule(YEARLY, count=3, byweekday=MO(20), - dtstart=parse("19970519T090000"))) -[datetime.datetime(1997, 5, 19, 9, 0), - datetime.datetime(1998, 5, 18, 9, 0), - datetime.datetime(1999, 5, 17, 9, 0)] -}}} - -Monday of week number 20 (where the default start of the week is Monday), -3 occurrences. 
-{{{ ->>> list(rrule(YEARLY, count=3, byweekno=20, byweekday=MO, - dtstart=parse("19970512T090000"))) -[datetime.datetime(1997, 5, 12, 9, 0), - datetime.datetime(1998, 5, 11, 9, 0), - datetime.datetime(1999, 5, 17, 9, 0)] -}}} - -The week number 1 may be in the last year. -{{{ ->>> list(rrule(WEEKLY, count=3, byweekno=1, byweekday=MO, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 12, 29, 9, 0), - datetime.datetime(1999, 1, 4, 9, 0), - datetime.datetime(2000, 1, 3, 9, 0)] -}}} - -And the week numbers greater than 51 may be in the next year. -{{{ ->>> list(rrule(WEEKLY, count=3, byweekno=52, byweekday=SU, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 12, 28, 9, 0), - datetime.datetime(1998, 12, 27, 9, 0), - datetime.datetime(2000, 1, 2, 9, 0)] -}}} - -Only some years have week number 53: -{{{ ->>> list(rrule(WEEKLY, count=3, byweekno=53, byweekday=MO, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1998, 12, 28, 9, 0), - datetime.datetime(2004, 12, 27, 9, 0), - datetime.datetime(2009, 12, 28, 9, 0)] -}}} - -Every Friday the 13th, 4 occurrences. -{{{ ->>> list(rrule(YEARLY, count=4, byweekday=FR, bymonthday=13, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1998, 2, 13, 9, 0), - datetime.datetime(1998, 3, 13, 9, 0), - datetime.datetime(1998, 11, 13, 9, 0), - datetime.datetime(1999, 8, 13, 9, 0)] -}}} - -Every four years, the first Tuesday after a Monday in November, -3 occurrences (U.S. Presidential Election day): -{{{ ->>> list(rrule(YEARLY, interval=4, count=3, bymonth=11, - byweekday=TU, bymonthday=(2,3,4,5,6,7,8), - dtstart=parse("19961105T090000"))) -[datetime.datetime(1996, 11, 5, 9, 0), - datetime.datetime(2000, 11, 7, 9, 0), - datetime.datetime(2004, 11, 2, 9, 0)] -}}} - -The 3rd instance into the month of one of Tuesday, Wednesday or -Thursday, for the next 3 months: -{{{ ->>> list(rrule(MONTHLY, count=3, byweekday=(TU,WE,TH), - bysetpos=3, dtstart=parse("19970904T090000"))) -[datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 10, 7, 9, 0), - datetime.datetime(1997, 11, 6, 9, 0)] -}}} - -The 2nd to last weekday of the month, 3 occurrences. -{{{ ->>> list(rrule(MONTHLY, count=3, byweekday=(MO,TU,WE,TH,FR), - bysetpos=-2, dtstart=parse("19970929T090000"))) -[datetime.datetime(1997, 9, 29, 9, 0), - datetime.datetime(1997, 10, 30, 9, 0), - datetime.datetime(1997, 11, 27, 9, 0)] -}}} - -Every 3 hours from 9:00 AM to 5:00 PM on a specific day. -{{{ ->>> list(rrule(HOURLY, interval=3, - dtstart=parse("19970902T090000"), - until=parse("19970902T170000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 12, 0), - datetime.datetime(1997, 9, 2, 15, 0)] -}}} - -Every 15 minutes for 6 occurrences. -{{{ ->>> list(rrule(MINUTELY, interval=15, count=6, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 9, 15), - datetime.datetime(1997, 9, 2, 9, 30), - datetime.datetime(1997, 9, 2, 9, 45), - datetime.datetime(1997, 9, 2, 10, 0), - datetime.datetime(1997, 9, 2, 10, 15)] -}}} - -Every hour and a half for 4 occurrences. -{{{ ->>> list(rrule(MINUTELY, interval=90, count=4, - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 10, 30), - datetime.datetime(1997, 9, 2, 12, 0), - datetime.datetime(1997, 9, 2, 13, 30)] -}}} - -Every 20 minutes from 9:00 AM to 4:40 PM for two days. 
-{{{ ->>> list(rrule(MINUTELY, interval=20, count=48, - byhour=range(9,17), byminute=(0,20,40), - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 2, 9, 20), - (...) - datetime.datetime(1997, 9, 2, 16, 20), - datetime.datetime(1997, 9, 2, 16, 40), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 3, 9, 20), - (...) - datetime.datetime(1997, 9, 3, 16, 20), - datetime.datetime(1997, 9, 3, 16, 40)] -}}} - -An example where the days generated makes a difference because of {{{wkst}}}. -{{{ ->>> list(rrule(WEEKLY, interval=2, count=4, - byweekday=(TU,SU), wkst=MO, - dtstart=parse("19970805T090000"))) -[datetime.datetime(1997, 8, 5, 9, 0), - datetime.datetime(1997, 8, 10, 9, 0), - datetime.datetime(1997, 8, 19, 9, 0), - datetime.datetime(1997, 8, 24, 9, 0)] - ->>> list(rrule(WEEKLY, interval=2, count=4, - byweekday=(TU,SU), wkst=SU, - dtstart=parse("19970805T090000"))) -[datetime.datetime(1997, 8, 5, 9, 0), - datetime.datetime(1997, 8, 17, 9, 0), - datetime.datetime(1997, 8, 19, 9, 0), - datetime.datetime(1997, 8, 31, 9, 0)] -}}} - -==== rruleset type ==== -The {{{rruleset}}} type allows more complex recurrence setups, mixing -multiple rules, dates, exclusion rules, and exclusion dates. -The type constructor takes the following keyword arguments: - - cache:: - If True, caching of results will be enabled, improving performance - of multiple queries considerably. - -==== rruleset methods ==== -The following methods are available: - - rruleset.rrule(rrule):: - Include the given {{{rrule}}} instance in the recurrence set - generation. - - rruleset.rdate(dt):: - Include the given {{{datetime}}} instance in the recurrence - set generation. - - rruleset.exrule(rrule):: - Include the given {{{rrule}}} instance in the recurrence set - exclusion list. Dates which are part of the given recurrence - rules will not be generated, even if some inclusive {{{rrule}}} - or {{{rdate}}} matches them. - - rruleset.exdate(dt):: - Include the given {{{datetime}}} instance in the recurrence set - exclusion list. Dates included that way will not be generated, - even if some inclusive {{{rrule}}} or {{{rdate}}} matches them. - - rruleset.before(dt, inc=False):: - Returns the last recurrence before the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rruleset.after(dt, inc=False):: - Returns the first recurrence after the given {{{datetime}}} - instance. The {{{inc}}} keyword defines what happens if - {{{dt}}} '''is''' an occurrence. With {{{inc == True}}}, - if {{{dt}}} itself is an occurrence, it will be returned. - - rruleset.between(after, before, inc=False):: - Returns all the occurrences of the rrule between {{{after}}} - and {{{before}}}. The {{{inc}}} keyword defines what happens - if {{{after}}} and/or {{{before}}} are themselves occurrences. - With {{{inc == True}}}, they will be included in the list, - if they are found in the recurrence set. - - rruleset.count():: - Returns the number of recurrences in this set. It will have - go trough the whole recurrence, if this hasn't been done - before. - -Besides these methods, {{{rruleset}}} instances also support -the {{{__getitem__()}}} and {{{__contains__()}}} special methods, -meaning that these are valid expressions: -{{{ -set = rruleset(...) -if datetime(...) in set: - ... 
-print set[0] -print set[-1] -print set[1:2] -print set[::-2] -}}} - -The getitem/slicing mechanism is smart enough to avoid getting the whole -recurrence set, if possible. - -==== rruleset examples ==== -Daily, for 7 days, jumping Saturday and Sunday occurrences. -{{{ ->>> set = rruleset() ->>> set.rrule(rrule(DAILY, count=7, - dtstart=parse("19970902T090000"))) ->>> set.exrule(rrule(YEARLY, byweekday=(SA,SU), - dtstart=parse("19970902T090000"))) ->>> list(set) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 3, 9, 0), - datetime.datetime(1997, 9, 4, 9, 0), - datetime.datetime(1997, 9, 5, 9, 0), - datetime.datetime(1997, 9, 8, 9, 0)] -}}} - -Weekly, for 4 weeks, plus one time on day 7, and not on day 16. -{{{ ->>> set = rruleset() ->>> set.rrule(rrule(WEEKLY, count=4, - dtstart=parse("19970902T090000"))) ->>> set.rdate(datetime.datetime(1997, 9, 7, 9, 0)) ->>> set.exdate(datetime.datetime(1997, 9, 16, 9, 0)) ->>> list(set) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 7, 9, 0), - datetime.datetime(1997, 9, 9, 9, 0), - datetime.datetime(1997, 9, 23, 9, 0)] -}}} - -==== rrulestr() function ==== -The {{{rrulestr()}}} function is a parser for ''RFC-like'' syntaxes. -The function prototype is: -{{{ -rrulestr(str) -}}} - -The string passed as parameter may be a multiple line string, a -single line string, or just the {{{RRULE}}} property value. - -Additionally, it accepts the following keyword arguments: - - cache:: - If {{{True}}}, the {{{rruleset}}} or {{{rrule}}} created instance - will cache its results. Default is not to cache. - - dtstart:: - If given, it must be a {{{datetime}}} instance that will be used - when no {{{DTSTART}}} property is found in the parsed string. If - it is not given, and the property is not found, {{{datetime.now()}}} - will be used instead. - - unfold:: - If set to {{{True}}}, lines will be unfolded following the RFC - specification. It defaults to {{{False}}}, meaning that spaces - before every line will be stripped. - - forceset:: - If set to {{{True}}} a {{{rruleset}}} instance will be returned, - even if only a single rule is found. The default is to return an - {{{rrule}}} if possible, and an {{{rruleset}}} if necessary. - - compatible:: - If set to {{{True}}}, the parser will operate in RFC-compatible - mode. Right now it means that {{{unfold}}} will be turned on, - and if a {{{DTSTART}}} is found, it will be considered the first - recurrence instance, as documented in the RFC. - - ignoretz:: - If set to {{{True}}}, the date parser will ignore timezone - information available in the {{{DTSTART}}} property, or the - {{{UNTIL}}} attribute. - - tzinfos:: - If set, it will be passed to the datetime string parser to - resolve unknown timezone settings. For more information about - what could be used here, check the parser documentation. - -==== rrulestr() examples ==== - -Every 10 days, 5 occurrences. -{{{ ->>> list(rrulestr(""" -... DTSTART:19970902T090000 -... RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5 -... """)) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 12, 9, 0)] -}}} - -Same thing, but passing only the {{{RRULE}}} value. 
-{{{ ->>> list(rrulestr("FREQ=DAILY;INTERVAL=10;COUNT=5", - dtstart=parse("19970902T090000"))) -[datetime.datetime(1997, 9, 2, 9, 0), - datetime.datetime(1997, 9, 12, 9, 0), - datetime.datetime(1997, 9, 22, 9, 0), - datetime.datetime(1997, 10, 2, 9, 0), - datetime.datetime(1997, 10, 12, 9, 0)] -}}} - -Notice that when using a single rule, it returns an -{{{rrule}}} instance, unless {{{forceset}}} was used. -{{{ ->>> rrulestr("FREQ=DAILY;INTERVAL=10;COUNT=5") - - ->>> rrulestr(""" -... DTSTART:19970902T090000 -... RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5 -... """) - - ->>> rrulestr("FREQ=DAILY;INTERVAL=10;COUNT=5", forceset=True) - -}}} - -But when an {{{rruleset}}} is needed, it is automatically used. -{{{ ->>> rrulestr(""" -... DTSTART:19970902T090000 -... RRULE:FREQ=DAILY;INTERVAL=10;COUNT=5 -... RRULE:FREQ=DAILY;INTERVAL=5;COUNT=3 -... """) - -}}} - -=== parser === -This module offers a generic date/time string parser which is -able to parse most known formats to represent a date and/or -time. - -==== parse() function ==== -That's probably the only function you'll need from this module. -It offers you an interface to access the parser functionality and -extract a {{{datetime}}} type out of a string. - -The prototype of this function is: -{{{ -parse(timestr) -}}} - -Additionally, the following keyword arguments are available: - - default:: - If given, this must be a {{{datetime}}} instance. Any fields - missing in the parsed date will be copied from this instance. - The default value is the current date, at 00:00:00am. - - ignoretz:: - If this is true, even if a timezone is found in the string, - the parser will not use it. - - tzinfos:: - Using this keyword argument you may provide custom timezones - to the parser. If given, it must be either a dictionary with - the timezone abbreviation as key, or a function accepting a - timezone abbreviation and offset as argument. The dictionary - values and the function return must be a timezone offset - in seconds, a tzinfo subclass, or a string defining the - timezone (in the TZ environment variable format). - - dayfirst:: - This option allow one to change the precedence in which - days are parsed in date strings. The default is given in the - parserinfo instance (the default parserinfo has it set to - False). If {{{dayfirst}}} is False, the {{{MM-DD-YYYY}}} - format will have precedence over {{{DD-MM-YYYY}}} in an - ambiguous date. - - yearfirst:: - This option allow one to change the precedence in which - years are parsed in date strings. The default is given in - the parserinfo instance (the default parserinfo has it set - to False). If {{{yearfirst}}} is false, the {{{MM-DD-YY}}} - format will have precedence over {{{YY-MM-DD}}} in an - ambiguous date. - - fuzzy:: - If {{{fuzzy}}} is set to True, unknown tokens in the string - will be ignored. - - parserinfo:: - This parameter allows one to change how the string is parsed, - by using a different parserinfo class instance. Using it you - may, for example, intenationalize the parser strings, or make - it ignore additional words. - -==== Format precedence ==== -Whenever an ambiguous date is found, the {{{dayfirst}}} and -{{{yearfirst}}} parameters will control how the information -is processed. 
Here is the precedence in each case: - -If {{{dayfirst}}} is {{{False}}} and {{{yearfirst}}} is {{{False}}}, -(default, if no parameter is given): - - * {{{MM-DD-YY}}} - * {{{DD-MM-YY}}} - * {{{YY-MM-DD}}} - -If {{{dayfirst}}} is {{{True}}} and {{{yearfirst}}} is {{{False}}}: - - * {{{DD-MM-YY}}} - * {{{MM-DD-YY}}} - * {{{YY-MM-DD}}} - -If {{{dayfirst}}} is {{{False}}} and {{{yearfirst}}} is {{{True}}}: - - * {{{YY-MM-DD}}} - * {{{MM-DD-YY}}} - * {{{DD-MM-YY}}} - -If {{{dayfirst}}} is {{{True}}} and {{{yearfirst}}} is {{{True}}}: - - * {{{YY-MM-DD}}} - * {{{DD-MM-YY}}} - * {{{MM-DD-YY}}} - -==== Converting two digit years ==== -When a two digit year is found, it is processed considering -the current year, so that the computed year is never more -than 49 years after the current year, nor 50 years before the -current year. In other words, if we are in year 2003, and the -year 30 is found, it will be considered as 2030, but if the -year 60 is found, it will be considered 1960. - -==== Examples ==== -The following code will prepare the environment: -{{{ ->>> from dateutil.parser import * ->>> from dateutil.tz import * ->>> from datetime import * ->>> TZOFFSETS = {"BRST": -10800} ->>> BRSTTZ = tzoffset(-10800, "BRST") ->>> DEFAULT = datetime(2003, 9, 25) -}}} - -Some simple examples based on the {{{date}}} command, using the -{{{TZOFFSET}}} dictionary to provide the BRST timezone offset. -{{{ ->>> parse("Thu Sep 25 10:36:28 BRST 2003", tzinfos=TZOFFSETS) -datetime.datetime(2003, 9, 25, 10, 36, 28, - tzinfo=tzoffset('BRST', -10800)) - ->>> parse("2003 10:36:28 BRST 25 Sep Thu", tzinfos=TZOFFSETS) -datetime.datetime(2003, 9, 25, 10, 36, 28, - tzinfo=tzoffset('BRST', -10800)) -}}} - -Notice that since BRST is my local timezone, parsing it without -further timezone settings will yield a {{{tzlocal}}} timezone. 
-{{{ ->>> parse("Thu Sep 25 10:36:28 BRST 2003") -datetime.datetime(2003, 9, 25, 10, 36, 28, tzinfo=tzlocal()) -}}} - -We can also ask to ignore the timezone explicitly: -{{{ ->>> parse("Thu Sep 25 10:36:28 BRST 2003", ignoretz=True) -datetime.datetime(2003, 9, 25, 10, 36, 28) -}}} - -That's the same as processing a string without timezone: -{{{ ->>> parse("Thu Sep 25 10:36:28 2003") -datetime.datetime(2003, 9, 25, 10, 36, 28) -}}} - -Without the year, but passing our {{{DEFAULT}}} datetime to return -the same year, no mattering what year we currently are in: -{{{ ->>> parse("Thu Sep 25 10:36:28", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28) -}}} - -Strip it further: -{{{ ->>> parse("Thu Sep 10:36:28", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28) - ->>> parse("Thu 10:36:28", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28) - ->>> parse("Thu 10:36", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36) - ->>> parse("10:36", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36) ->>> -}}} - -Strip in a different way: -{{{ ->>> parse("Thu Sep 25 2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep 25 2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep 2003", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Another format, based on {{{date -R}}} (RFC822): -{{{ ->>> parse("Thu, 25 Sep 2003 10:49:41 -0300") -datetime.datetime(2003, 9, 25, 10, 49, 41, - tzinfo=tzoffset(None, -10800)) -}}} - -ISO format: -{{{ ->>> parse("2003-09-25T10:49:41.5-03:00") -datetime.datetime(2003, 9, 25, 10, 49, 41, 500000, - tzinfo=tzoffset(None, -10800)) -}}} - -Some variations: -{{{ ->>> parse("2003-09-25T10:49:41") -datetime.datetime(2003, 9, 25, 10, 49, 41) - ->>> parse("2003-09-25T10:49") -datetime.datetime(2003, 9, 25, 10, 49) - ->>> parse("2003-09-25T10") -datetime.datetime(2003, 9, 25, 10, 0) - ->>> parse("2003-09-25") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -ISO format, without separators: -{{{ ->>> parse("20030925T104941.5-0300") -datetime.datetime(2003, 9, 25, 10, 49, 41, 500000, - tzinfo=tzinfo=tzoffset(None, -10800)) - ->>> parse("20030925T104941-0300") -datetime.datetime(2003, 9, 25, 10, 49, 41, - tzinfo=tzoffset(None, -10800)) - ->>> parse("20030925T104941") -datetime.datetime(2003, 9, 25, 10, 49, 41) - ->>> parse("20030925T1049") -datetime.datetime(2003, 9, 25, 10, 49) - ->>> parse("20030925T10") -datetime.datetime(2003, 9, 25, 10, 0) - ->>> parse("20030925") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Everything together. 
-{{{ ->>> parse("199709020900") -datetime.datetime(1997, 9, 2, 9, 0) ->>> parse("19970902090059") -datetime.datetime(1997, 9, 2, 9, 0, 59) -}}} - -Different date orderings: -{{{ ->>> parse("2003-09-25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003-Sep-25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("25-Sep-2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("Sep-25-2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("09-25-2003") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("25-09-2003") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Check some ambiguous dates: -{{{ ->>> parse("10-09-2003") -datetime.datetime(2003, 10, 9, 0, 0) - ->>> parse("10-09-2003", dayfirst=True) -datetime.datetime(2003, 9, 10, 0, 0) - ->>> parse("10-09-03") -datetime.datetime(2003, 10, 9, 0, 0) - ->>> parse("10-09-03", yearfirst=True) -datetime.datetime(2010, 9, 3, 0, 0) -}}} - -Other date separators are allowed: -{{{ ->>> parse("2003.Sep.25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003/09/25") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Even with spaces: -{{{ ->>> parse("2003 Sep 25") -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("2003 09 25") -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Hours with letters work: -{{{ ->>> parse("10h36m28.5s", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 36, 28, 500000) - ->>> parse("01s02h03m", default=DEFAULT) -datetime.datetime(2003, 9, 25, 2, 3, 1) - ->>> parse("01h02m03", default=DEFAULT) -datetime.datetime(2003, 9, 3, 1, 2) - ->>> parse("01h02", default=DEFAULT) -datetime.datetime(2003, 9, 2, 1, 0) - ->>> parse("01h02s", default=DEFAULT) -datetime.datetime(2003, 9, 25, 1, 0, 2) -}}} - -With AM/PM: -{{{ ->>> parse("10h am", default=DEFAULT) -datetime.datetime(2003, 9, 25, 10, 0) - ->>> parse("10pm", default=DEFAULT) -datetime.datetime(2003, 9, 25, 22, 0) - ->>> parse("12:00am", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) - ->>> parse("12pm", default=DEFAULT) -datetime.datetime(2003, 9, 25, 12, 0) -}}} - -Some special treating for ''pertain'' relations: -{{{ ->>> parse("Sep 03", default=DEFAULT) -datetime.datetime(2003, 9, 3, 0, 0) - ->>> parse("Sep of 03", default=DEFAULT) -datetime.datetime(2003, 9, 25, 0, 0) -}}} - -Fuzzy parsing: -{{{ ->>> s = "Today is 25 of September of 2003, exactly " \ -... "at 10:49:41 with timezone -03:00." ->>> parse(s, fuzzy=True) -datetime.datetime(2003, 9, 25, 10, 49, 41, - tzinfo=tzoffset(None, -10800)) -}}} - -Other random formats: -{{{ ->>> parse("Wed, July 10, '96") -datetime.datetime(1996, 7, 10, 0, 0) - ->>> parse("1996.07.10 AD at 15:08:56 PDT", ignoretz=True) -datetime.datetime(1996, 7, 10, 15, 8, 56) - ->>> parse("Tuesday, April 12, 1952 AD 3:30:42pm PST", ignoretz=True) -datetime.datetime(1952, 4, 12, 15, 30, 42) - ->>> parse("November 5, 1994, 8:15:30 am EST", ignoretz=True) -datetime.datetime(1994, 11, 5, 8, 15, 30) - ->>> parse("3rd of May 2001") -datetime.datetime(2001, 5, 3, 0, 0) - ->>> parse("5:50 A.M. on June 13, 1990") -datetime.datetime(1990, 6, 13, 5, 50) -}}} - -=== easter === -This module offers a generic easter computing method for -any given year, using Western, Orthodox or Julian algorithms. - -==== easter() function ==== -This method was ported from the work done by -[http://users.chariot.net.au/~gmarts/eastalg.htm GM Arts], -on top of the algorithm by -[http://www.tondering.dk/claus/calendar.html Claus Tondering], -which was based in part on the algorithm of Ouding (1940), -as quoted in "Explanatory Supplement to the Astronomical -Almanac", P. 
Kenneth Seidelmann, editor. - -This algorithm implements three different easter -calculation methods: - - 1. Original calculation in Julian calendar, valid in - dates after 326 AD - 1. Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 1. Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - -These methods are represented by the constants: -{{{ -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 -}}} - -The default method is method 3. - -=== tz === -This module offers timezone implementations subclassing -the abstract {{{datetime.tzinfo}}} type. There are -classes to handle [http://www.twinsun.com/tz/tz-link.htm tzfile] -format files (usually are in /etc/localtime, -/usr/share/zoneinfo, etc), TZ environment string (in all -known formats), given ranges (with help from relative -deltas), local machine timezone, fixed offset timezone, -and UTC timezone. - -==== tzutc type ==== -This type implements a basic UTC timezone. The constructor of this -type accepts no parameters. - -==== tzutc examples ==== -{{{ ->>> from datetime import * ->>> from dateutil.tz import * - ->>> datetime.now() -datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) - ->>> datetime.now(tzutc()) -datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) - ->>> datetime.now(tzutc()).tzname() -'UTC' -}}} - -==== tzoffset type ==== -This type implements a fixed offset timezone, with no -support to daylight saving times. Here is the prototype of the -type constructor: -{{{ -tzoffset(name, offset) -}}} - -The {{{name}}} parameter may be optionally set to {{{None}}}, and -{{{offset}}} must be given in seconds. - -==== tzoffset examples ==== -{{{ ->>> from datetime import * ->>> from dateutil.tz import * - ->>> datetime.now(tzoffset("BRST", -10800)) -datetime.datetime(2003, 9, 27, 9, 52, 43, 624904, - tzinfo=tzinfo=tzoffset('BRST', -10800)) - ->>> datetime.now(tzoffset("BRST", -10800)).tzname() -'BRST' - ->>> datetime.now(tzoffset("BRST", -10800)).astimezone(tzutc()) -datetime.datetime(2003, 9, 27, 12, 53, 11, 446419, - tzinfo=tzutc()) -}}} - -==== tzlocal type ==== -This type implements timezone settings as known by the -operating system. The constructor of this type accepts no -parameters. - -==== tzlocal examples ==== -{{{ ->>> from datetime import * ->>> from dateutil.tz import * - ->>> datetime.now(tzlocal()) -datetime.datetime(2003, 9, 27, 10, 1, 43, 673605, - tzinfo=tzlocal()) - ->>> datetime.now(tzlocal()).tzname() -'BRST' - ->>> datetime.now(tzlocal()).astimezone(tzoffset(None, 0)) -datetime.datetime(2003, 9, 27, 13, 3, 0, 11493, - tzinfo=tzoffset(None, 0)) -}}} - -==== tzstr type ==== -This type implements timezone settings extracted from a -string in known TZ environment variable formats. Here is the prototype -of the constructor: -{{{ -tzstr(str) -}}} - -==== tzstr examples ==== -Here are examples of the recognized formats: - - * {{{EST5EDT}}} - * {{{EST5EDT,4,0,6,7200,10,0,26,7200,3600}}} - * {{{EST5EDT,4,1,0,7200,10,-1,0,7200,3600}}} - * {{{EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00}}} - * {{{EST5EDT4,95/02:00:00,298/02:00}}} - * {{{EST5EDT4,J96/02:00:00,J299/02:00}}} - -Notice that if daylight information is not present, but a -daylight abbreviation was provided, {{{tzstr}}} will follow the -convention of using the first sunday of April to start daylight -saving, and the last sunday of October to end it. 
If start or -end time is not present, 2AM will be used, and if the daylight -offset is not present, the standard offset plus one hour will -be used. This convention is the same as used in the GNU libc. - -This also means that some of the above examples are exactly -equivalent, and all of these examples are equivalent -in the year of 2003. - -Here is the example mentioned in the -[http://www.python.org/doc/current/lib/module-time.html time module documentation]. -{{{ ->>> os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0' ->>> time.tzset() ->>> time.strftime('%X %x %Z') -'02:07:36 05/08/03 EDT' ->>> os.environ['TZ'] = 'AEST-10AEDT-11,M10.5.0,M3.5.0' ->>> time.tzset() ->>> time.strftime('%X %x %Z') -'16:08:12 05/08/03 AEST' -}}} - -And here is an example showing the same information using {{{tzstr}}}, -without touching system settings. -{{{ ->>> tz1 = tzstr('EST+05EDT,M4.1.0,M10.5.0') ->>> tz2 = tzstr('AEST-10AEDT-11,M10.5.0,M3.5.0') ->>> dt = datetime(2003, 5, 8, 2, 7, 36, tzinfo=tz1) ->>> dt.strftime('%X %x %Z') -'02:07:36 05/08/03 EDT' ->>> dt.astimezone(tz2).strftime('%X %x %Z') -'16:07:36 05/08/03 AEST' -}}} - -Are these really equivalent? -{{{ ->>> tzstr('EST5EDT') == tzstr('EST5EDT,4,1,0,7200,10,-1,0,7200,3600') -True -}}} - -Check the daylight limit. -{{{ ->>> datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname() -'EST' ->>> datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname() -'EDT' ->>> datetime(2003, 10, 26, 0, 59, tzinfo=tz).tzname() -'EDT' ->>> datetime(2003, 10, 26, 1, 00, tzinfo=tz).tzname() -'EST' -}}} - -==== tzrange type ==== -This type offers the same functionality as the {{{tzstr}}} type, but -instead of timezone strings, information is passed using -{{{relativedelta}}}s which are applied to a datetime set to the first -day of the year. Here is the prototype of this type's constructor: -{{{ -tzrange(stdabbr, stdoffset=None, dstabbr=None, dstoffset=None, - start=None, end=None): -}}} - -Offsets must be given in seconds. Information not provided will be -set to the defaults, as explained in the {{{tzstr}}} section above. - -==== tzrange examples ==== -{{{ ->>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") -True - ->>> from dateutil.relativedelta import * ->>> range1 = tzrange("EST", -18000, "EDT") ->>> range2 = tzrange("EST", -18000, "EDT", -14400, -... relativedelta(hours=+2, month=4, day=1, - weekday=SU(+1)), -... relativedelta(hours=+1, month=10, day=31, - weekday=SU(-1))) ->>> tzstr('EST5EDT') == range1 == range2 -True -}}} - -Notice a minor detail in the last example: while the DST should end -at 2AM, the delta will catch 1AM. That's because the daylight saving -time should end at 2AM standard time (the difference between STD and -DST is 1h in the given example) instead of the DST time. That's how -the {{{tzinfo}}} subtypes should deal with the extra hour that happens -when going back to the standard time. Check -[http://www.python.org/doc/current/lib/datetime-tzinfo.html tzinfo documentation] -for more information. - -==== tzfile type ==== -This type allows one to use tzfile(5) format timezone files to extract -current and historical zone information. Here is the type constructor -prototype: -{{{ -tzfile(fileobj) -}}} - -Where {{{fileobj}}} is either a filename or a file-like object with -a {{{read()}}} method. 
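Because {{{fileobj}}} may be a file-like object and not only a filename, a zone file can be opened explicitly before being handed to {{{tzfile}}}. A minimal sketch follows; the {{{/usr/share/zoneinfo}}} path is an assumption and will differ (or be absent) on other systems.
{{{
# Illustrative only: the zoneinfo path below is machine-dependent.
from datetime import datetime
from dateutil.tz import tzfile, tzutc

with open("/usr/share/zoneinfo/America/Sao_Paulo", "rb") as f:
    tz = tzfile(f)                 # any object with a read() method works

now = datetime.now(tz)
print(now.tzname())                # zone abbreviation for the current date
print(now.astimezone(tzutc()))     # convert to UTC for comparison
}}}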
- -==== tzfile examples ==== -{{{ ->>> tz = tzfile("/etc/localtime") ->>> datetime.now(tz) -datetime.datetime(2003, 9, 27, 12, 3, 48, 392138, - tzinfo=tzfile('/etc/localtime')) - ->>> datetime.now(tz).astimezone(tzutc()) -datetime.datetime(2003, 9, 27, 15, 3, 53, 70863, - tzinfo=tzutc()) - ->>> datetime.now(tz).tzname() -'BRST' ->>> datetime(2003, 1, 1, tzinfo=tz).tzname() -'BRDT' -}}} - -Check the daylight limit. -{{{ ->>> tz = tzfile('/usr/share/zoneinfo/EST5EDT') ->>> datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname() -'EST' ->>> datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname() -'EDT' ->>> datetime(2003, 10, 26, 0, 59, tzinfo=tz).tzname() -'EDT' ->>> datetime(2003, 10, 26, 1, 00, tzinfo=tz).tzname() -'EST' -}}} - -==== tzical type ==== -This type is able to parse -[ftp://ftp.rfc-editor.org/in-notes/rfc2445.txt iCalendar] -style {{{VTIMEZONE}}} sessions into a Python timezone object. -The constuctor prototype is: -{{{ -tzical(fileobj) -}}} - -Where {{{fileobj}}} is either a filename or a file-like object with -a {{{read()}}} method. - -==== tzical methods ==== - - tzical.get(tzid=None):: - Since a single iCalendar file may contain more than one timezone, - you must ask for the timezone you want with this method. If there's - more than one timezone in the parsed file, you'll need to pass the - {{{tzid}}} parameter. Otherwise, leaving it empty will yield the only - available timezone. - -==== tzical examples ==== -Here is a sample file extracted from the RFC. This file defines -the {{{EST5EDT}}} timezone, and will be used in the following example. -{{{ -BEGIN:VTIMEZONE -TZID:US-Eastern -LAST-MODIFIED:19870101T000000Z -TZURL:http://zones.stds_r_us.net/tz/US-Eastern -BEGIN:STANDARD -DTSTART:19671029T020000 -RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10 -TZOFFSETFROM:-0400 -TZOFFSETTO:-0500 -TZNAME:EST -END:STANDARD -BEGIN:DAYLIGHT -DTSTART:19870405T020000 -RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4 -TZOFFSETFROM:-0500 -TZOFFSETTO:-0400 -TZNAME:EDT -END:DAYLIGHT -END:VTIMEZONE -}}} - -And here is an example exploring a {{{tzical}}} type: -{{{ ->>> from dateutil.tz import *; from datetime import * - ->>> tz = tzical('EST5EDT.ics') ->>> tz.keys() -['US-Eastern'] - ->>> est = tz.get('US-Eastern') ->>> est - - ->>> datetime.now(est) -datetime.datetime(2003, 10, 6, 19, 44, 18, 667987, - tzinfo=) - ->>> est == tz.get() -True -}}} - -Let's check the daylight ranges, as usual: -{{{ ->>> datetime(2003, 4, 6, 1, 59, tzinfo=est).tzname() -'EST' ->>> datetime(2003, 4, 6, 2, 00, tzinfo=est).tzname() -'EDT' - ->>> datetime(2003, 10, 26, 0, 59, tzinfo=est).tzname() -'EDT' ->>> datetime(2003, 10, 26, 1, 00, tzinfo=est).tzname() -'EST' -}}} - -==== tzwin type ==== -This type offers access to internal registry-based Windows timezones. -The constuctor prototype is: -{{{ -tzwin(name) -}}} - -Where {{{name}}} is the timezone name. There's a static {{{tzwin.list()}}} -method to check the available names, - -==== tzwin methods ==== - - tzwin.display():: - This method returns the timezone extended name. - - tzwin.list():: - This static method lists all available timezone names. - -==== tzwin examples ==== -{{{ ->>> tz = tzwin("E. South America Standard Time") -}}} - -==== tzwinlocal type ==== -This type offers access to internal registry-based Windows timezones. -The constructor accepts no parameters, so the prototype is: -{{{ -tzwinlocal() -}}} - -==== tzwinlocal methods ==== - - tzwinlocal.display():: - This method returns the timezone extended name, and returns - {{{None}}} if one is not available. 
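As a rough sketch of the Windows-only types above, the following uses {{{tzwin.list()}}} to discover names and {{{display()}}} for the extended name; it assumes a Windows machine, and the example zone name is simply reused from the {{{tzwin}}} example above, so it may not exist everywhere.
{{{
# Illustrative only: runs on Windows, where the registry-based zones exist.
from datetime import datetime
from dateutil.tz import tzwin, tzwinlocal

print(tzwin.list()[:5])                        # a few of the available names

tz = tzwin("E. South America Standard Time")   # name taken from the example above
print(tz.display())                            # extended (display) name
print(datetime.now(tz).tzname())

print(tzwinlocal().display())                  # the machine's own timezone, or None
}}}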
- -==== tzwinlocal examples ==== -{{{ ->>> tz = tzwinlocal() -}}} - -==== gettz() function ==== -This function is a helper that will try its best to get the right -timezone for your environment, or for the given string. The prototype -is as follows: -{{{ -gettz(name=None) -}}} - -If given, the parameter may be a filename, a path relative to the base -of the timezone information path (the base could be -{{{/usr/share/zoneinfo}}}, for example), a string timezone -specification, or a timezone abbreviation. If {{{name}}} is not given, -and the {{{TZ}}} environment variable is set, it's used instead. If the -parameter is not given, and {{{TZ}}} is not set, the default tzfile -paths will be tried. Then, if no timezone information is found, -an internal compiled database of timezones is used. When running -on Windows, the internal registry-based Windows timezones are also -considered. - -Example: -{{{ ->>> from dateutil.tz import * ->>> gettz() -tzfile('/etc/localtime') - ->>> gettz("America/Sao Paulo") -tzfile('/usr/share/zoneinfo/America/Sao_Paulo') - ->>> gettz("EST5EDT") -tzfile('/usr/share/zoneinfo/EST5EDT') - ->>> gettz("EST5") -tzstr('EST5') - ->>> gettz('BRST') -tzlocal() - ->>> os.environ["TZ"] = "America/Sao Paulo" ->>> gettz() -tzfile('/usr/share/zoneinfo/America/Sao_Paulo') - ->>> os.environ["TZ"] = "BRST" ->>> gettz() -tzlocal() - ->>> gettz("Unavailable") ->>> -}}} - -=== zoneinfo === -This module provides direct access to the internal compiled -database of timezones. The timezone data and the compiling tools -are obtained from the following project: - - http://www.twinsun.com/tz/tz-link.htm - -==== gettz() function ==== -This function will try to retrieve the given timezone information -from the internal compiled database, and will cache its results. - -Example: -{{{ ->>> from dateutil import zoneinfo ->>> zoneinfo.gettz("Brazil/East") -tzfile('Brazil/East') -}}} - -## vim:ft=moin diff --git a/lib/dateutil_py3/__init__.py b/lib/dateutil_py3/__init__.py deleted file mode 100644 index a23cea5218cc..000000000000 --- a/lib/dateutil_py3/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Copyright (c) 2003-2010 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -__author__ = "Tomi Pieviläinen " -__license__ = "Simplified BSD" -__version__ = "2.1-mpl" diff --git a/lib/dateutil_py3/easter.py b/lib/dateutil_py3/easter.py deleted file mode 100644 index d8a38844f9e3..000000000000 --- a/lib/dateutil_py3/easter.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Copyright (c) 2003-2007 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -__license__ = "Simplified BSD" - -import datetime - -__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] - -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 - -def easter(year, method=EASTER_WESTERN): - """ - This method was ported from the work done by GM Arts, - on top of the algorithm by Claus Tondering, which was - based in part on the algorithm of Ouding (1940), as - quoted in "Explanatory Supplement to the Astronomical - Almanac", P. Kenneth Seidelmann, editor. 
- - This algorithm implements three different easter - calculation methods: - - 1 - Original calculation in Julian calendar, valid in - dates after 326 AD - 2 - Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 3 - Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - - These methods are represented by the constants: - - EASTER_JULIAN = 1 - EASTER_ORTHODOX = 2 - EASTER_WESTERN = 3 - - The default method is method 3. - - More about the algorithm may be found at: - - http://users.chariot.net.au/~gmarts/eastalg.htm - - and - - http://www.tondering.dk/claus/calendar.html - - """ - - if not (1 <= method <= 3): - raise ValueError("invalid method") - - # g - Golden year - 1 - # c - Century - # h - (23 - Epact) mod 30 - # i - Number of days from March 21 to Paschal Full Moon - # j - Weekday for PFM (0=Sunday, etc) - # p - Number of days from March 21 to Sunday on or before PFM - # (-6 to 28 methods 1 & 3, to 56 for method 2) - # e - Extra days to add for method 2 (converting Julian - # date to Gregorian date) - - y = year - g = y % 19 - e = 0 - if method < 3: - # Old method - i = (19*g+15)%30 - j = (y+y//4+i)%7 - if method == 2: - # Extra dates to convert Julian to Gregorian date - e = 10 - if y > 1600: - e = e+y//100-16-(y//100-16)//4 - else: - # New method - c = y//100 - h = (c-c//4-(8*c+13)//25+19*g+15)%30 - i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11)) - j = (y+y//4+i+2-c+c//4)%7 - - # p can be from -6 to 56 corresponding to dates 22 March to 23 May - # (later dates apply to method 2, although 23 May never actually occurs) - p = i-j+e - d = 1+(p+27+(p+6)//40)%31 - m = 3+(p+26)//30 - return datetime.date(int(y), int(m), int(d)) - diff --git a/lib/dateutil_py3/parser.py b/lib/dateutil_py3/parser.py deleted file mode 100644 index a2604a35ba06..000000000000 --- a/lib/dateutil_py3/parser.py +++ /dev/null @@ -1,909 +0,0 @@ -# -*- coding:iso-8859-1 -*- -""" -Copyright (c) 2003-2007 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -from __future__ import unicode_literals -__license__ = "Simplified BSD" - - -import datetime -import string -import time -import sys -import os -import collections - -try: - from io import StringIO -except ImportError: - from io import StringIO - -from six import text_type, binary_type, integer_types - -from . import relativedelta -from . 
import tz - - -__all__ = ["parse", "parserinfo"] - - -# Some pointers: -# -# http://www.cl.cam.ac.uk/~mgk25/iso-time.html -# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html -# http://www.w3.org/TR/NOTE-datetime -# http://ringmaster.arc.nasa.gov/tools/time_formats.html -# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm -# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html - - -class _timelex(object): - - def __init__(self, instream): - if isinstance(instream, text_type): - instream = StringIO(instream) - self.instream = instream - self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' - 'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' - 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') - self.numchars = '0123456789' - self.whitespace = ' \t\r\n' - self.charstack = [] - self.tokenstack = [] - self.eof = False - - def get_token(self): - if self.tokenstack: - return self.tokenstack.pop(0) - seenletters = False - token = None - state = None - wordchars = self.wordchars - numchars = self.numchars - whitespace = self.whitespace - while not self.eof: - if self.charstack: - nextchar = self.charstack.pop(0) - else: - nextchar = self.instream.read(1) - while nextchar == '\x00': - nextchar = self.instream.read(1) - if not nextchar: - self.eof = True - break - elif not state: - token = nextchar - if nextchar in wordchars: - state = 'a' - elif nextchar in numchars: - state = '0' - elif nextchar in whitespace: - token = ' ' - break # emit token - else: - break # emit token - elif state == 'a': - seenletters = True - if nextchar in wordchars: - token += nextchar - elif nextchar == '.': - token += nextchar - state = 'a.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0': - if nextchar in numchars: - token += nextchar - elif nextchar == '.': - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == 'a.': - seenletters = True - if nextchar == '.' or nextchar in wordchars: - token += nextchar - elif nextchar in numchars and token[-1] == '.': - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0.': - if nextchar == '.' or nextchar in numchars: - token += nextchar - elif nextchar in wordchars and token[-1] == '.': - token += nextchar - state = 'a.' 
- else: - self.charstack.append(nextchar) - break # emit token - if (state in ('a.', '0.') and - (seenletters or token.count('.') > 1 or token[-1] == '.')): - l = token.split('.') - token = l[0] - for tok in l[1:]: - self.tokenstack.append('.') - if tok: - self.tokenstack.append(tok) - return token - - def __iter__(self): - return self - - def __next__(self): - token = self.get_token() - if token is None: - raise StopIteration - return token - - def next(self): - return self.__next__() # Python 2.x support - - def split(cls, s): - return list(cls(s)) - split = classmethod(split) - - -class _resultbase(object): - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def _repr(self, classname): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (classname, ", ".join(l)) - - def __repr__(self): - return self._repr(self.__class__.__name__) - - -class parserinfo(object): - - # m from a.m/p.m, t from ISO T separator - JUMP = [" ", ".", ",", ";", "-", "/", "'", - "at", "on", "and", "ad", "m", "t", "of", - "st", "nd", "rd", "th"] - - WEEKDAYS = [("Mon", "Monday"), - ("Tue", "Tuesday"), - ("Wed", "Wednesday"), - ("Thu", "Thursday"), - ("Fri", "Friday"), - ("Sat", "Saturday"), - ("Sun", "Sunday")] - MONTHS = [("Jan", "January"), - ("Feb", "February"), - ("Mar", "March"), - ("Apr", "April"), - ("May", "May"), - ("Jun", "June"), - ("Jul", "July"), - ("Aug", "August"), - ("Sep", "Sept", "September"), - ("Oct", "October"), - ("Nov", "November"), - ("Dec", "December")] - HMS = [("h", "hour", "hours"), - ("m", "minute", "minutes"), - ("s", "second", "seconds")] - AMPM = [("am", "a"), - ("pm", "p")] - UTCZONE = ["UTC", "GMT", "Z"] - PERTAIN = ["of"] - TZOFFSET = {} - - def __init__(self, dayfirst=False, yearfirst=False): - self._jump = self._convert(self.JUMP) - self._weekdays = self._convert(self.WEEKDAYS) - self._months = self._convert(self.MONTHS) - self._hms = self._convert(self.HMS) - self._ampm = self._convert(self.AMPM) - self._utczone = self._convert(self.UTCZONE) - self._pertain = self._convert(self.PERTAIN) - - self.dayfirst = dayfirst - self.yearfirst = yearfirst - - self._year = time.localtime().tm_year - self._century = self._year//100*100 - - def _convert(self, lst): - dct = {} - for i in range(len(lst)): - v = lst[i] - if isinstance(v, tuple): - for v in v: - dct[v.lower()] = i - else: - dct[v.lower()] = i - return dct - - def jump(self, name): - return name.lower() in self._jump - - def weekday(self, name): - if len(name) >= 3: - try: - return self._weekdays[name.lower()] - except KeyError: - pass - return None - - def month(self, name): - if len(name) >= 3: - try: - return self._months[name.lower()]+1 - except KeyError: - pass - return None - - def hms(self, name): - try: - return self._hms[name.lower()] - except KeyError: - return None - - def ampm(self, name): - try: - return self._ampm[name.lower()] - except KeyError: - return None - - def pertain(self, name): - return name.lower() in self._pertain - - def utczone(self, name): - return name.lower() in self._utczone - - def tzoffset(self, name): - if name in self._utczone: - return 0 - return self.TZOFFSET.get(name) - - def convertyear(self, year): - if year < 100: - year += self._century - if abs(year-self._year) >= 50: - if year < self._year: - year += 100 - else: - year -= 100 - return year - - def validate(self, res): - # move to info - if res.year is not None: - res.year = self.convertyear(res.year) - 
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': - res.tzname = "UTC" - res.tzoffset = 0 - elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): - res.tzoffset = 0 - return True - - -class parser(object): - - def __init__(self, info=None): - self.info = info or parserinfo() - - def parse(self, timestr, default=None, - ignoretz=False, tzinfos=None, - **kwargs): - if not default: - default = datetime.datetime.now().replace(hour=0, minute=0, - second=0, microsecond=0) - res = self._parse(timestr, **kwargs) - if res is None: - raise ValueError("unknown string format") - repl = {} - for attr in ["year", "month", "day", "hour", - "minute", "second", "microsecond"]: - value = getattr(res, attr) - if value is not None: - repl[attr] = value - ret = default.replace(**repl) - if res.weekday is not None and not res.day: - ret = ret+relativedelta.relativedelta(weekday=res.weekday) - if not ignoretz: - if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos: - if isinstance(tzinfos, collections.Callable): - tzdata = tzinfos(res.tzname, res.tzoffset) - else: - tzdata = tzinfos.get(res.tzname) - if isinstance(tzdata, datetime.tzinfo): - tzinfo = tzdata - elif isinstance(tzdata, text_type): - tzinfo = tz.tzstr(tzdata) - elif isinstance(tzdata, integer_types): - tzinfo = tz.tzoffset(res.tzname, tzdata) - else: - raise ValueError("offset must be tzinfo subclass, " \ - "tz string, or int offset") - ret = ret.replace(tzinfo=tzinfo) - elif res.tzname and res.tzname in time.tzname: - ret = ret.replace(tzinfo=tz.tzlocal()) - elif res.tzoffset == 0: - ret = ret.replace(tzinfo=tz.tzutc()) - elif res.tzoffset: - ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) - return ret - - class _result(_resultbase): - __slots__ = ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond", - "tzname", "tzoffset"] - - def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False): - info = self.info - if dayfirst is None: - dayfirst = info.dayfirst - if yearfirst is None: - yearfirst = info.yearfirst - res = self._result() - l = _timelex.split(timestr) - try: - - # year/month/day list - ymd = [] - - # Index of the month string in ymd - mstridx = -1 - - len_l = len(l) - i = 0 - while i < len_l: - - # Check if it's a number - try: - value_repr = l[i] - value = float(value_repr) - except ValueError: - value = None - - if value is not None: - # Token is a number - len_li = len(l[i]) - i += 1 - if (len(ymd) == 3 and len_li in (2, 4) - and (i >= len_l or (l[i] != ':' and - info.hms(l[i]) is None))): - # 19990101T23[59] - s = l[i-1] - res.hour = int(s[:2]) - if len_li == 4: - res.minute = int(s[2:]) - elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): - # YYMMDD or HHMMSS[.ss] - s = l[i-1] - if not ymd and l[i-1].find('.') == -1: - ymd.append(info.convertyear(int(s[:2]))) - ymd.append(int(s[2:4])) - ymd.append(int(s[4:])) - else: - # 19990101T235959[.59] - res.hour = int(s[:2]) - res.minute = int(s[2:4]) - res.second, res.microsecond = _parsems(s[4:]) - elif len_li == 8: - # YYYYMMDD - s = l[i-1] - ymd.append(int(s[:4])) - ymd.append(int(s[4:6])) - ymd.append(int(s[6:])) - elif len_li in (12, 14): - # YYYYMMDDhhmm[ss] - s = l[i-1] - ymd.append(int(s[:4])) - ymd.append(int(s[4:6])) - ymd.append(int(s[6:8])) - res.hour = int(s[8:10]) - res.minute = int(s[10:12]) - if len_li == 14: - res.second = int(s[12:]) - elif ((i < len_l and info.hms(l[i]) is not None) or - (i+1 < len_l and l[i] == ' ' and - info.hms(l[i+1]) is not None)): - # HH[ 
]h or MM[ ]m or SS[.ss][ ]s - if l[i] == ' ': - i += 1 - idx = info.hms(l[i]) - while True: - if idx == 0: - res.hour = int(value) - if value%1: - res.minute = int(60*(value%1)) - elif idx == 1: - res.minute = int(value) - if value%1: - res.second = int(60*(value%1)) - elif idx == 2: - res.second, res.microsecond = \ - _parsems(value_repr) - i += 1 - if i >= len_l or idx == 2: - break - # 12h00 - try: - value_repr = l[i] - value = float(value_repr) - except ValueError: - break - else: - i += 1 - idx += 1 - if i < len_l: - newidx = info.hms(l[i]) - if newidx is not None: - idx = newidx - elif i == len_l and l[i-2] == ' ' and info.hms(l[i-3]) is not None: - # X h MM or X m SS - idx = info.hms(l[i-3]) + 1 - if idx == 1: - res.minute = int(value) - if value%1: - res.second = int(60*(value%1)) - elif idx == 2: - res.second, res.microsecond = \ - _parsems(value_repr) - i += 1 - elif i+1 < len_l and l[i] == ':': - # HH:MM[:SS[.ss]] - res.hour = int(value) - i += 1 - value = float(l[i]) - res.minute = int(value) - if value%1: - res.second = int(60*(value%1)) - i += 1 - if i < len_l and l[i] == ':': - res.second, res.microsecond = _parsems(l[i+1]) - i += 2 - elif i < len_l and l[i] in ('-', '/', '.'): - sep = l[i] - ymd.append(int(value)) - i += 1 - if i < len_l and not info.jump(l[i]): - try: - # 01-01[-01] - ymd.append(int(l[i])) - except ValueError: - # 01-Jan[-01] - value = info.month(l[i]) - if value is not None: - ymd.append(value) - assert mstridx == -1 - mstridx = len(ymd)-1 - else: - return None - i += 1 - if i < len_l and l[i] == sep: - # We have three members - i += 1 - value = info.month(l[i]) - if value is not None: - ymd.append(value) - mstridx = len(ymd)-1 - assert mstridx == -1 - else: - ymd.append(int(l[i])) - i += 1 - elif i >= len_l or info.jump(l[i]): - if i+1 < len_l and info.ampm(l[i+1]) is not None: - # 12 am - res.hour = int(value) - if res.hour < 12 and info.ampm(l[i+1]) == 1: - res.hour += 12 - elif res.hour == 12 and info.ampm(l[i+1]) == 0: - res.hour = 0 - i += 1 - else: - # Year, month or day - ymd.append(int(value)) - i += 1 - elif info.ampm(l[i]) is not None: - # 12am - res.hour = int(value) - if res.hour < 12 and info.ampm(l[i]) == 1: - res.hour += 12 - elif res.hour == 12 and info.ampm(l[i]) == 0: - res.hour = 0 - i += 1 - elif not fuzzy: - return None - else: - i += 1 - continue - - # Check weekday - value = info.weekday(l[i]) - if value is not None: - res.weekday = value - i += 1 - continue - - # Check month name - value = info.month(l[i]) - if value is not None: - ymd.append(value) - assert mstridx == -1 - mstridx = len(ymd)-1 - i += 1 - if i < len_l: - if l[i] in ('-', '/'): - # Jan-01[-99] - sep = l[i] - i += 1 - ymd.append(int(l[i])) - i += 1 - if i < len_l and l[i] == sep: - # Jan-01-99 - i += 1 - ymd.append(int(l[i])) - i += 1 - elif (i+3 < len_l and l[i] == l[i+2] == ' ' - and info.pertain(l[i+1])): - # Jan of 01 - # In this case, 01 is clearly year - try: - value = int(l[i+3]) - except ValueError: - # Wrong guess - pass - else: - # Convert it here to become unambiguous - ymd.append(info.convertyear(value)) - i += 4 - continue - - # Check am/pm - value = info.ampm(l[i]) - if value is not None: - if value == 1 and res.hour < 12: - res.hour += 12 - elif value == 0 and res.hour == 12: - res.hour = 0 - i += 1 - continue - - # Check for a timezone name - if (res.hour is not None and len(l[i]) <= 5 and - res.tzname is None and res.tzoffset is None and - not [x for x in l[i] if x not in string.ascii_uppercase]): - res.tzname = l[i] - res.tzoffset = 
info.tzoffset(res.tzname) - i += 1 - - # Check for something like GMT+3, or BRST+3. Notice - # that it doesn't mean "I am 3 hours after GMT", but - # "my time +3 is GMT". If found, we reverse the - # logic so that timezone parsing code will get it - # right. - if i < len_l and l[i] in ('+', '-'): - l[i] = ('+', '-')[l[i] == '+'] - res.tzoffset = None - if info.utczone(res.tzname): - # With something like GMT+3, the timezone - # is *not* GMT. - res.tzname = None - - continue - - # Check for a numbered timezone - if res.hour is not None and l[i] in ('+', '-'): - signal = (-1, 1)[l[i] == '+'] - i += 1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60 - elif i+1 < len_l and l[i+1] == ':': - # -03:00 - res.tzoffset = int(l[i])*3600+int(l[i+2])*60 - i += 2 - elif len_li <= 2: - # -[0]3 - res.tzoffset = int(l[i][:2])*3600 - else: - return None - i += 1 - res.tzoffset *= signal - - # Look for a timezone name between parenthesis - if (i+3 < len_l and - info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and - 3 <= len(l[i+2]) <= 5 and - not [x for x in l[i+2] - if x not in string.ascii_uppercase]): - # -0300 (BRST) - res.tzname = l[i+2] - i += 4 - continue - - # Check jumps - if not (info.jump(l[i]) or fuzzy): - return None - - i += 1 - - # Process year/month/day - len_ymd = len(ymd) - if len_ymd > 3: - # More than three members!? - return None - elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): - # One member, or two members with a month string - if mstridx != -1: - res.month = ymd[mstridx] - del ymd[mstridx] - if len_ymd > 1 or mstridx == -1: - if ymd[0] > 31: - res.year = ymd[0] - else: - res.day = ymd[0] - elif len_ymd == 2: - # Two members with numbers - if ymd[0] > 31: - # 99-01 - res.year, res.month = ymd - elif ymd[1] > 31: - # 01-99 - res.month, res.year = ymd - elif dayfirst and ymd[1] <= 12: - # 13-01 - res.day, res.month = ymd - else: - # 01-13 - res.month, res.day = ymd - if len_ymd == 3: - # Three members - if mstridx == 0: - res.month, res.day, res.year = ymd - elif mstridx == 1: - if ymd[0] > 31 or (yearfirst and ymd[2] <= 31): - # 99-Jan-01 - res.year, res.month, res.day = ymd - else: - # 01-Jan-01 - # Give precendence to day-first, since - # two-digit years is usually hand-written. - res.day, res.month, res.year = ymd - elif mstridx == 2: - # WTF!? - if ymd[1] > 31: - # 01-99-Jan - res.day, res.year, res.month = ymd - else: - # 99-01-Jan - res.year, res.day, res.month = ymd - else: - if ymd[0] > 31 or \ - (yearfirst and ymd[1] <= 12 and ymd[2] <= 31): - # 99-01-01 - res.year, res.month, res.day = ymd - elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12): - # 13-01-01 - res.day, res.month, res.year = ymd - else: - # 01-13-01 - res.month, res.day, res.year = ymd - - except (IndexError, ValueError, AssertionError): - return None - - if not info.validate(res): - return None - return res - -DEFAULTPARSER = parser() -def parse(timestr, parserinfo=None, **kwargs): - # Python 2.x support: datetimes return their string presentation as - # bytes in 2.x and unicode in 3.x, so it's reasonable to expect that - # the parser will get both kinds. Internally we use unicode only. 
- if isinstance(timestr, binary_type): - timestr = timestr.decode() - if parserinfo: - return parser(parserinfo).parse(timestr, **kwargs) - else: - return DEFAULTPARSER.parse(timestr, **kwargs) - - -class _tzparser(object): - - class _result(_resultbase): - - __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", - "start", "end"] - - class _attr(_resultbase): - __slots__ = ["month", "week", "weekday", - "yday", "jyday", "day", "time"] - - def __repr__(self): - return self._repr("") - - def __init__(self): - _resultbase.__init__(self) - self.start = self._attr() - self.end = self._attr() - - def parse(self, tzstr): - res = self._result() - l = _timelex.split(tzstr) - try: - - len_l = len(l) - - i = 0 - while i < len_l: - # BRST+3[BRDT[+2]] - j = i - while j < len_l and not [x for x in l[j] - if x in "0123456789:,-+"]: - j += 1 - if j != i: - if not res.stdabbr: - offattr = "stdoffset" - res.stdabbr = "".join(l[i:j]) - else: - offattr = "dstoffset" - res.dstabbr = "".join(l[i:j]) - i = j - if (i < len_l and - (l[i] in ('+', '-') or l[i][0] in "0123456789")): - if l[i] in ('+', '-'): - # Yes, that's right. See the TZ variable - # documentation. - signal = (1, -1)[l[i] == '+'] - i += 1 - else: - signal = -1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - setattr(res, offattr, - (int(l[i][:2])*3600+int(l[i][2:])*60)*signal) - elif i+1 < len_l and l[i+1] == ':': - # -03:00 - setattr(res, offattr, - (int(l[i])*3600+int(l[i+2])*60)*signal) - i += 2 - elif len_li <= 2: - # -[0]3 - setattr(res, offattr, - int(l[i][:2])*3600*signal) - else: - return None - i += 1 - if res.dstabbr: - break - else: - break - - if i < len_l: - for j in range(i, len_l): - if l[j] == ';': l[j] = ',' - - assert l[i] == ',' - - i += 1 - - if i >= len_l: - pass - elif (8 <= l.count(',') <= 9 and - not [y for x in l[i:] if x != ',' - for y in x if y not in "0123456789"]): - # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] - for x in (res.start, res.end): - x.month = int(l[i]) - i += 2 - if l[i] == '-': - value = int(l[i+1])*-1 - i += 1 - else: - value = int(l[i]) - i += 2 - if value: - x.week = value - x.weekday = (int(l[i])-1)%7 - else: - x.day = int(l[i]) - i += 2 - x.time = int(l[i]) - i += 2 - if i < len_l: - if l[i] in ('-', '+'): - signal = (-1, 1)[l[i] == "+"] - i += 1 - else: - signal = 1 - res.dstoffset = (res.stdoffset+int(l[i]))*signal - elif (l.count(',') == 2 and l[i:].count('/') <= 2 and - not [y for x in l[i:] if x not in (',', '/', 'J', 'M', - '.', '-', ':') - for y in x if y not in "0123456789"]): - for x in (res.start, res.end): - if l[i] == 'J': - # non-leap year day (1 based) - i += 1 - x.jyday = int(l[i]) - elif l[i] == 'M': - # month[-.]week[-.]weekday - i += 1 - x.month = int(l[i]) - i += 1 - assert l[i] in ('-', '.') - i += 1 - x.week = int(l[i]) - if x.week == 5: - x.week = -1 - i += 1 - assert l[i] in ('-', '.') - i += 1 - x.weekday = (int(l[i])-1)%7 - else: - # year day (zero based) - x.yday = int(l[i])+1 - - i += 1 - - if i < len_l and l[i] == '/': - i += 1 - # start time - len_li = len(l[i]) - if len_li == 4: - # -0300 - x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) - elif i+1 < len_l and l[i+1] == ':': - # -03:00 - x.time = int(l[i])*3600+int(l[i+2])*60 - i += 2 - if i+1 < len_l and l[i+1] == ':': - i += 2 - x.time += int(l[i]) - elif len_li <= 2: - # -[0]3 - x.time = (int(l[i][:2])*3600) - else: - return None - i += 1 - - assert i == len_l or l[i] == ',' - - i += 1 - - assert i >= len_l - - except (IndexError, ValueError, AssertionError): - return None - - return res - - -DEFAULTTZPARSER = 
_tzparser() -def _parsetz(tzstr): - return DEFAULTTZPARSER.parse(tzstr) - - -def _parsems(value): - """Parse a I[.F] seconds value into (seconds, microseconds).""" - if "." not in value: - return int(value), 0 - else: - i, f = value.split(".") - return int(i), int(f.ljust(6, "0")[:6]) - - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py3/relativedelta.py b/lib/dateutil_py3/relativedelta.py deleted file mode 100644 index 4393bcbcde22..000000000000 --- a/lib/dateutil_py3/relativedelta.py +++ /dev/null @@ -1,436 +0,0 @@ -""" -Copyright (c) 2003-2010 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -__license__ = "Simplified BSD" - -import datetime -import calendar - -from six import integer_types - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) - -class relativedelta(object): - """ -The relativedelta type is based on the specification of the excelent -work done by M.-A. Lemburg in his mx.DateTime extension. However, -notice that this type does *NOT* implement the same algorithm as -his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - -There's two different ways to build a relativedelta instance. The -first one is passing it two date/datetime classes: - - relativedelta(datetime1, datetime2) - -And the other way is to use the following keyword arguments: - - year, month, day, hour, minute, second, microsecond: - Absolute information. - - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative. - - weekday: - One of the weekday instances (MO, TU, etc). These instances may - receive a parameter N, specifying the Nth weekday, which could - be positive or negative (like MO(+1) or MO(-2). Not specifying - it is the same as specifying +1. You can also use an integer, - where 0=MO. - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - -Here is the behavior of operations with relativedelta: - -1) Calculate the absolute year, using the 'year' argument, or the - original datetime year, if the argument is not present. - -2) Add the relative 'years' argument to the absolute year. - -3) Do steps 1 and 2 for month/months. - -4) Calculate the absolute day, using the 'day' argument, or the - original datetime day, if the argument is not present. Then, - subtract from the day until it fits in the year and month - found after their operations. - -5) Add the relative 'days' argument to the absolute day. Notice - that the 'weeks' argument is multiplied by 7 and added to - 'days'. - -6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, - microsecond/microseconds. 
- -7) If the 'weekday' argument is present, calculate the weekday, - with the given (wday, nth) tuple. wday is the index of the - weekday (0-6, 0=Mon), and nth is the number of weeks to add - forward or backward, depending on its signal. Notice that if - the calculated date is already Monday, for example, using - (0, 1) or (0, -1) won't change the day. - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - if dt1 and dt2: - if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)): - raise TypeError("relativedelta only diffs datetime/date") - if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month) - self._set_months(months) - dtm = self.__radd__(dt2) - if dt1 < dt2: - while dt1 > dtm: - months += 1 - self._set_months(months) - dtm = self.__radd__(dt2) - else: - while dt1 < dtm: - months -= 1 - self._set_months(months) - dtm = self.__radd__(dt2) - delta = dt1 - dtm - self.seconds = delta.seconds+delta.days*86400 - self.microseconds = delta.microseconds - else: - self.years = years - self.months = months - self.days = days+weeks*7 - self.leapdays = leapdays - self.hours = hours - self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if isinstance(weekday, integer_types): - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError("invalid year day (%d)" % yday) - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = self.microseconds//abs(self.microseconds) - div, mod = divmod(self.microseconds*s, 1000000) - self.microseconds = mod*s - self.seconds += div*s - if abs(self.seconds) > 59: - s = self.seconds//abs(self.seconds) - div, mod = divmod(self.seconds*s, 60) - self.seconds = mod*s - self.minutes += div*s - if abs(self.minutes) > 59: - s = self.minutes//abs(self.minutes) - div, mod = divmod(self.minutes*s, 60) - self.minutes = mod*s - self.hours += div*s - if abs(self.hours) > 23: - s = self.hours//abs(self.hours) - div, mod = divmod(self.hours*s, 24) - self.hours = mod*s - self.days += div*s - if abs(self.months) > 11: - s = self.months//abs(self.months) - div, mod = divmod(self.months*s, 12) - self.months = mod*s - self.years += div*s - if 
(self.hours or self.minutes or self.seconds or self.microseconds or - self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = self.months//abs(self.months) - div, mod = divmod(self.months*s, 12) - self.months = mod*s - self.years = div*s - else: - self.years = 0 - - def __add__(self, other): - if isinstance(other, relativedelta): - return relativedelta(years=other.years+self.years, - months=other.months+self.months, - days=other.days+self.days, - hours=other.hours+self.hours, - minutes=other.minutes+self.minutes, - seconds=other.seconds+self.seconds, - microseconds=other.microseconds+self.microseconds, - leapdays=other.leapdays or self.leapdays, - year=other.year or self.year, - month=other.month or self.month, - day=other.day or self.day, - weekday=other.weekday or self.weekday, - hour=other.hour or self.hour, - minute=other.minute or self.minute, - second=other.second or self.second, - microsecond=other.microsecond or self.microsecond) - if not isinstance(other, datetime.date): - raise TypeError("unsupported type for add operation") - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth)-1)*7 - if nth > 0: - jumpdays += (7-ret.weekday()+weekday)%7 - else: - jumpdays += (ret.weekday()-weekday)%7 - jumpdays *= -1 - ret += datetime.timedelta(days=jumpdays) - return ret - - def __radd__(self, other): - return self.__add__(other) - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - raise TypeError("unsupported type for sub operation") - return relativedelta(years=self.years-other.years, - months=self.months-other.months, - days=self.days-other.days, - hours=self.hours-other.hours, - minutes=self.minutes-other.minutes, - seconds=self.seconds-other.seconds, - microseconds=self.microseconds-other.microseconds, - leapdays=self.leapdays or other.leapdays, - year=self.year or other.year, - month=self.month or other.month, - day=self.day or other.day, - weekday=self.weekday or other.weekday, - hour=self.hour or other.hour, - minute=self.minute or other.minute, - second=self.second or other.second, - microsecond=self.microsecond or other.microsecond) - - def __neg__(self): - return relativedelta(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - 
leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __bool__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - - def __mul__(self, other): - f = float(other) - return relativedelta(years=int(self.years*f), - months=int(self.months*f), - days=int(self.days*f), - hours=int(self.hours*f), - minutes=int(self.minutes*f), - seconds=int(self.seconds*f), - microseconds=int(self.microseconds*f), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - __rmul__ = __mul__ - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return False - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - self.months == other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - return self.__mul__(1/float(other)) - - __truediv__ = __div__ - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("%s=%+d" % (attr, value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py3/rrule.py b/lib/dateutil_py3/rrule.py deleted file mode 100644 index ad4d3ba70c4e..000000000000 --- a/lib/dateutil_py3/rrule.py +++ /dev/null @@ -1,1112 +0,0 @@ -""" -Copyright (c) 2003-2010 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -__license__ = "Simplified BSD" - -import itertools -import datetime -import calendar -try: - import _thread -except ImportError: - import thread as _thread -import sys - -from six import advance_iterator, integer_types - -__all__ = ["rrule", "rruleset", "rrulestr", - "YEARLY", "MONTHLY", "WEEKLY", "DAILY", - "HOURLY", "MINUTELY", "SECONDLY", - "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -# Every mask is 7 days longer to handle cross-year weekly periods. 
-M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+ - [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) -M365MASK = list(M366MASK) -M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) -MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -MDAY365MASK = list(MDAY366MASK) -M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) -NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -NMDAY365MASK = list(NMDAY366MASK) -M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) -M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) -WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 -del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] -MDAY365MASK = tuple(MDAY365MASK) -M365MASK = tuple(M365MASK) - -(YEARLY, - MONTHLY, - WEEKLY, - DAILY, - HOURLY, - MINUTELY, - SECONDLY) = list(range(7)) - -# Imported on demand. -easter = None -parser = None - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - if n == 0: - raise ValueError("Can't create weekday with n == 0") - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) - -class rrulebase(object): - def __init__(self, cache=False): - if cache: - self._cache = [] - self._cache_lock = _thread.allocate_lock() - self._cache_gen = self._iter() - self._cache_complete = False - else: - self._cache = None - self._cache_complete = False - self._len = None - - def __iter__(self): - if self._cache_complete: - return iter(self._cache) - elif self._cache is None: - return self._iter() - else: - return self._iter_cached() - - def _iter_cached(self): - i = 0 - gen = self._cache_gen - cache = self._cache - acquire = self._cache_lock.acquire - release = self._cache_lock.release - while gen: - if i == len(cache): - acquire() - if self._cache_complete: - break - try: - for j in range(10): - cache.append(advance_iterator(gen)) - except StopIteration: - self._cache_gen = gen = None - self._cache_complete = True - break - release() - yield cache[i] - i += 1 - while i < self._len: - yield cache[i] - i += 1 - - def __getitem__(self, item): - if self._cache_complete: - return self._cache[item] - elif isinstance(item, slice): - if item.step and item.step < 0: - return list(iter(self))[item] - else: - return list(itertools.islice(self, - item.start or 0, - item.stop or sys.maxsize, - item.step or 1)) - elif item >= 0: - gen = iter(self) - try: - for i in range(item+1): - res = advance_iterator(gen) - except StopIteration: - raise IndexError - return res - else: - return list(iter(self))[item] - - def __contains__(self, item): - if self._cache_complete: - return item in self._cache - else: - for i in self: - if i == item: - return True - elif i > item: - return False - return False - - # __len__() introduces a large performance penality. 
- def count(self): - if self._len is None: - for x in self: pass - return self._len - - def before(self, dt, inc=False): - if self._cache_complete: - gen = self._cache - else: - gen = self - last = None - if inc: - for i in gen: - if i > dt: - break - last = i - else: - for i in gen: - if i >= dt: - break - last = i - return last - - def after(self, dt, inc=False): - if self._cache_complete: - gen = self._cache - else: - gen = self - if inc: - for i in gen: - if i >= dt: - return i - else: - for i in gen: - if i > dt: - return i - return None - - def between(self, after, before, inc=False): - if self._cache_complete: - gen = self._cache - else: - gen = self - started = False - l = [] - if inc: - for i in gen: - if i > before: - break - elif not started: - if i >= after: - started = True - l.append(i) - else: - l.append(i) - else: - for i in gen: - if i >= before: - break - elif not started: - if i > after: - started = True - l.append(i) - else: - l.append(i) - return l - -class rrule(rrulebase): - def __init__(self, freq, dtstart=None, - interval=1, wkst=None, count=None, until=None, bysetpos=None, - bymonth=None, bymonthday=None, byyearday=None, byeaster=None, - byweekno=None, byweekday=None, - byhour=None, byminute=None, bysecond=None, - cache=False): - super(rrule, self).__init__(cache) - global easter - if not dtstart: - dtstart = datetime.datetime.now().replace(microsecond=0) - elif not isinstance(dtstart, datetime.datetime): - dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) - else: - dtstart = dtstart.replace(microsecond=0) - self._dtstart = dtstart - self._tzinfo = dtstart.tzinfo - self._freq = freq - self._interval = interval - self._count = count - if until and not isinstance(until, datetime.datetime): - until = datetime.datetime.fromordinal(until.toordinal()) - self._until = until - if wkst is None: - self._wkst = calendar.firstweekday() - elif isinstance(wkst, integer_types): - self._wkst = wkst - else: - self._wkst = wkst.weekday - if bysetpos is None: - self._bysetpos = None - elif isinstance(bysetpos, integer_types): - if bysetpos == 0 or not (-366 <= bysetpos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - self._bysetpos = (bysetpos,) - else: - self._bysetpos = tuple(bysetpos) - for pos in self._bysetpos: - if pos == 0 or not (-366 <= pos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - if not (byweekno or byyearday or bymonthday or - byweekday is not None or byeaster is not None): - if freq == YEARLY: - if not bymonth: - bymonth = dtstart.month - bymonthday = dtstart.day - elif freq == MONTHLY: - bymonthday = dtstart.day - elif freq == WEEKLY: - byweekday = dtstart.weekday() - # bymonth - if not bymonth: - self._bymonth = None - elif isinstance(bymonth, integer_types): - self._bymonth = (bymonth,) - else: - self._bymonth = tuple(bymonth) - # byyearday - if not byyearday: - self._byyearday = None - elif isinstance(byyearday, integer_types): - self._byyearday = (byyearday,) - else: - self._byyearday = tuple(byyearday) - # byeaster - if byeaster is not None: - if not easter: - from dateutil import easter - if isinstance(byeaster, integer_types): - self._byeaster = (byeaster,) - else: - self._byeaster = tuple(byeaster) - else: - self._byeaster = None - # bymonthay - if not bymonthday: - self._bymonthday = () - self._bynmonthday = () - elif isinstance(bymonthday, integer_types): - if bymonthday < 0: - self._bynmonthday = (bymonthday,) - self._bymonthday = () - else: - 
self._bymonthday = (bymonthday,) - self._bynmonthday = () - else: - self._bymonthday = tuple([x for x in bymonthday if x > 0]) - self._bynmonthday = tuple([x for x in bymonthday if x < 0]) - # byweekno - if byweekno is None: - self._byweekno = None - elif isinstance(byweekno, integer_types): - self._byweekno = (byweekno,) - else: - self._byweekno = tuple(byweekno) - # byweekday / bynweekday - if byweekday is None: - self._byweekday = None - self._bynweekday = None - elif isinstance(byweekday, integer_types): - self._byweekday = (byweekday,) - self._bynweekday = None - elif hasattr(byweekday, "n"): - if not byweekday.n or freq > MONTHLY: - self._byweekday = (byweekday.weekday,) - self._bynweekday = None - else: - self._bynweekday = ((byweekday.weekday, byweekday.n),) - self._byweekday = None - else: - self._byweekday = [] - self._bynweekday = [] - for wday in byweekday: - if isinstance(wday, integer_types): - self._byweekday.append(wday) - elif not wday.n or freq > MONTHLY: - self._byweekday.append(wday.weekday) - else: - self._bynweekday.append((wday.weekday, wday.n)) - self._byweekday = tuple(self._byweekday) - self._bynweekday = tuple(self._bynweekday) - if not self._byweekday: - self._byweekday = None - elif not self._bynweekday: - self._bynweekday = None - # byhour - if byhour is None: - if freq < HOURLY: - self._byhour = (dtstart.hour,) - else: - self._byhour = None - elif isinstance(byhour, integer_types): - self._byhour = (byhour,) - else: - self._byhour = tuple(byhour) - # byminute - if byminute is None: - if freq < MINUTELY: - self._byminute = (dtstart.minute,) - else: - self._byminute = None - elif isinstance(byminute, integer_types): - self._byminute = (byminute,) - else: - self._byminute = tuple(byminute) - # bysecond - if bysecond is None: - if freq < SECONDLY: - self._bysecond = (dtstart.second,) - else: - self._bysecond = None - elif isinstance(bysecond, integer_types): - self._bysecond = (bysecond,) - else: - self._bysecond = tuple(bysecond) - - if self._freq >= HOURLY: - self._timeset = None - else: - self._timeset = [] - for hour in self._byhour: - for minute in self._byminute: - for second in self._bysecond: - self._timeset.append( - datetime.time(hour, minute, second, - tzinfo=self._tzinfo)) - self._timeset.sort() - self._timeset = tuple(self._timeset) - - def _iter(self): - year, month, day, hour, minute, second, weekday, yearday, _ = \ - self._dtstart.timetuple() - - # Some local variables to speed things up a bit - freq = self._freq - interval = self._interval - wkst = self._wkst - until = self._until - bymonth = self._bymonth - byweekno = self._byweekno - byyearday = self._byyearday - byweekday = self._byweekday - byeaster = self._byeaster - bymonthday = self._bymonthday - bynmonthday = self._bynmonthday - bysetpos = self._bysetpos - byhour = self._byhour - byminute = self._byminute - bysecond = self._bysecond - - ii = _iterinfo(self) - ii.rebuild(year, month) - - getdayset = {YEARLY:ii.ydayset, - MONTHLY:ii.mdayset, - WEEKLY:ii.wdayset, - DAILY:ii.ddayset, - HOURLY:ii.ddayset, - MINUTELY:ii.ddayset, - SECONDLY:ii.ddayset}[freq] - - if freq < HOURLY: - timeset = self._timeset - else: - gettimeset = {HOURLY:ii.htimeset, - MINUTELY:ii.mtimeset, - SECONDLY:ii.stimeset}[freq] - if ((freq >= HOURLY and - self._byhour and hour not in self._byhour) or - (freq >= MINUTELY and - self._byminute and minute not in self._byminute) or - (freq >= SECONDLY and - self._bysecond and second not in self._bysecond)): - timeset = () - else: - timeset = gettimeset(hour, minute, second) - 
- total = 0 - count = self._count - while True: - # Get dayset with the right frequency - dayset, start, end = getdayset(year, month, day) - - # Do the "hard" work ;-) - filtered = False - for i in dayset[start:end]: - if ((bymonth and ii.mmask[i] not in bymonth) or - (byweekno and not ii.wnomask[i]) or - (byweekday and ii.wdaymask[i] not in byweekday) or - (ii.nwdaymask and not ii.nwdaymask[i]) or - (byeaster and not ii.eastermask[i]) or - ((bymonthday or bynmonthday) and - ii.mdaymask[i] not in bymonthday and - ii.nmdaymask[i] not in bynmonthday) or - (byyearday and - ((i < ii.yearlen and i+1 not in byyearday - and -ii.yearlen+i not in byyearday) or - (i >= ii.yearlen and i+1-ii.yearlen not in byyearday - and -ii.nextyearlen+i-ii.yearlen - not in byyearday)))): - dayset[i] = None - filtered = True - - # Output results - if bysetpos and timeset: - poslist = [] - for pos in bysetpos: - if pos < 0: - daypos, timepos = divmod(pos, len(timeset)) - else: - daypos, timepos = divmod(pos-1, len(timeset)) - try: - i = [x for x in dayset[start:end] - if x is not None][daypos] - time = timeset[timepos] - except IndexError: - pass - else: - date = datetime.date.fromordinal(ii.yearordinal+i) - res = datetime.datetime.combine(date, time) - if res not in poslist: - poslist.append(res) - poslist.sort() - for res in poslist: - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - total += 1 - yield res - if count: - count -= 1 - if not count: - self._len = total - return - else: - for i in dayset[start:end]: - if i is not None: - date = datetime.date.fromordinal(ii.yearordinal+i) - for time in timeset: - res = datetime.datetime.combine(date, time) - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - total += 1 - yield res - if count: - count -= 1 - if not count: - self._len = total - return - - # Handle frequency and interval - fixday = False - if freq == YEARLY: - year += interval - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == MONTHLY: - month += interval - if month > 12: - div, mod = divmod(month, 12) - month = mod - year += div - if month == 0: - month = 12 - year -= 1 - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == WEEKLY: - if wkst > weekday: - day += -(weekday+1+(6-wkst))+self._interval*7 - else: - day += -(weekday-wkst)+self._interval*7 - weekday = wkst - fixday = True - elif freq == DAILY: - day += interval - fixday = True - elif freq == HOURLY: - if filtered: - # Jump to one iteration before next day - hour += ((23-hour)//interval)*interval - while True: - hour += interval - div, mod = divmod(hour, 24) - if div: - hour = mod - day += div - fixday = True - if not byhour or hour in byhour: - break - timeset = gettimeset(hour, minute, second) - elif freq == MINUTELY: - if filtered: - # Jump to one iteration before next day - minute += ((1439-(hour*60+minute))//interval)*interval - while True: - minute += interval - div, mod = divmod(minute, 60) - if div: - minute = mod - hour += div - div, mod = divmod(hour, 24) - if div: - hour = mod - day += div - fixday = True - filtered = False - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute)): - break - timeset = gettimeset(hour, minute, second) - elif freq == SECONDLY: - if filtered: - # Jump to one iteration before next day - second += (((86399-(hour*3600+minute*60+second)) - //interval)*interval) - while True: - second += self._interval - div, mod = 
divmod(second, 60) - if div: - second = mod - minute += div - div, mod = divmod(minute, 60) - if div: - minute = mod - hour += div - div, mod = divmod(hour, 24) - if div: - hour = mod - day += div - fixday = True - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute) and - (not bysecond or second in bysecond)): - break - timeset = gettimeset(hour, minute, second) - - if fixday and day > 28: - daysinmonth = calendar.monthrange(year, month)[1] - if day > daysinmonth: - while day > daysinmonth: - day -= daysinmonth - month += 1 - if month == 13: - month = 1 - year += 1 - if year > datetime.MAXYEAR: - self._len = total - return - daysinmonth = calendar.monthrange(year, month)[1] - ii.rebuild(year, month) - -class _iterinfo(object): - __slots__ = ["rrule", "lastyear", "lastmonth", - "yearlen", "nextyearlen", "yearordinal", "yearweekday", - "mmask", "mrange", "mdaymask", "nmdaymask", - "wdaymask", "wnomask", "nwdaymask", "eastermask"] - - def __init__(self, rrule): - for attr in self.__slots__: - setattr(self, attr, None) - self.rrule = rrule - - def rebuild(self, year, month): - # Every mask is 7 days longer to handle cross-year weekly periods. - rr = self.rrule - if year != self.lastyear: - self.yearlen = 365+calendar.isleap(year) - self.nextyearlen = 365+calendar.isleap(year+1) - firstyday = datetime.date(year, 1, 1) - self.yearordinal = firstyday.toordinal() - self.yearweekday = firstyday.weekday() - - wday = datetime.date(year, 1, 1).weekday() - if self.yearlen == 365: - self.mmask = M365MASK - self.mdaymask = MDAY365MASK - self.nmdaymask = NMDAY365MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M365RANGE - else: - self.mmask = M366MASK - self.mdaymask = MDAY366MASK - self.nmdaymask = NMDAY366MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M366RANGE - - if not rr._byweekno: - self.wnomask = None - else: - self.wnomask = [0]*(self.yearlen+7) - #no1wkst = firstwkst = self.wdaymask.index(rr._wkst) - no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7 - if no1wkst >= 4: - no1wkst = 0 - # Number of days in the year, plus the days we got - # from last year. - wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7 - else: - # Number of days in the year, minus the days we - # left in last year. - wyearlen = self.yearlen-no1wkst - div, mod = divmod(wyearlen, 7) - numweeks = div+mod//4 - for n in rr._byweekno: - if n < 0: - n += numweeks+1 - if not (0 < n <= numweeks): - continue - if n > 1: - i = no1wkst+(n-1)*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - else: - i = no1wkst - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if 1 in rr._byweekno: - # Check week number 1 of next year as well - # TODO: Check -numweeks for next year. - i = no1wkst+numweeks*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - if i < self.yearlen: - # If week starts in next year, we - # don't care about it. - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if no1wkst: - # Check last week number of last year as - # well. If no1wkst is 0, either the year - # started on week start, or week number 1 - # got days from last year, so there are no - # days from last year's last week number in - # this year. 
- if -1 not in rr._byweekno: - lyearweekday = datetime.date(year-1, 1, 1).weekday() - lno1wkst = (7-lyearweekday+rr._wkst)%7 - lyearlen = 365+calendar.isleap(year-1) - if lno1wkst >= 4: - lno1wkst = 0 - lnumweeks = 52+(lyearlen+ - (lyearweekday-rr._wkst)%7)%7//4 - else: - lnumweeks = 52+(self.yearlen-no1wkst)%7//4 - else: - lnumweeks = -1 - if lnumweeks in rr._byweekno: - for i in range(no1wkst): - self.wnomask[i] = 1 - - if (rr._bynweekday and - (month != self.lastmonth or year != self.lastyear)): - ranges = [] - if rr._freq == YEARLY: - if rr._bymonth: - for month in rr._bymonth: - ranges.append(self.mrange[month-1:month+1]) - else: - ranges = [(0, self.yearlen)] - elif rr._freq == MONTHLY: - ranges = [self.mrange[month-1:month+1]] - if ranges: - # Weekly frequency won't get here, so we may not - # care about cross-year weekly periods. - self.nwdaymask = [0]*self.yearlen - for first, last in ranges: - last -= 1 - for wday, n in rr._bynweekday: - if n < 0: - i = last+(n+1)*7 - i -= (self.wdaymask[i]-wday)%7 - else: - i = first+(n-1)*7 - i += (7-self.wdaymask[i]+wday)%7 - if first <= i <= last: - self.nwdaymask[i] = 1 - - if rr._byeaster: - self.eastermask = [0]*(self.yearlen+7) - eyday = easter.easter(year).toordinal()-self.yearordinal - for offset in rr._byeaster: - self.eastermask[eyday+offset] = 1 - - self.lastyear = year - self.lastmonth = month - - def ydayset(self, year, month, day): - return list(range(self.yearlen)), 0, self.yearlen - - def mdayset(self, year, month, day): - set = [None]*self.yearlen - start, end = self.mrange[month-1:month+1] - for i in range(start, end): - set[i] = i - return set, start, end - - def wdayset(self, year, month, day): - # We need to handle cross-year weeks here. - set = [None]*(self.yearlen+7) - i = datetime.date(year, month, day).toordinal()-self.yearordinal - start = i - for j in range(7): - set[i] = i - i += 1 - #if (not (0 <= i < self.yearlen) or - # self.wdaymask[i] == self.rrule._wkst): - # This will cross the year boundary, if necessary. 
- if self.wdaymask[i] == self.rrule._wkst: - break - return set, start, i - - def ddayset(self, year, month, day): - set = [None]*self.yearlen - i = datetime.date(year, month, day).toordinal()-self.yearordinal - set[i] = i - return set, i, i+1 - - def htimeset(self, hour, minute, second): - set = [] - rr = self.rrule - for minute in rr._byminute: - for second in rr._bysecond: - set.append(datetime.time(hour, minute, second, - tzinfo=rr._tzinfo)) - set.sort() - return set - - def mtimeset(self, hour, minute, second): - set = [] - rr = self.rrule - for second in rr._bysecond: - set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) - set.sort() - return set - - def stimeset(self, hour, minute, second): - return (datetime.time(hour, minute, second, - tzinfo=self.rrule._tzinfo),) - - -class rruleset(rrulebase): - - class _genitem(object): - def __init__(self, genlist, gen): - try: - self.dt = advance_iterator(gen) - genlist.append(self) - except StopIteration: - pass - self.genlist = genlist - self.gen = gen - - def __next__(self): - try: - self.dt = advance_iterator(self.gen) - except StopIteration: - self.genlist.remove(self) - - next = __next__ - - def __lt__(self, other): - return self.dt < other.dt - - def __gt__(self, other): - return self.dt > other.dt - - def __eq__(self, other): - return self.dt == other.dt - - def __ne__(self, other): - return self.dt != other.dt - - def __init__(self, cache=False): - super(rruleset, self).__init__(cache) - self._rrule = [] - self._rdate = [] - self._exrule = [] - self._exdate = [] - - def rrule(self, rrule): - self._rrule.append(rrule) - - def rdate(self, rdate): - self._rdate.append(rdate) - - def exrule(self, exrule): - self._exrule.append(exrule) - - def exdate(self, exdate): - self._exdate.append(exdate) - - def _iter(self): - rlist = [] - self._rdate.sort() - self._genitem(rlist, iter(self._rdate)) - for gen in [iter(x) for x in self._rrule]: - self._genitem(rlist, gen) - rlist.sort() - exlist = [] - self._exdate.sort() - self._genitem(exlist, iter(self._exdate)) - for gen in [iter(x) for x in self._exrule]: - self._genitem(exlist, gen) - exlist.sort() - lastdt = None - total = 0 - while rlist: - ritem = rlist[0] - if not lastdt or lastdt != ritem.dt: - while exlist and exlist[0] < ritem: - advance_iterator(exlist[0]) - exlist.sort() - if not exlist or ritem != exlist[0]: - total += 1 - yield ritem.dt - lastdt = ritem.dt - advance_iterator(ritem) - rlist.sort() - self._len = total - -class _rrulestr(object): - - _freq_map = {"YEARLY": YEARLY, - "MONTHLY": MONTHLY, - "WEEKLY": WEEKLY, - "DAILY": DAILY, - "HOURLY": HOURLY, - "MINUTELY": MINUTELY, - "SECONDLY": SECONDLY} - - _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6} - - def _handle_int(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = int(value) - - def _handle_int_list(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = [int(x) for x in value.split(',')] - - _handle_INTERVAL = _handle_int - _handle_COUNT = _handle_int - _handle_BYSETPOS = _handle_int_list - _handle_BYMONTH = _handle_int_list - _handle_BYMONTHDAY = _handle_int_list - _handle_BYYEARDAY = _handle_int_list - _handle_BYEASTER = _handle_int_list - _handle_BYWEEKNO = _handle_int_list - _handle_BYHOUR = _handle_int_list - _handle_BYMINUTE = _handle_int_list - _handle_BYSECOND = _handle_int_list - - def _handle_FREQ(self, rrkwargs, name, value, **kwargs): - rrkwargs["freq"] = self._freq_map[value] - - def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): - global 
parser - if not parser: - from dateutil import parser - try: - rrkwargs["until"] = parser.parse(value, - ignoretz=kwargs.get("ignoretz"), - tzinfos=kwargs.get("tzinfos")) - except ValueError: - raise ValueError("invalid until date") - - def _handle_WKST(self, rrkwargs, name, value, **kwargs): - rrkwargs["wkst"] = self._weekday_map[value] - - def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg): - l = [] - for wday in value.split(','): - for i in range(len(wday)): - if wday[i] not in '+-0123456789': - break - n = wday[:i] or None - w = wday[i:] - if n: n = int(n) - l.append(weekdays[self._weekday_map[w]](n)) - rrkwargs["byweekday"] = l - - _handle_BYDAY = _handle_BYWEEKDAY - - def _parse_rfc_rrule(self, line, - dtstart=None, - cache=False, - ignoretz=False, - tzinfos=None): - if line.find(':') != -1: - name, value = line.split(':') - if name != "RRULE": - raise ValueError("unknown parameter name") - else: - value = line - rrkwargs = {} - for pair in value.split(';'): - name, value = pair.split('=') - name = name.upper() - value = value.upper() - try: - getattr(self, "_handle_"+name)(rrkwargs, name, value, - ignoretz=ignoretz, - tzinfos=tzinfos) - except AttributeError: - raise ValueError("unknown parameter '%s'" % name) - except (KeyError, ValueError): - raise ValueError("invalid '%s': %s" % (name, value)) - return rrule(dtstart=dtstart, cache=cache, **rrkwargs) - - def _parse_rfc(self, s, - dtstart=None, - cache=False, - unfold=False, - forceset=False, - compatible=False, - ignoretz=False, - tzinfos=None): - global parser - if compatible: - forceset = True - unfold = True - s = s.upper() - if not s.strip(): - raise ValueError("empty string") - if unfold: - lines = s.splitlines() - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - else: - lines = s.split() - if (not forceset and len(lines) == 1 and - (s.find(':') == -1 or s.startswith('RRULE:'))): - return self._parse_rfc_rrule(lines[0], cache=cache, - dtstart=dtstart, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - rrulevals = [] - rdatevals = [] - exrulevals = [] - exdatevals = [] - for line in lines: - if not line: - continue - if line.find(':') == -1: - name = "RRULE" - value = line - else: - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0] - parms = parms[1:] - if name == "RRULE": - for parm in parms: - raise ValueError("unsupported RRULE parm: "+parm) - rrulevals.append(value) - elif name == "RDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError("unsupported RDATE parm: "+parm) - rdatevals.append(value) - elif name == "EXRULE": - for parm in parms: - raise ValueError("unsupported EXRULE parm: "+parm) - exrulevals.append(value) - elif name == "EXDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError("unsupported RDATE parm: "+parm) - exdatevals.append(value) - elif name == "DTSTART": - for parm in parms: - raise ValueError("unsupported DTSTART parm: "+parm) - if not parser: - from dateutil import parser - dtstart = parser.parse(value, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - raise ValueError("unsupported property: "+name) - if (forceset or len(rrulevals) > 1 or - rdatevals or exrulevals or exdatevals): - if not parser and (rdatevals or exdatevals): - from dateutil import parser - set = rruleset(cache=cache) - for value in rrulevals: - 
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in rdatevals: - for datestr in value.split(','): - set.rdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exrulevals: - set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exdatevals: - for datestr in value.split(','): - set.exdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - if compatible and dtstart: - set.rdate(dtstart) - return set - else: - return self._parse_rfc_rrule(rrulevals[0], - dtstart=dtstart, - cache=cache, - ignoretz=ignoretz, - tzinfos=tzinfos) - - def __call__(self, s, **kwargs): - return self._parse_rfc(s, **kwargs) - -rrulestr = _rrulestr() - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py3/tz.py b/lib/dateutil_py3/tz.py deleted file mode 100644 index e849fc24b5e2..000000000000 --- a/lib/dateutil_py3/tz.py +++ /dev/null @@ -1,960 +0,0 @@ -""" -Copyright (c) 2003-2007 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -__license__ = "Simplified BSD" - -from six import string_types, PY3 - -import datetime -import struct -import time -import sys -import os - -relativedelta = None -parser = None -rrule = None - -__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", - "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"] - -try: - from dateutil.tzwin import tzwin, tzwinlocal -except (ImportError, OSError): - tzwin, tzwinlocal = None, None - -def tzname_in_python2(myfunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. It used to return bytes, but was changed - to unicode strings - """ - def inner_func(*args, **kwargs): - if PY3: - return myfunc(*args, **kwargs) - else: - return myfunc(*args, **kwargs).encode() - return inner_func - -ZERO = datetime.timedelta(0) -EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal() - -class tzutc(datetime.tzinfo): - - def utcoffset(self, dt): - return ZERO - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return "UTC" - - def __eq__(self, other): - return (isinstance(other, tzutc) or - (isinstance(other, tzoffset) and other._offset == ZERO)) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - -class tzoffset(datetime.tzinfo): - - def __init__(self, name, offset): - self._name = name - self._offset = datetime.timedelta(seconds=offset) - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._name - - def __eq__(self, other): - return (isinstance(other, tzoffset) and - self._offset == other._offset) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(%s, %s)" % (self.__class__.__name__, - repr(self._name), - self._offset.days*86400+self._offset.seconds) - - __reduce__ = object.__reduce__ - -class tzlocal(datetime.tzinfo): - - _std_offset = datetime.timedelta(seconds=-time.timezone) - if time.daylight: - _dst_offset = datetime.timedelta(seconds=-time.altzone) - else: - _dst_offset = _std_offset - - def utcoffset(self, dt): - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if self._isdst(dt): - return self._dst_offset-self._std_offset - else: - return ZERO - - @tzname_in_python2 - 
def tzname(self, dt): - return time.tzname[self._isdst(dt)] - - def _isdst(self, dt): - # We can't use mktime here. It is unstable when deciding if - # the hour near to a change is DST or not. - # - # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, - # dt.minute, dt.second, dt.weekday(), 0, -1)) - # return time.localtime(timestamp).tm_isdst - # - # The code above yields the following result: - # - #>>> import tz, datetime - #>>> t = tz.tzlocal() - #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - #'BRDT' - #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() - #'BRST' - #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - #'BRST' - #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() - #'BRDT' - #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - #'BRDT' - # - # Here is a more stable implementation: - # - timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 - + dt.hour * 3600 - + dt.minute * 60 - + dt.second) - return time.localtime(timestamp+time.timezone).tm_isdst - - def __eq__(self, other): - if not isinstance(other, tzlocal): - return False - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset) - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - -class _ttinfo(object): - __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"] - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def __repr__(self): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - - def __eq__(self, other): - if not isinstance(other, _ttinfo): - return False - return (self.offset == other.offset and - self.delta == other.delta and - self.isdst == other.isdst and - self.abbr == other.abbr and - self.isstd == other.isstd and - self.isgmt == other.isgmt) - - def __ne__(self, other): - return not self.__eq__(other) - - def __getstate__(self): - state = {} - for name in self.__slots__: - state[name] = getattr(self, name, None) - return state - - def __setstate__(self, state): - for name in self.__slots__: - if name in state: - setattr(self, name, state[name]) - -class tzfile(datetime.tzinfo): - - # http://www.twinsun.com/tz/tz-link.htm - # ftp://ftp.iana.org/tz/tz*.tar.gz - - def __init__(self, fileobj): - if isinstance(fileobj, string_types): - self._filename = fileobj - fileobj = open(fileobj, 'rb') - elif hasattr(fileobj, "name"): - self._filename = fileobj.name - else: - self._filename = repr(fileobj) - - # From tzfile(5): - # - # The time zone information files used by tzset(3) - # begin with the magic characters "TZif" to identify - # them as time zone information files, followed by - # sixteen bytes reserved for future use, followed by - # six four-byte values of type long, written in a - # ``standard'' byte order (the high-order byte - # of the value is written first). - - if fileobj.read(4).decode() != "TZif": - raise ValueError("magic not found") - - fileobj.read(16) - - ( - # The number of UTC/local indicators stored in the file. - ttisgmtcnt, - - # The number of standard/wall indicators stored in the file. - ttisstdcnt, - - # The number of leap seconds for which data is - # stored in the file. - leapcnt, - - # The number of "transition times" for which data - # is stored in the file. 
- timecnt, - - # The number of "local time types" for which data - # is stored in the file (must not be zero). - typecnt, - - # The number of characters of "time zone - # abbreviation strings" stored in the file. - charcnt, - - ) = struct.unpack(">6l", fileobj.read(24)) - - # The above header is followed by tzh_timecnt four-byte - # values of type long, sorted in ascending order. - # These values are written in ``standard'' byte order. - # Each is used as a transition time (as returned by - # time(2)) at which the rules for computing local time - # change. - - if timecnt: - self._trans_list = struct.unpack(">%dl" % timecnt, - fileobj.read(timecnt*4)) - else: - self._trans_list = [] - - # Next come tzh_timecnt one-byte values of type unsigned - # char; each one tells which of the different types of - # ``local time'' types described in the file is associated - # with the same-indexed transition time. These values - # serve as indices into an array of ttinfo structures that - # appears next in the file. - - if timecnt: - self._trans_idx = struct.unpack(">%dB" % timecnt, - fileobj.read(timecnt)) - else: - self._trans_idx = [] - - # Each ttinfo structure is written as a four-byte value - # for tt_gmtoff of type long, in a standard byte - # order, followed by a one-byte value for tt_isdst - # and a one-byte value for tt_abbrind. In each - # structure, tt_gmtoff gives the number of - # seconds to be added to UTC, tt_isdst tells whether - # tm_isdst should be set by localtime(3), and - # tt_abbrind serves as an index into the array of - # time zone abbreviation characters that follow the - # ttinfo structure(s) in the file. - - ttinfo = [] - - for i in range(typecnt): - ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) - - abbr = fileobj.read(charcnt).decode() - - # Then there are tzh_leapcnt pairs of four-byte - # values, written in standard byte order; the - # first value of each pair gives the time (as - # returned by time(2)) at which a leap second - # occurs; the second gives the total number of - # leap seconds to be applied after the given time. - # The pairs of values are sorted in ascending order - # by time. - - # Not used, for now - if leapcnt: - leap = struct.unpack(">%dl" % (leapcnt*2), - fileobj.read(leapcnt*8)) - - # Then there are tzh_ttisstdcnt standard/wall - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as standard - # time or wall clock time, and are used when - # a time zone file is used in handling POSIX-style - # time zone environment variables. - - if ttisstdcnt: - isstd = struct.unpack(">%db" % ttisstdcnt, - fileobj.read(ttisstdcnt)) - - # Finally, there are tzh_ttisgmtcnt UTC/local - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as UTC or - # local time, and are used when a time zone file - # is used in handling POSIX-style time zone envi- - # ronment variables. - - if ttisgmtcnt: - isgmt = struct.unpack(">%db" % ttisgmtcnt, - fileobj.read(ttisgmtcnt)) - - # ** Everything has been read ** - - # Build ttinfo list - self._ttinfo_list = [] - for i in range(typecnt): - gmtoff, isdst, abbrind = ttinfo[i] - # Round to full-minutes if that's not the case. Python's - # datetime doesn't accept sub-minute timezones. Check - # http://python.org/sf/1447945 for some information. 
- gmtoff = (gmtoff+30)//60*60 - tti = _ttinfo() - tti.offset = gmtoff - tti.delta = datetime.timedelta(seconds=gmtoff) - tti.isdst = isdst - tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] - tti.isstd = (ttisstdcnt > i and isstd[i] != 0) - tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) - self._ttinfo_list.append(tti) - - # Replace ttinfo indexes for ttinfo objects. - trans_idx = [] - for idx in self._trans_idx: - trans_idx.append(self._ttinfo_list[idx]) - self._trans_idx = tuple(trans_idx) - - # Set standard, dst, and before ttinfos. before will be - # used when a given time is before any transitions, - # and will be set to the first non-dst ttinfo, or to - # the first dst, if all of them are dst. - self._ttinfo_std = None - self._ttinfo_dst = None - self._ttinfo_before = None - if self._ttinfo_list: - if not self._trans_list: - self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0] - else: - for i in range(timecnt-1, -1, -1): - tti = self._trans_idx[i] - if not self._ttinfo_std and not tti.isdst: - self._ttinfo_std = tti - elif not self._ttinfo_dst and tti.isdst: - self._ttinfo_dst = tti - if self._ttinfo_std and self._ttinfo_dst: - break - else: - if self._ttinfo_dst and not self._ttinfo_std: - self._ttinfo_std = self._ttinfo_dst - - for tti in self._ttinfo_list: - if not tti.isdst: - self._ttinfo_before = tti - break - else: - self._ttinfo_before = self._ttinfo_list[0] - - # Now fix transition times to become relative to wall time. - # - # I'm not sure about this. In my tests, the tz source file - # is setup to wall time, and in the binary file isstd and - # isgmt are off, so it should be in wall time. OTOH, it's - # always in gmt time. Let me know if you have comments - # about this. - laststdoffset = 0 - self._trans_list = list(self._trans_list) - for i in range(len(self._trans_list)): - tti = self._trans_idx[i] - if not tti.isdst: - # This is std time. - self._trans_list[i] += tti.offset - laststdoffset = tti.offset - else: - # This is dst time. Convert to std. - self._trans_list[i] += laststdoffset - self._trans_list = tuple(self._trans_list) - - def _find_ttinfo(self, dt, laststd=0): - timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 - + dt.hour * 3600 - + dt.minute * 60 - + dt.second) - idx = 0 - for trans in self._trans_list: - if timestamp < trans: - break - idx += 1 - else: - return self._ttinfo_std - if idx == 0: - return self._ttinfo_before - if laststd: - while idx > 0: - tti = self._trans_idx[idx-1] - if not tti.isdst: - return tti - idx -= 1 - else: - return self._ttinfo_std - else: - return self._trans_idx[idx-1] - - def utcoffset(self, dt): - if not self._ttinfo_std: - return ZERO - return self._find_ttinfo(dt).delta - - def dst(self, dt): - if not self._ttinfo_dst: - return ZERO - tti = self._find_ttinfo(dt) - if not tti.isdst: - return ZERO - - # The documentation says that utcoffset()-dst() must - # be constant for every dt. - return tti.delta-self._find_ttinfo(dt, laststd=1).delta - - # An alternative for that would be: - # - # return self._ttinfo_dst.offset-self._ttinfo_std.offset - # - # However, this class stores historical changes in the - # dst offset, so I belive that this wouldn't be the right - # way to implement this. 
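
The tzfile constructor above parses the binary TZif layout by hand: a 4-byte magic, 16 reserved bytes, six big-endian 32-bit counts, and then the transition and ttinfo tables. For reference, a minimal standalone sketch of just the fixed header read; the zoneinfo path is an assumption (any TZif file, e.g. on a typical Linux system, would do):

    import struct

    # Read only the fixed TZif header that the constructor above consumes first.
    # The path below is an assumption; substitute any TZif file available locally.
    with open("/usr/share/zoneinfo/UTC", "rb") as f:
        if f.read(4) != b"TZif":                  # magic bytes
            raise ValueError("magic not found")
        f.read(16)                                # reserved for future use
        counts = struct.unpack(">6l", f.read(24))
        names = ("ttisgmtcnt", "ttisstdcnt", "leapcnt",
                 "timecnt", "typecnt", "charcnt")
        print(dict(zip(names, counts)))
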
- - @tzname_in_python2 - def tzname(self, dt): - if not self._ttinfo_std: - return None - return self._find_ttinfo(dt).abbr - - def __eq__(self, other): - if not isinstance(other, tzfile): - return False - return (self._trans_list == other._trans_list and - self._trans_idx == other._trans_idx and - self._ttinfo_list == other._ttinfo_list) - - def __ne__(self, other): - return not self.__eq__(other) - - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) - - def __reduce__(self): - if not os.path.isfile(self._filename): - raise ValueError("Unpickable %s class" % self.__class__.__name__) - return (self.__class__, (self._filename,)) - -class tzrange(datetime.tzinfo): - - def __init__(self, stdabbr, stdoffset=None, - dstabbr=None, dstoffset=None, - start=None, end=None): - global relativedelta - if not relativedelta: - from dateutil import relativedelta - self._std_abbr = stdabbr - self._dst_abbr = dstabbr - if stdoffset is not None: - self._std_offset = datetime.timedelta(seconds=stdoffset) - else: - self._std_offset = ZERO - if dstoffset is not None: - self._dst_offset = datetime.timedelta(seconds=dstoffset) - elif dstabbr and stdoffset is not None: - self._dst_offset = self._std_offset+datetime.timedelta(hours=+1) - else: - self._dst_offset = ZERO - if dstabbr and start is None: - self._start_delta = relativedelta.relativedelta( - hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) - else: - self._start_delta = start - if dstabbr and end is None: - self._end_delta = relativedelta.relativedelta( - hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) - else: - self._end_delta = end - - def utcoffset(self, dt): - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if self._isdst(dt): - return self._dst_offset-self._std_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def _isdst(self, dt): - if not self._start_delta: - return False - year = datetime.datetime(dt.year, 1, 1) - start = year+self._start_delta - end = year+self._end_delta - dt = dt.replace(tzinfo=None) - if start < end: - return dt >= start and dt < end - else: - return dt >= start or dt < end - - def __eq__(self, other): - if not isinstance(other, tzrange): - return False - return (self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr and - self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._start_delta == other._start_delta and - self._end_delta == other._end_delta) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - -class tzstr(tzrange): - - def __init__(self, s): - global parser - if not parser: - from dateutil import parser - self._s = s - - res = parser._parsetz(s) - if res is None: - raise ValueError("unknown string format") - - # Here we break the compatibility with the TZ variable handling. - # GMT-3 actually *means* the timezone -3. - if res.stdabbr in ("GMT", "UTC"): - res.stdoffset *= -1 - - # We must initialize it first, since _delta() needs - # _std_offset and _dst_offset set. Use False in start/end - # to avoid building it two times. 
- tzrange.__init__(self, res.stdabbr, res.stdoffset, - res.dstabbr, res.dstoffset, - start=False, end=False) - - if not res.dstabbr: - self._start_delta = None - self._end_delta = None - else: - self._start_delta = self._delta(res.start) - if self._start_delta: - self._end_delta = self._delta(res.end, isend=1) - - def _delta(self, x, isend=0): - kwargs = {} - if x.month is not None: - kwargs["month"] = x.month - if x.weekday is not None: - kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) - if x.week > 0: - kwargs["day"] = 1 - else: - kwargs["day"] = 31 - elif x.day: - kwargs["day"] = x.day - elif x.yday is not None: - kwargs["yearday"] = x.yday - elif x.jyday is not None: - kwargs["nlyearday"] = x.jyday - if not kwargs: - # Default is to start on first sunday of april, and end - # on last sunday of october. - if not isend: - kwargs["month"] = 4 - kwargs["day"] = 1 - kwargs["weekday"] = relativedelta.SU(+1) - else: - kwargs["month"] = 10 - kwargs["day"] = 31 - kwargs["weekday"] = relativedelta.SU(-1) - if x.time is not None: - kwargs["seconds"] = x.time - else: - # Default is 2AM. - kwargs["seconds"] = 7200 - if isend: - # Convert to standard time, to follow the documented way - # of working with the extra hour. See the documentation - # of the tzinfo class. - delta = self._dst_offset-self._std_offset - kwargs["seconds"] -= delta.seconds+delta.days*86400 - return relativedelta.relativedelta(**kwargs) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - -class _tzicalvtzcomp(object): - def __init__(self, tzoffsetfrom, tzoffsetto, isdst, - tzname=None, rrule=None): - self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) - self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) - self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom - self.isdst = isdst - self.tzname = tzname - self.rrule = rrule - -class _tzicalvtz(datetime.tzinfo): - def __init__(self, tzid, comps=[]): - self._tzid = tzid - self._comps = comps - self._cachedate = [] - self._cachecomp = [] - - def _find_comp(self, dt): - if len(self._comps) == 1: - return self._comps[0] - dt = dt.replace(tzinfo=None) - try: - return self._cachecomp[self._cachedate.index(dt)] - except ValueError: - pass - lastcomp = None - lastcompdt = None - for comp in self._comps: - if not comp.isdst: - # Handle the extra hour in DST -> STD - compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True) - else: - compdt = comp.rrule.before(dt, inc=True) - if compdt and (not lastcompdt or lastcompdt < compdt): - lastcompdt = compdt - lastcomp = comp - if not lastcomp: - # RFC says nothing about what to do when a given - # time is before the first onset date. We'll look for the - # first standard component, or the first component, if - # none is found. 
- for comp in self._comps: - if not comp.isdst: - lastcomp = comp - break - else: - lastcomp = comp[0] - self._cachedate.insert(0, dt) - self._cachecomp.insert(0, lastcomp) - if len(self._cachedate) > 10: - self._cachedate.pop() - self._cachecomp.pop() - return lastcomp - - def utcoffset(self, dt): - return self._find_comp(dt).tzoffsetto - - def dst(self, dt): - comp = self._find_comp(dt) - if comp.isdst: - return comp.tzoffsetdiff - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._find_comp(dt).tzname - - def __repr__(self): - return "" % repr(self._tzid) - - __reduce__ = object.__reduce__ - -class tzical(object): - def __init__(self, fileobj): - global rrule - if not rrule: - from dateutil import rrule - - if isinstance(fileobj, string_types): - self._s = fileobj - fileobj = open(fileobj, 'r') # ical should be encoded in UTF-8 with CRLF - elif hasattr(fileobj, "name"): - self._s = fileobj.name - else: - self._s = repr(fileobj) - - self._vtz = {} - - self._parse_rfc(fileobj.read()) - - def keys(self): - return list(self._vtz.keys()) - - def get(self, tzid=None): - if tzid is None: - keys = list(self._vtz.keys()) - if len(keys) == 0: - raise ValueError("no timezones defined") - elif len(keys) > 1: - raise ValueError("more than one timezone available") - tzid = keys[0] - return self._vtz.get(tzid) - - def _parse_offset(self, s): - s = s.strip() - if not s: - raise ValueError("empty offset") - if s[0] in ('+', '-'): - signal = (-1, +1)[s[0]=='+'] - s = s[1:] - else: - signal = +1 - if len(s) == 4: - return (int(s[:2])*3600+int(s[2:])*60)*signal - elif len(s) == 6: - return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal - else: - raise ValueError("invalid offset: "+s) - - def _parse_rfc(self, s): - lines = s.splitlines() - if not lines: - raise ValueError("empty string") - - # Unfold - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - - tzid = None - comps = [] - invtz = False - comptype = None - for line in lines: - if not line: - continue - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0].upper() - parms = parms[1:] - if invtz: - if name == "BEGIN": - if value in ("STANDARD", "DAYLIGHT"): - # Process component - pass - else: - raise ValueError("unknown component: "+value) - comptype = value - founddtstart = False - tzoffsetfrom = None - tzoffsetto = None - rrulelines = [] - tzname = None - elif name == "END": - if value == "VTIMEZONE": - if comptype: - raise ValueError("component not closed: "+comptype) - if not tzid: - raise ValueError("mandatory TZID not found") - if not comps: - raise ValueError("at least one component is needed") - # Process vtimezone - self._vtz[tzid] = _tzicalvtz(tzid, comps) - invtz = False - elif value == comptype: - if not founddtstart: - raise ValueError("mandatory DTSTART not found") - if tzoffsetfrom is None: - raise ValueError("mandatory TZOFFSETFROM not found") - if tzoffsetto is None: - raise ValueError("mandatory TZOFFSETFROM not found") - # Process component - rr = None - if rrulelines: - rr = rrule.rrulestr("\n".join(rrulelines), - compatible=True, - ignoretz=True, - cache=True) - comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, - (comptype == "DAYLIGHT"), - tzname, rr) - comps.append(comp) - comptype = None - else: - raise ValueError("invalid component end: "+value) - elif comptype: - if name == "DTSTART": - 
rrulelines.append(line) - founddtstart = True - elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): - rrulelines.append(line) - elif name == "TZOFFSETFROM": - if parms: - raise ValueError("unsupported %s parm: %s "%(name, parms[0])) - tzoffsetfrom = self._parse_offset(value) - elif name == "TZOFFSETTO": - if parms: - raise ValueError("unsupported TZOFFSETTO parm: "+parms[0]) - tzoffsetto = self._parse_offset(value) - elif name == "TZNAME": - if parms: - raise ValueError("unsupported TZNAME parm: "+parms[0]) - tzname = value - elif name == "COMMENT": - pass - else: - raise ValueError("unsupported property: "+name) - else: - if name == "TZID": - if parms: - raise ValueError("unsupported TZID parm: "+parms[0]) - tzid = value - elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): - pass - else: - raise ValueError("unsupported property: "+name) - elif name == "BEGIN" and value == "VTIMEZONE": - tzid = None - comps = [] - invtz = True - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - -if sys.platform != "win32": - TZFILES = ["/etc/localtime", "localtime"] - TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"] -else: - TZFILES = [] - TZPATHS = [] - -def gettz(name=None): - tz = None - if not name: - try: - name = os.environ["TZ"] - except KeyError: - pass - if name is None or name == ":": - for filepath in TZFILES: - if not os.path.isabs(filepath): - filename = filepath - for path in TZPATHS: - filepath = os.path.join(path, filename) - if os.path.isfile(filepath): - break - else: - continue - if os.path.isfile(filepath): - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = tzlocal() - else: - if name.startswith(":"): - name = name[:-1] - if os.path.isabs(name): - if os.path.isfile(name): - tz = tzfile(name) - else: - tz = None - else: - for path in TZPATHS: - filepath = os.path.join(path, name) - if not os.path.isfile(filepath): - filepath = filepath.replace(' ', '_') - if not os.path.isfile(filepath): - continue - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = None - if tzwin: - try: - tz = tzwin(name) - except OSError: - pass - if not tz: - from dateutil.zoneinfo import gettz - tz = gettz(name) - if not tz: - for c in name: - # name must have at least one offset to be a tzstr - if c in "0123456789": - try: - tz = tzstr(name) - except ValueError: - pass - break - else: - if name in ("GMT", "UTC"): - tz = tzutc() - elif name in time.tzname: - tz = tzlocal() - return tz - -# vim:ts=4:sw=4:et diff --git a/lib/dateutil_py3/tzwin.py b/lib/dateutil_py3/tzwin.py deleted file mode 100644 index 041c6cc3d645..000000000000 --- a/lib/dateutil_py3/tzwin.py +++ /dev/null @@ -1,179 +0,0 @@ -# This code was originally contributed by Jeffrey Harris. 
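
Taken together, the gettz() function above resolves a name through zone files on TZPATHS, the bundled zoneinfo archive, TZ-style strings, and finally the GMT/UTC and local-time fallbacks. A short usage sketch, assuming python-dateutil is installed and the named zone exists on the system (gettz returns None when it cannot resolve a name):

    from datetime import datetime
    from dateutil import tz

    eastern = tz.gettz("America/New_York")   # found on TZPATHS -> tzfile instance
    local = tz.gettz()                       # TZ env var, /etc/localtime, or tzlocal()
    rule = tz.tzstr("EST5EDT")               # offset rule parsed from a TZ-style string

    # dateutil tzinfo objects can be attached to datetimes directly.
    print(datetime(2013, 7, 1, 12, 0, tzinfo=eastern))
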
-import datetime -import struct -import winreg - - -__all__ = ["tzwin", "tzwinlocal"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - -def _settzkeyname(): - global TZKEYNAME - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - try: - winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - -_settzkeyname() - -class tzwinbase(datetime.tzinfo): - """tzinfo class based on win32's timezones available in the registry.""" - - def utcoffset(self, dt): - if self._isdst(dt): - return datetime.timedelta(minutes=self._dstoffset) - else: - return datetime.timedelta(minutes=self._stdoffset) - - def dst(self, dt): - if self._isdst(dt): - minutes = self._dstoffset - self._stdoffset - return datetime.timedelta(minutes=minutes) - else: - return datetime.timedelta(0) - - def tzname(self, dt): - if self._isdst(dt): - return self._dstname - else: - return self._stdname - - def list(): - """Return a list of all time zones known to the system.""" - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - tzkey = winreg.OpenKey(handle, TZKEYNAME) - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - tzkey.Close() - handle.Close() - return result - list = staticmethod(list) - - def display(self): - return self._display - - def _isdst(self, dt): - dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - if dston < dstoff: - return dston <= dt.replace(tzinfo=None) < dstoff - else: - return not dstoff <= dt.replace(tzinfo=None) < dston - - -class tzwin(tzwinbase): - - def __init__(self, name): - self._name = name - - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - tzkey = winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name)) - keydict = valuestodict(tzkey) - tzkey.Close() - handle.Close() - - self._stdname = keydict["Std"].encode("iso-8859-1") - self._dstname = keydict["Dlt"].encode("iso-8859-1") - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1 - - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - - def __init__(self): - - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - - tzlocalkey = winreg.OpenKey(handle, TZLOCALKEYNAME) - keydict = valuestodict(tzlocalkey) - tzlocalkey.Close() - - self._stdname = keydict["StandardName"].encode("iso-8859-1") - self._dstname = keydict["DaylightName"].encode("iso-8859-1") - - try: - tzkey = winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname)) - _keydict = valuestodict(tzkey) - self._display = 
_keydict["Display"] - tzkey.Close() - except OSError: - self._display = None - - handle.Close() - - self._stdoffset = -keydict["Bias"]-keydict["StandardBias"] - self._dstoffset = self._stdoffset-keydict["DaylightBias"] - - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:6] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:6] - - def __reduce__(self): - return (self.__class__, ()) - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """dayofweek == 0 means Sunday, whichweek 5 means last instance""" - first = datetime.datetime(year, month, 1, hour, minute) - weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) - for n in range(whichweek): - dt = weekdayone+(whichweek-n)*ONEWEEK - if dt.month == month: - return dt - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dict = {} - size = winreg.QueryInfoKey(key)[1] - for i in range(size): - data = winreg.EnumValue(key, i) - dict[data[0]] = data[1] - return dict diff --git a/lib/dateutil_py3/zoneinfo/__init__.py b/lib/dateutil_py3/zoneinfo/__init__.py deleted file mode 100644 index a1b34874baa3..000000000000 --- a/lib/dateutil_py3/zoneinfo/__init__.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Copyright (c) 2003-2005 Gustavo Niemeyer - -This module offers extensions to the standard Python -datetime module. -""" -from dateutil.tz import tzfile -from tarfile import TarFile -import os - -__author__ = "Tomi Pieviläinen " -__license__ = "Simplified BSD" - -__all__ = ["setcachesize", "gettz", "rebuild"] - -CACHE = [] -CACHESIZE = 10 - -class tzfile(tzfile): - def __reduce__(self): - return (gettz, (self._filename,)) - -def getzoneinfofile(): - filenames = sorted(os.listdir(os.path.join(os.path.dirname(__file__)))) - filenames.reverse() - for entry in filenames: - if entry.startswith("zoneinfo") and ".tar." 
in entry: - return os.path.join(os.path.dirname(__file__), entry) - return None - -ZONEINFOFILE = getzoneinfofile() - -del getzoneinfofile - -def setcachesize(size): - global CACHESIZE, CACHE - CACHESIZE = size - del CACHE[size:] - -def gettz(name): - tzinfo = None - if ZONEINFOFILE: - for cachedname, tzinfo in CACHE: - if cachedname == name: - break - else: - tf = TarFile.open(ZONEINFOFILE) - try: - zonefile = tf.extractfile(name) - except KeyError: - tzinfo = None - else: - tzinfo = tzfile(zonefile) - tf.close() - CACHE.insert(0, (name, tzinfo)) - del CACHE[CACHESIZE:] - return tzinfo - -def rebuild(filename, tag=None, format="gz"): - import tempfile, shutil - tmpdir = tempfile.mkdtemp() - zonedir = os.path.join(tmpdir, "zoneinfo") - moduledir = os.path.dirname(__file__) - if tag: tag = "-"+tag - targetname = "zoneinfo%s.tar.%s" % (tag, format) - try: - tf = TarFile.open(filename) - # The "backwards" zone file contains links to other files, so must be - # processed as last - for name in sorted(tf.getnames(), - key=lambda k: k != "backward" and k or "z"): - if not (name.endswith(".sh") or - name.endswith(".tab") or - name == "leapseconds"): - tf.extract(name, tmpdir) - filepath = os.path.join(tmpdir, name) - os.system("zic -d %s %s" % (zonedir, filepath)) - tf.close() - target = os.path.join(moduledir, targetname) - for entry in os.listdir(moduledir): - if entry.startswith("zoneinfo") and ".tar." in entry: - os.unlink(os.path.join(moduledir, entry)) - tf = TarFile.open(target, "w:%s" % format) - for entry in os.listdir(zonedir): - entrypath = os.path.join(zonedir, entry) - tf.add(entrypath, entry) - tf.close() - finally: - shutil.rmtree(tmpdir) diff --git a/lib/dateutil_py3/zoneinfo/zoneinfo--latest.tar.gz b/lib/dateutil_py3/zoneinfo/zoneinfo--latest.tar.gz deleted file mode 100644 index 12eadffb098a..000000000000 Binary files a/lib/dateutil_py3/zoneinfo/zoneinfo--latest.tar.gz and /dev/null differ diff --git a/lib/dateutil_py3/zoneinfo/zoneinfo-2011d.tar.gz b/lib/dateutil_py3/zoneinfo/zoneinfo-2011d.tar.gz deleted file mode 100644 index a8c96074d8a2..000000000000 Binary files a/lib/dateutil_py3/zoneinfo/zoneinfo-2011d.tar.gz and /dev/null differ diff --git a/lib/matplotlib/__init__.py b/lib/matplotlib/__init__.py index 03bdd3f0b12d..3018250deaac 100644 --- a/lib/matplotlib/__init__.py +++ b/lib/matplotlib/__init__.py @@ -99,10 +99,27 @@ """ from __future__ import print_function +import sys + __version__ = '1.3.x' __version__numpy__ = '1.4' # minimum required numpy version -import os, re, shutil, subprocess, sys, warnings +try: + import dateutil +except ImportError: + raise ImportError("matplotlib requires dateutil") + +try: + import pyparsing +except ImportError: + raise ImportError("matplotlib requires pyparsing") +else: + if sys.version_info[0] >= 3: + if [int(x) for x in pyparsing.__version__.split('.')] <= [1, 5, 6]: + raise ImportError( + "matplotlib requires pyparsing > 1.5.6 on Python 3.x") + +import os, re, shutil, subprocess, warnings import distutils.sysconfig import distutils.version @@ -944,10 +961,10 @@ class rc_context(object): This allows one to do:: >>> with mpl.rc_context(fname='screen.rc'): - >>> plt.plot(x, a) - >>> with mpl.rc_context(fname='print.rc'): - >>> plt.plot(x, b) - >>> plt.plot(x, c) + ... plt.plot(x, a) + ... with mpl.rc_context(fname='print.rc'): + ... plt.plot(x, b) + ... 
plt.plot(x, c) The 'a' vs 'x' and 'c' vs 'x' plots would have settings from 'screen.rc', while the 'b' vs 'x' plot would have settings from @@ -956,7 +973,7 @@ class rc_context(object): A dictionary can also be passed to the context manager:: >>> with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'): - >>> plt.plot(x, a) + ... plt.plot(x, a) The 'rc' dictionary takes precedence over the settings loaded from 'fname'. Passing a dictionary only is also valid. diff --git a/lib/matplotlib/artist.py b/lib/matplotlib/artist.py index 2ad7e4cbd570..39ef1f7fc5c8 100644 --- a/lib/matplotlib/artist.py +++ b/lib/matplotlib/artist.py @@ -1197,7 +1197,7 @@ def setp(obj, *args, **kwargs): with python kwargs. For example, the following are equivalent:: >>> setp(lines, 'linewidth', 2, 'color', r') # MATLAB style - + ... >>> setp(lines, linewidth=2, color='r') # python style """ diff --git a/lib/matplotlib/dates.py b/lib/matplotlib/dates.py index ccceb4699182..c22c08e730f8 100755 --- a/lib/matplotlib/dates.py +++ b/lib/matplotlib/dates.py @@ -83,7 +83,7 @@ * :class:`RRuleLocator`: locate using a :class:`matplotlib.dates.rrulewrapper`. The :class:`rrulewrapper` is a simple wrapper around a - :class:`dateutils.rrule` (`dateutil + :class:`dateutil.rrule` (`dateutil `_) which allow almost arbitrary date tick specifications. See `rrule example <../examples/pylab_examples/date_demo_rrule.html>`_. diff --git a/lib/matplotlib/fontconfig_pattern.py b/lib/matplotlib/fontconfig_pattern.py index de32d84eafca..f96198a6394c 100644 --- a/lib/matplotlib/fontconfig_pattern.py +++ b/lib/matplotlib/fontconfig_pattern.py @@ -21,12 +21,8 @@ from __future__ import print_function import re, sys -if sys.version_info[0] >= 3: - from matplotlib.pyparsing_py3 import Literal, ZeroOrMore, \ - Optional, Regex, StringEnd, ParseException, Suppress -else: - from matplotlib.pyparsing_py2 import Literal, ZeroOrMore, \ - Optional, Regex, StringEnd, ParseException, Suppress +from pyparsing import Literal, ZeroOrMore, \ + Optional, Regex, StringEnd, ParseException, Suppress family_punc = r'\\\-:,' family_unescape = re.compile(r'\\([%s])' % family_punc).sub diff --git a/lib/matplotlib/mathtext.py b/lib/matplotlib/mathtext.py index 3742aecf0e1e..9063b85eb8b6 100644 --- a/lib/matplotlib/mathtext.py +++ b/lib/matplotlib/mathtext.py @@ -34,16 +34,10 @@ from numpy import inf, isinf import numpy as np -if sys.version_info[0] >= 3: - from matplotlib.pyparsing_py3 import Combine, Group, Optional, Forward, \ - Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \ - ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \ - FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException -else: - from matplotlib.pyparsing_py2 import Combine, Group, Optional, Forward, \ - Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \ - ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \ - FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException +from pyparsing import Combine, Group, Optional, Forward, \ + Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \ + ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \ + FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException # Enable packrat parsing ParserElement.enablePackrat() diff --git a/lib/matplotlib/pyparsing_py2.py b/lib/matplotlib/pyparsing_py2.py deleted file mode 100644 index ac94ff331fbd..000000000000 --- a/lib/matplotlib/pyparsing_py2.py +++ /dev/null @@ -1,3791 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2010 Paul T. 
McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -#from __future__ import generators - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars - -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word( alphas ) + "," + Word( alphas ) + "!" - - hello = "Hello, World!" - print hello, "->", greet.parseString( hello ) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. - -The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an -object with named attributes. - -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) 
- - quoted strings - - embedded comments -""" - -__version__ = "1.5.5" -__versionTime__ = "12 Aug 2010 03:56" -__author__ = "Paul McGuire " - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums', -'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', -] - -""" -Detect if we are running version 3.X and make appropriate changes -Robert A. Clark -""" -_PY3K = sys.version_info[0] > 2 -if _PY3K: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - _ustr = str - _str2dict = set - alphas = string.ascii_lowercase + string.ascii_uppercase -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. - """ - if isinstance(obj,unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError: - # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) - # state that "The return value must be a string object". However, does a - # unicode object (being a subclass of basestring) count as a "string - # object"? - # If so, then return a unicode object: - return unicode(obj) - # Else encode it... but how? There are many choices... :) - # Replace unprintables with escape codes? - #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') - # Replace unprintables with question marks? - #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') - # ... 
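
The greeting grammar described in the module docstring above runs unchanged against the external pyparsing package that the rest of this patch switches to. A minimal sketch, assuming pyparsing is installed:

    from pyparsing import Word, alphas

    # The "Hello, World!" grammar from the module docstring above.
    greet = Word(alphas) + "," + Word(alphas) + "!"
    print(greet.parseString("Hello, World!"))   # ['Hello', ',', 'World', '!']
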
- - def _str2dict(strg): - return dict( [(c,0) for c in strg] ) - - alphas = string.lowercase + string.uppercase - -# build list of single arg builtins, tolerant of Python version, that can be used as parse actions -singleArgBuiltins = [] -import __builtin__ -for fname in "sum len enumerate sorted reversed list tuple set any all".split(): - try: - singleArgBuiltins.append(getattr(__builtin__,fname)) - except AttributeError: - continue - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()] - for from_,to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -class _Constants(object): - pass - -nums = string.digits -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join( [ c for c in string.printable if c not in string.whitespace ] ) - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError(aname) - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. 
- """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( [line_str[:line_column], - markerString, line_str[line_column:]]) - return line_str.strip() - def __dir__(self): - return "loc msg pstr parserElement lineno col line " \ - "markInputLine __str__ __repr__".split() - -class ParseException(ParseBaseException): - """exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - pass - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like C{ParseFatalException}, but thrown internally when an - C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because - an unbacktrackable syntax error has been found""" - def __init__(self, pe): - super(ParseSyntaxException, self).__init__( - pe.pstr, pe.loc, pe.msg, pe.parserElement) - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by C{validate()} if the grammar could be improperly recursive""" - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup) - def setOffset(self,i): - self.tup = (self.tup[0],i) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to the parsed data: - - as a list (C{len(results)}) - - by list index (C{results[0], results[1]}, etc.) 
- - by attribute (C{results.}) - """ - #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" ) - def __new__(cls, toklist, name=None, asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - if isinstance(toklist, list): - self.__toklist = toklist[:] - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not toklist in (None,'',[]): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError,IndexError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,int): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] - sub = v - if isinstance(sub,ParseResults): - sub.__parent = wkref(self) - - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i+1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name in self.__tokdict: - occurrences = self.__tokdict[name] - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__( self, k ): - return k in self.__tokdict - - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return len( self.__toklist ) > 0 - __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( self.__toklist[::-1] ) - def keys( self ): - """Returns all named result keys.""" - return self.__tokdict.keys() - - def pop( self, index=-1 ): - """Removes and returns item at specified index (default=last). 
- Will work with either numeric indices or dict-key indicies.""" - ret = self[index] - del self[index] - return ret - - def get(self, key, defaultValue=None): - """Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified.""" - if key in self: - return self[key] - else: - return defaultValue - - def insert( self, index, insStr ): - """Inserts new element at location index in the list of parsed tokens.""" - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name in self.__tokdict: - occurrences = self.__tokdict[name] - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def items( self ): - """Returns all named result keys and values as a list of tuples.""" - return [(k,self[k]) for k in self.__tokdict] - - def values( self ): - """Returns all named result values.""" - return [ v[-1][0] for v in self.__tokdict.values() ] - - def __getattr__( self, name ): - if True: #name not in self.__slots__: - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - return None - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = ( lambda a: (a<0 and offset) or (a+offset) ) - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - return self - - def __radd__(self, other): - if isinstance(other,int) and other == 0: - return self.copy() - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - out = "[" - sep = "" - for i in self.__toklist: - if isinstance(i, ParseResults): - out += sep + _ustr(i) - else: - out += sep + repr(i) - sep = ", " - out += "]" - return out - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """Returns the parse results as a nested list of matching tokens, all converted to strings.""" - out = [] - for res in self.__toklist: - if isinstance(res,ParseResults): - out.append( res.asList() ) - else: - out.append( res ) - return out - - def asDict( self ): - """Returns the named parse results as dictionary.""" - return dict( self.items() ) - - def copy( self ): - """Returns a new copy of a C{ParseResults} object.""" - ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """Returns the parse results as XML. 
Tags are created for tokens and lists that have defined results names.""" - nl = "\n" - out = [] - namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist ] ) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - worklist = self.__toklist - for i,res in enumerate(worklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "" ] - - out += [ nl, indent, "" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - """Returns the results name for this token expression.""" - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - self.__tokdict.values()[0][0][1] in (0,-1)): - return self.__tokdict.keys()[0] - else: - return None - - def dump(self,indent='',depth=0): - """Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data.""" - out = [] - out.append( indent+_ustr(self.asList()) ) - keys = self.items() - keys.sort() - for k,v in keys: - if out: - out.append('\n') - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v.keys(): - out.append( v.dump(indent,depth+1) ) - else: - out.append(_ustr(v)) - else: - out.append(_ustr(v)) - return "".join(out) - - # add support for pickle protocol - def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) - - def __setstate__(self,state): - self.__toklist = state[0] - self.__tokdict, \ - par, \ - inAccumNames, \ - self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __dir__(self): - return dir(super(ParseResults,self)) + self.keys() - -def col (loc,strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. 
- """ - return (loc} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - return strg.count("\n",0,loc) + 1 - -def line( loc, strg ): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR+1:nextCR] - else: - return strg[lastCR+1:] - -def _defaultStartDebugAction( instring, loc, expr ): - print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - def setDefaultWhitespaceChars( chars ): - """Overrides the default whitespace chars - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars) - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element.""" - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """Define name for this expression, for use in debugging.""" - self.name = name - self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg - return self - - def setResultsName( self, name, listAllMatches=False ): - """Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. 
- - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. - """ - newself = self.copy() - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self,breakFlag = True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set C{breakFlag} to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse,"_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def _normalizeParseActionArgs( f ): - """Internal method used to decorate parse actions that take fewer than 3 arguments, - so that all parse actions can be called as C{f(s,l,t)}.""" - STAR_ARGS = 4 - - # special handling for single-argument builtins - if (f in singleArgBuiltins): - numargs = 1 - else: - try: - restore = None - if isinstance(f,type): - restore = f - f = f.__init__ - if not _PY3K: - codeObj = f.func_code - else: - codeObj = f.code - if codeObj.co_flags & STAR_ARGS: - return f - numargs = codeObj.co_argcount - if not _PY3K: - if hasattr(f,"im_self"): - numargs -= 1 - else: - if hasattr(f,"__self__"): - numargs -= 1 - if restore: - f = restore - except AttributeError: - try: - if not _PY3K: - call_im_func_code = f.__call__.im_func.func_code - else: - call_im_func_code = f.__code__ - - # not a function, must be a callable object, get info from the - # im_func binding of its bound __call__ method - if call_im_func_code.co_flags & STAR_ARGS: - return f - numargs = call_im_func_code.co_argcount - if not _PY3K: - if hasattr(f.__call__,"im_self"): - numargs -= 1 - else: - if hasattr(f.__call__,"__self__"): - numargs -= 0 - except AttributeError: - if not _PY3K: - call_func_code = f.__call__.func_code - else: - call_func_code = f.__call__.__code__ - # not a bound method, get info directly from __call__ method - if call_func_code.co_flags & STAR_ARGS: - return f - numargs = call_func_code.co_argcount - if not _PY3K: - if hasattr(f.__call__,"im_self"): - numargs -= 1 - else: - if hasattr(f.__call__,"__self__"): - numargs -= 1 - - - #~ print ("adding function %s with %d args" % (f.func_name,numargs)) - if numargs == 3: - return f - else: - if numargs > 3: - def tmp(s,l,t): - return f(f.__call__.__self__, s,l,t) - if numargs == 2: - def tmp(s,l,t): - return f(l,t) - elif numargs == 1: - def tmp(s,l,t): - return f(t) - else: #~ numargs == 0: - def tmp(s,l,t): - return f() - try: - tmp.__name__ = f.__name__ - except (AttributeError,TypeError): - # no need for special handling if attribute doesnt exist - pass - try: - tmp.__doc__ = f.__doc__ - except (AttributeError,TypeError): - # no need for special handling if attribute doesnt exist - pass - try: - tmp.__dict__.update(f.__dict__) - except (AttributeError,TypeError): - # no need for special handling if attribute doesnt exist - pass - return tmp - _normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs) - - def setParseAction( self, *fns, **kwargs ): - """Define action to perform when successfully matching parse element definition. 
- Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a ParseResults object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - self.parseAction = list(map(self._normalizeParseActionArgs, list(fns))) - self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def addParseAction( self, *fns, **kwargs ): - """Add parse action to expression's list of parse actions. See L{I{setParseAction}}.""" - self.parseAction += list(map(self._normalizeParseActionArgs, list(fns))) - self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - C{fn(s,loc,expr,err)} where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. 
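A minimal sketch of the parse-action signatures listed above (the integer grammar is illustrative only):

    from pyparsing import Word, nums

    # single-argument form fn(toks): the returned value replaces the matched tokens
    integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
    print(integer.parseString("42")[0] + 1)   # 43

    # full fn(s, loc, toks) form, useful when the action needs the match offset
    def note_loc(s, loc, toks):
        print("matched %r at offset %d" % (toks[0], loc))
    Word(nums).setParseAction(note_loc).parseString("7")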
It may throw C{ParseFatalException} - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException: - #~ print ("Exception raised:", err) - err = None - if self.debugActions[2]: - err = sys.exc_info()[1] - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - if err is None: - err = sys.exc_info()[1] - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or loc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - err = sys.exc_info()[1] - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - - if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - try: - return self._parse( instring, loc, doActions=False )[0] - except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) - - # this method 
gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - lookup = (self,instring,loc,callPreParse,doActions) - if lookup in ParserElement._exprArgCache: - value = ParserElement._exprArgCache[ lookup ] - if isinstance(value,Exception): - raise value - return value - else: - try: - value = self._parseNoCache( instring, loc, doActions, callPreParse ) - ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) - return value - except ParseBaseException: - pe = sys.exc_info()[1] - ParserElement._exprArgCache[ lookup ] = pe - raise - - _parse = _parseNoCache - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - _exprArgCache = {} - def resetCache(): - ParserElement._exprArgCache.clear() - resetCache = staticmethod(resetCache) - - _packratEnabled = False - def enablePackrat(): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method C{ParserElement.enablePackrat()}. If - your program uses C{psyco} to "compile as you go", you must call - C{enablePackrat} before calling C{psyco.full()}. If you do not do this, - Python will crash. For best results, call C{enablePackrat()} immediately - after importing pyparsing. - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - ParserElement._parse = ParserElement._parseCache - enablePackrat = staticmethod(enablePackrat) - - def parseString( self, instring, parseAll=False ): - """Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{StringEnd()}). - - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. 
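A hedged example of turning on the packrat cache described above; the grammar is an arbitrary stand-in:

    from pyparsing import ParserElement, Word, alphas, OneOrMore

    ParserElement.enablePackrat()   # swaps _parse over to the memoizing _parseCache; call right after import

    greeting = OneOrMore(Word(alphas))
    print(greeting.parseString("hello hello hello").asList())   # ['hello', 'hello', 'hello']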
- If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - #~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse( instring, 0 ) - if parseAll: - #loc = self.preParse( instring, loc ) - se = StringEnd() - se._parse( instring, loc ) - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - else: - return tokens - - def scanString( self, instring, maxMatches=_MAX_INT ): - """Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. - - Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}} for more information on parsing - strings with embedded tabs.""" - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) - except ParseException: - loc = preloc+1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - loc = nextLoc - else: - loc = preloc+1 - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def transformString( self, instring ): - """Extension to C{scanString}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. 
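A small sketch of the scanString behavior documented above (input text invented for illustration):

    from pyparsing import Word, nums

    number = Word(nums)
    for tokens, start, end in number.scanString("room 101, floor 3"):
        print(tokens[0], start, end)   # each match with its start/end offsets in the input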
C{transformString()} returns the resulting transformed string.""" - out = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) - if t: - if isinstance(t,ParseResults): - out += t.asList() - elif isinstance(t,list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - return "".join(map(_ustr,_flatten(out))) - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def searchString( self, instring, maxMatches=_MAX_INT ): - """Another extension to C{scanString}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. - """ - try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def __add__(self, other ): - """Implementation of + operator - returns And""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, other ] ) - - def __radd__(self, other ): - """Implementation of + operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """Implementation of - operator, returns C{And} with error stop""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, And._ErrorStop(), other ] ) - - def __rsub__(self, other ): - """Implementation of - operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self,other): - """Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. 
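A short illustration of the transformString pattern described above, using a made-up substitution:

    from pyparsing import CaselessLiteral

    colour = CaselessLiteral("color").setParseAction(lambda toks: "colour")
    print(colour.transformString("Any color you like, any COLOR at all"))
    # -> Any colour you like, any colour at all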
Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent - to C{expr*n + ZeroOrMore(expr)} - (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} - (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)} - - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} - - """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") - - if (optElements): - def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self]*minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self]*minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other ): - """Implementation of | operator - returns C{MatchFirst}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """Implementation of | operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other ): - """Implementation of ^ operator - returns C{Or}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """Implementation of ^ operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with 
ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other ): - """Implementation of & operator - returns C{Each}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each( [ self, other ] ) - - def __rand__(self, other ): - """Implementation of & operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__( self ): - """Implementation of ~ operator - returns C{NotAny}""" - return NotAny( self ) - - def __call__(self, name): - """Shortcut for C{setResultsName}, with C{listAllMatches=default}:: - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - could be written as:: - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - """ - return self.setResultsName(name) - - def suppress( self ): - """Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """Overrides default behavior to expand s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match characters.""" - self.keepTabs = True - return self - - def ignore( self, other ): - """Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - """ - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append( other.copy() ) - else: - self.ignoreExprs.append( Suppress( other.copy() ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """Enable display of debugging messages while doing pattern matching.""" - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching. 
- Set C{flag} to True to enable, False to disable.""" - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """Check defined expressions for valid structure, check for infinite recursive definitions.""" - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename, parseAll=False ): - """Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - f = open(file_or_filename, "rb") - file_contents = f.read() - f.close() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def getException(self): - return ParseException("",0,self.errmsg,self) - - def __getattr__(self,aname): - if aname == "myException": - self.myException = ret = self.getException(); - return ret; - else: - raise AttributeError("no such attribute " + aname) - - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or self.__dict__ == other.__dict__ - elif isinstance(other, basestring): - try: - self.parseString(_ustr(other), parseAll=True) - return True - except ParseBaseException: - return False - else: - return super(ParserElement,self)==other - - def __ne__(self,other): - return not (self == other) - - def __hash__(self): - return hash(id(self)) - - def __req__(self,other): - return self == other - - def __rne__(self,other): - return not (self == other) - - -class Token(ParserElement): - """Abstract C{ParserElement} subclass, for defining atomic matching patterns.""" - def __init__( self ): - super(Token,self).__init__( savelist=False ) - #self.myException = ParseException("",0,"",self) - - def setName(self, name): - s = super(Token,self).setName(name) - self.errmsg = "Expected " + self.name - #s.myException.msg = self.errmsg - return s - - -class Empty(Token): - """An empty token, will always match.""" - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match.""" - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - -class Literal(Token): - """Token to exactly match a specified string.""" - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False 
- #self.myException.msg = self.errmsg - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc -_L = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{Literal}:: - Literal("if") will match the leading 'if' in 'ifAndOnlyIf'. - Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)' - Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive - matching, default is False. - """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ): - super(Keyword,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = _str2dict(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - setDefaultKeywordChars = staticmethod(setDefaultKeywordChars) - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. 
- self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class CaselessKeyword(Keyword): - def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ): - super(Word,self).__init__() - self.initCharsOrig = initChars - self.initChars = _str2dict(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = _str2dict(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = _str2dict(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.bodyCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - loc = result.end() - return loc,result.group() - - if not(instring[ loc ] in self.initChars): - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc 
= start + self.maxLen - maxloc = min( maxloc, instrlen ) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc4: - return s[:4]+"..." - else: - return s - - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - - -class Regex(Token): - """Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. - """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0): - """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.""" - super(Regex,self).__init__() - - if isinstance(pattern, basestring): - if len(pattern) == 0: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif isinstance(pattern, Regex.compiledREtype): - self.re = pattern - self.pattern = \ - self.reString = str(pattern) - self.flags = flags - - else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) - if not result: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - loc = result.end() - d = result.groupdict() - ret = ParseResults(result.group()) - if d: - for k in d: - ret[k] = d[k] - return loc,ret - - def __str__( self ): - try: - return super(Regex,self).__str__() - except: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - -class QuotedString(Token): - """Token for matching strings that are delimited by quoting characters. 
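A brief sketch of the Word and Regex tokens defined above (the identifier/version expressions are illustrative names, not from this patch):

    from pyparsing import Word, Regex, alphas, nums

    identifier = Word(alphas + "_", alphas + nums + "_")   # initial chars, then body chars
    version    = Regex(r"\d+\.\d+(\.\d+)?")

    print(identifier.parseString("parse_all2").asList())   # ['parse_all2']
    print(version.parseString("1.5.6").asList())           # ['1.5.6']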
- """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None): - """ - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=None) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - - multiline - boolean indicating whether quotes can span multiple lines (default=False) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar) - """ - super(QuotedString,self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if len(quoteChar) == 0: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if len(endQuoteChar) == 0: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')' - ) - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None - if not result: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] - - if isinstance(ret,basestring): - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern,"\g<1>",ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, 
ret - - def __str__( self ): - try: - return super(QuotedString,self).__str__() - except: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given set. - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) - #self.myException.msg = self.errmsg - self.mayIndexError = False - - def parseImpl( self, instring, loc, doActions=True ): - if instring[loc] in self.notChars: - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < maxlen and \ - (instring[loc] not in notchars): - loc += 1 - - if loc - start < self.minLen: - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(CharsNotIn, self).__str__() - except: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. 
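A small example of the QuotedString options documented above (the SQL-style quoting is only an illustration):

    from pyparsing import QuotedString

    sql_string = QuotedString("'", escQuote="''")
    print(sql_string.parseString("'it''s here'").asList())    # ["it's here"], unquoted by default

    raw = QuotedString('"', escChar='\\', unquoteResults=False)
    print(raw.parseString('"a \\" b"').asList())               # quotes and escape characters preserved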
Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{Word} class.""" - whiteStrs = { - " " : "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() - self.matchWhite = ws - self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) ) - #~ self.leaveWhitespace() - self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite])) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for tabular report scraping.""" - def __init__( self, colno ): - super(GoToColumn,self).__init__() - self.col = colno - - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) - if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) - newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] - return newloc, ret - -class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within the parse string""" - def __init__( self ): - super(LineStart,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected start of line" - #self.myException.msg = self.errmsg - - def preParse( self, instring, loc ): - preloc = super(LineStart,self).preParse(instring,loc) - if instring[preloc] == "\n": - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - if not( loc==0 or - (loc == self.preParse( instring, 0 )) or - (instring[loc-1] == "\n") ): #col(loc, instring) != 1: - #~ raise ParseException( instring, loc, "Expected start of line" ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - return loc, [] - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the parse string""" - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected end of line" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): 
- if loc len(instring): - return loc, [] - else: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, and - is not preceded by any character in a given set of wordChars - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. - """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() - self.wordChars = _str2dict(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True ): - if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of wordChars - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. - """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() - self.wordChars = _str2dict(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True ): - instrlen = len(instring) - if instrlen>0 and loc maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - else: - if loc2 > maxMatchLoc: - maxMatchLoc = loc2 - maxMatchExp = e - - if maxMatchLoc < 0: - if maxException is not None: - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - return maxMatchExp._parse( instring, loc, doActions ) - - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = Literal( other ) - return self.append( other ) #Or( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class MatchFirst(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the '|' operator. 
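To contrast the Or ('^', longest match) and MatchFirst ('|', first match) behaviors described above, a minimal sketch:

    from pyparsing import Word, nums

    integer = Word(nums)
    decimal = Word(nums) + "." + Word(nums)

    print((integer | decimal).parseString("3.1415").asList())   # ['3']               first listed wins
    print((integer ^ decimal).parseString("3.1415").asList())   # ['3', '.', '1415']  longest match wins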
- """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) - if exprs: - self.mayReturnEmpty = False - for e in self.exprs: - if e.mayReturnEmpty: - self.mayReturnEmpty = True - break - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse( instring, loc, doActions ) - return ret - except ParseException, err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other ): - if isinstance( other, basestring ): - other = Literal( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class Each(ParseExpression): - """Requires all given C{ParseExpressions} to be found, but in any order. - Expressions may be separated by whitespace. - May be constructed using the '&' operator. - """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) - self.mayReturnEmpty = True - for e in self.exprs: - if not e.mayReturnEmpty: - self.mayReturnEmpty = False - break - self.skipWhitespace = True - self.initExprGroups = True - - def parseImpl( self, instring, loc, doActions=True ): - if self.initExprGroups: - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ] - self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse( instring, tmpLoc ) - except ParseException: - failed.append(e) - else: - matchOrder.append(e) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join( [ _ustr(e) for e in tmpReqd ] ) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) - resultlist.append(results) - - finalResults = ParseResults([]) - for r in resultlist: - dups = {} - for 
k in r.keys(): - if k in finalResults.keys(): - tmp = ParseResults(finalResults[k]) - tmp += ParseResults(r[k]) - dups[k] = tmp - finalResults += ParseResults(r) - for k,v in dups.items(): - finalResults[k] = v - return loc, finalResults - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.""" - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - expr = Literal(expr) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) - else: - raise ParseException("",loc,self.errmsg,self) - - def leaveWhitespace( self ): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - else: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - return self - - def streamline( self ): - super(ParseElementEnhance,self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion( self, parseElementList ): - if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] - if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion( [] ) - - def __str__( self ): - try: - return super(ParseElementEnhance,self).__str__() - except: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. C{FollowedBy} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list.""" - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - self.expr.tryParse( instring, loc ) - return loc, [] - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. 
C{NotAny} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression does *not* match at the current - position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator.""" - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) - #self.myException = ParseException("",0,self.errmsg,self) - - def parseImpl( self, instring, loc, doActions=True ): - try: - self.expr.tryParse( instring, loc ) - except (ParseException,IndexError): - pass - else: - #~ raise ParseException(instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - - -class ZeroOrMore(ParseElementEnhance): - """Optional repetition of zero or more of the given expression.""" - def __init__( self, expr ): - super(ZeroOrMore,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - tokens = [] - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.keys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - - -class OneOrMore(ParseElementEnhance): - """Repetition of one or more of the given expression.""" - def parseImpl( self, instring, loc, doActions=True ): - # must be at least one - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - try: - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.keys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(OneOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -_optionalNotMatched = _NullToken() -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - A default return string can also be specified, if the optional expression - is not found. 
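As an aside for reviewers, a minimal sketch of the Optional-with-default behavior just described, using the pyparsing API being removed here; the zip-code grammar and names are illustrative only, not taken from this module:

from pyparsing import Word, nums, Optional

# five-digit zip with an optional "-nnnn" extension; the default is used when it is absent
zip_plus4 = Word(nums, exact=5) + Optional("-" + Word(nums, exact=4), default="")
print(zip_plus4.parseString("12345-6789").asList())   # expected: ['12345', '-', '6789']
print(zip_plus4.parseString("12345").asList())        # expected: ['12345', ''] (the default value)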
- """ - def __init__( self, exprs, default=_optionalNotMatched ): - super(Optional,self).__init__( exprs, savelist=False ) - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [ self.defaultValue ] - else: - tokens = [] - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched expression is found. - If C{include} is set to true, the matched expression is also parsed (the skipped text - and matched expression are returned as a 2-element list). The C{ignore} - argument is used to define grammars (typically quoted strings and comments) that - might contain false matches. - """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.asList = False - if failOn is not None and isinstance(failOn, basestring): - self.failOn = Literal(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) - #self.myException = ParseException("",0,self.errmsg,self) - - def parseImpl( self, instring, loc, doActions=True ): - startLoc = loc - instrlen = len(instring) - expr = self.expr - failParse = False - while loc <= instrlen: - try: - if self.failOn: - try: - self.failOn.tryParse(instring, loc) - except ParseBaseException: - pass - else: - failParse = True - raise ParseException(instring, loc, "Found expression " + str(self.failOn)) - failParse = False - if self.ignoreExpr is not None: - while 1: - try: - loc = self.ignoreExpr.tryParse(instring,loc) - # print "found ignoreExpr, advance to", loc - except ParseBaseException: - break - expr._parse( instring, loc, doActions=False, callPreParse=False ) - skipText = instring[startLoc:loc] - if self.includeMatch: - loc,mat = expr._parse(instring,loc,doActions,callPreParse=False) - if mat: - skipRes = ParseResults( skipText ) - skipRes += mat - return loc, [ skipRes ] - else: - return loc, [ skipText ] - else: - return loc, [ skipText ] - except (ParseException,IndexError): - if failParse: - raise - else: - loc += 1 - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. - - Note: take care when assigning to C{Forward} not to overlook precedence of operators. - Specifically, '|' has a lower precedence than '<<', so that:: - fwdExpr << a | b | c - will actually be evaluated as:: - (fwdExpr << a) | b | c - thereby leaving b and c out as parseable alternatives. 
It is recommended that you - explicitly group the values inserted into the C{Forward}:: - fwdExpr << (a | b | c) - """ - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) - - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = Literal(other) - self.expr = other - self.mayReturnEmpty = other.mayReturnEmpty - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return None - - def leaveWhitespace( self ): - self.skipWhitespace = False - return self - - def streamline( self ): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate( self, validateTrace=[] ): - if self not in validateTrace: - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse - try: - if self.expr is not None: - retString = _ustr(self.expr) - else: - retString = "None" - finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString - - def copy(self): - if self.expr is not None: - return super(Forward,self).copy() - else: - ret = Forward() - ret << self - return ret - -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." - -class TokenConverter(ParseElementEnhance): - """Abstract subclass of ParseExpression, for converting parsed results.""" - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) - self.saveAsList = False - -class Upcase(TokenConverter): - """Converter to upper case all matching tokens.""" - def __init__(self, *args): - super(Upcase,self).__init__(*args) - warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead", - DeprecationWarning,stacklevel=2) - - def postParse( self, instring, loc, tokenlist ): - return list(map( string.upper, tokenlist )) - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. 
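A small illustrative sketch of the Combine converter documented above (the real-number grammar is hypothetical, not part of this module):

from pyparsing import Word, nums, Combine, Optional

# re-join sign, integer part and fraction into one token; by default the pieces must be adjacent
real_number = Combine(Optional("-") + Word(nums) + Optional("." + Word(nums)))
print(real_number.parseString("-3.14").asList())   # expected: ['-3.14']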
- """ - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore( self, other ): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super( Combine, self).ignore( other ) - return self - - def postParse( self, instring, loc, tokenlist ): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) - - if self.resultsName and len(retToks.keys())>0: - return [ retToks ] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions.""" - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. - Useful for tabular report scraping when the first column can be used as a item key. - """ - def __init__( self, exprs ): - super(Dict,self).__init__( exprs ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey,int): - ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) - else: - dictvalue = tok.copy() #ParseResults(i) - del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) - - if self.resultsName: - return [ tokenlist ] - else: - return tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.""" - def postParse( self, instring, loc, tokenlist ): - return [] - - def suppress( self ): - return self - - -class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once.""" - def __init__(self, methodCall): - self.callable = ParserElement._normalizeParseActionArgs(methodCall) - self.called = False - def __call__(self,s,l,t): - if not self.called: - results = self.callable(s,l,t) - self.called = True - return results - raise ParseException(s,l,"") - def reset(self): - self.called = False - -def traceParseAction(f): - """Decorator for debugging parse actions.""" - f = ParserElement._normalizeParseActionArgs(f) - def z(*paArgs): - thisFunc = f.func_name - s,l,t = paArgs[-3:] - if len(paArgs)>3: - thisFunc = paArgs[0].__class__.__name__ + '.' 
+ thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) ) - try: - ret = f(*paArgs) - except Exception: - exc = sys.exc_info()[1] - sys.stderr.write( "<", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) - try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) ) - else: - return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) ) - except: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - - # last resort, just use MatchFirst - return MatchFirst( [ parseElementClass(sym) for sym in symbols ] ) - -def dictOf( key, value ): - """Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. - """ - return Dict( ZeroOrMore( Group ( key + value ) ) ) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given expression. Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. Simpler to use than the parse action C{keepOriginalText}, and does not - require the inspect module to chase up the call stack. By default, returns a - string containing the original parsed text. - - If the optional C{asString} argument is passed as False, then the return value is a - C{ParseResults} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. 
So if - the expression passed to C{originalTextFor} contains expressions with defined - results names, you must set C{asString} to False if you want to preserve those - results name values.""" - locMarker = Empty().setParseAction(lambda s,loc,t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] - else: - def extractText(s,l,t): - del t[:] - t.insert(0, s[t._original_start:t._original_end]) - del t["_original_start"] - del t["_original_end"] - matchExpr.setParseAction(extractText) - return matchExpr - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ]) -_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16))) -_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" - -_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p) - -def srange(s): - r"""Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be:: - a single character - an escaped character with a leading backslash (such as \- or \]) - an escaped hex character with a leading '\0x' (\0x21, which is a '!' character) - an escaped octal character with a leading '\0' (\041, which is a '!' character) - a range of any of the above, separated by a dash ('a-z', etc.) - any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.) - """ - try: - return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body]) - except: - return "" - -def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at a specific - column in the input text. - """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{transformString()}. - """ - def _replFunc(*args): - return [replStr] - return _replFunc - -def removeQuotes(s,l,t): - """Helper parse action for removing quotation marks from parsed quoted strings. 
- To use, add this parse action to quoted string using:: - quotedString.setParseAction( removeQuotes ) - """ - return t[0][1:-1] - -def upcaseTokens(s,l,t): - """Helper parse action to convert tokens to upper case.""" - return [ tt.upper() for tt in map(_ustr,t) ] - -def downcaseTokens(s,l,t): - """Helper parse action to convert tokens to lower case.""" - return [ tt.lower() for tt in map(_ustr,t) ] - -def keepOriginalText(s,startLoc,t): - """DEPRECATED - use new helper method C{originalTextFor}. - Helper parse action to preserve original parsed text, - overriding any nested parse actions.""" - try: - endloc = getTokensEndLoc() - except ParseException: - raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action") - del t[:] - t += ParseResults(s[startLoc:endloc]) - return t - -def getTokensEndLoc(): - """Method to be called from within a parse action to determine the end - location of the parsed tokens.""" - import inspect - fstack = inspect.stack() - try: - # search up the stack (through intervening argument normalizers) for correct calling routine - for f in fstack[2:]: - if f[3] == "_parseNoCache": - endloc = f[0].f_locals["loc"] - return endloc - else: - raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") - finally: - del fstack - -def _makeTags(tagStr, xml): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - else: - printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] ) - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % tagStr) - openTag.tag = resname - closeTag.tag = resname - return openTag, closeTag - -def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, given a tag name""" - return _makeTags( tagStr, False ) - -def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, given a tag name""" - return _makeTags( tagStr, True ) - -def withAttribute(*args,**attrDict): - """Helper to create a validating parse action to be used with start tags created - with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - or
. - - Call withAttribute with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in (class="Customer",align="right"), or - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. - - To verify that the attribute exists, but without specifying a value, pass - withAttribute.ANY_VALUE as the value. - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: - if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -opAssoc = _Constants() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def operatorPrecedence( baseExpr, opList ): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. - - Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants opAssoc.RIGHT and opAssoc.LEFT. 
- - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - """ - ret = Forward() - lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward()#.setName("expr%d" % i) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) - else: - matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) - else: - matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - matchExpr.setParseAction( pa ) - thisExpr << ( matchExpr | lastExpr ) - lastExpr = thisExpr - ret << lastExpr - return ret - -dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes") -sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes") -quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()) - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list (default="("); can also be a pyparsing expression - - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - - content - expression for items within the nested lists (default=None) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. 
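To make the opList tuples above concrete, a minimal sketch using the operatorPrecedence helper defined in this hunk; the arithmetic grammar is illustrative and the expected output is approximate:

from pyparsing import Word, nums, operatorPrecedence, opAssoc

integer = Word(nums).setParseAction(lambda t: int(t[0]))
arith = operatorPrecedence(integer, [
    ("*", 2, opAssoc.LEFT),   # higher-precedence level listed first
    ("+", 2, opAssoc.LEFT),
])
print(arith.parseString("1+2*3").asList())   # expected: [[1, '+', [2, '*', 3]]]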
- - Use the ignoreExpr argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an Or or MatchFirst. - The default is quotedString, but if no expressions are to be ignored, - then pass None for this argument. - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) - else: - ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. - - Parameters: - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single grammar - should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the - the current level; set to False for block of left-most statements - (default=True) - - A valid block must contain at least one blockStatement. 
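Stepping back to the nestedExpr helper completed above, a minimal usage sketch (the input string is chosen purely for illustration):

from pyparsing import nestedExpr

# default opener/closer are "(" and ")"; nesting becomes nested Python lists
print(nestedExpr().parseString("(a (b c) d)").asList())   # expected: [['a', ['b', 'c'], 'd']]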
- """ - def checkPeerIndent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") - - def checkSubIndent(s,l,t): - curCol = col(l,s) - if curCol > indentStack[-1]: - indentStack.append( curCol ) - else: - raise ParseException(s,l,"not a subentry") - - def checkUnindent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = Empty() + Empty().setParseAction(checkSubIndent) - PEER = Empty().setParseAction(checkPeerIndent) - UNDENT = Empty().setParseAction(checkUnindent) - if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) - else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:")) -commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline() -_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "')) -replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment") - -htmlComment = Regex(r"") -restOfLine = Regex(r".*").leaveWhitespace() -dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment") -cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?" 
+ str(tokenlist)) - print ("tokens = " + str(tokens)) - print ("tokens.columns = " + str(tokens.columns)) - print ("tokens.tables = " + str(tokens.tables)) - print (tokens.asXML("SQL",True)) - except ParseBaseException: - err = sys.exc_info()[1] - print (teststring + "->") - print (err.line) - print (" "*(err.column-1) + "^") - print (err) - print() - - selectToken = CaselessLiteral( "select" ) - fromToken = CaselessLiteral( "from" ) - - ident = Word( alphas, alphanums + "_$" ) - columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - columnNameList = Group( delimitedList( columnName ) )#.setName("columns") - tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - tableNameList = Group( delimitedList( tableName ) )#.setName("tables") - simpleSQL = ( selectToken + \ - ( '*' | columnNameList ).setResultsName( "columns" ) + \ - fromToken + \ - tableNameList.setResultsName( "tables" ) ) - - test( "SELECT * from XYZZY, ABC" ) - test( "select * from SYS.XYZZY" ) - test( "Select A from Sys.dual" ) - test( "Select AA,BB,CC from Sys.dual" ) - test( "Select A, B, C from Sys.dual" ) - test( "Select A, B, C from Sys.dual" ) - test( "Xelect A, B, C from Sys.dual" ) - test( "Select A, B, C frox Sys.dual" ) - test( "Select" ) - test( "Select ^^^ frox Sys.dual" ) - test( "Select A, B, C from Sys.dual, Table2 " ) diff --git a/lib/matplotlib/pyparsing_py3.py b/lib/matplotlib/pyparsing_py3.py deleted file mode 100644 index c0e0af2e98bd..000000000000 --- a/lib/matplotlib/pyparsing_py3.py +++ /dev/null @@ -1,3682 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2010 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -#from __future__ import generators - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars - -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word( alphas ) + "," + Word( alphas ) + "!" - - hello = "Hello, World!" 
- print hello, "->", greet.parseString( hello ) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. - -The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an -object with named attributes. - -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments -""" - -__version__ = "1.5.5" -__versionTime__ = "12 Aug 2010 03:56" -__author__ = "Paul McGuire " - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums', -'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', -] - -""" -Detect if we are running version 3.X and make appropriate changes -Robert A. Clark -""" -_PY3K = sys.version_info[0] > 2 -if _PY3K: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - _ustr = str - alphas = string.ascii_lowercase + string.ascii_uppercase -else: - _MAX_INT = sys.maxint - range = xrange - set = lambda s : dict( [(c,0) for c in s] ) - alphas = string.lowercase + string.uppercase - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. - """ - if isinstance(obj,unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. 
- return str(obj) - - except UnicodeEncodeError: - # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) - # state that "The return value must be a string object". However, does a - # unicode object (being a subclass of basestring) count as a "string - # object"? - # If so, then return a unicode object: - return unicode(obj) - # Else encode it... but how? There are many choices... :) - # Replace unprintables with escape codes? - #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') - # Replace unprintables with question marks? - #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') - # ... - - -# build list of single arg builtins, tolerant of Python version, that can be used as parse actions -singleArgBuiltins = [] -import builtins -for fname in "sum len enumerate sorted reversed list tuple set any all".split(): - try: - singleArgBuiltins.append(getattr(builtins,fname)) - except AttributeError: - continue - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - for from_,to_ in zip('&><"\'', "amp gt lt quot apos".split()): - data = data.replace(from_, '&'+to_+';') - return data - -class _Constants(object): - pass - -nums = string.digits -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join( [ c for c in string.printable if c not in string.whitespace ] ) - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError(aname) - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. 
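For reference, a short sketch of how the exception attributes and markInputline described above are typically used; the grammar and input are illustrative:

from pyparsing import Word, alphas, nums, ParseException

grammar = Word(alphas) + Word(nums)
try:
    grammar.parseString("hello world")
except ParseException as err:
    print(err.lineno, err.column)   # expected: 1 7
    print(err.markInputline())      # expected: hello >!<world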
- """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( [line_str[:line_column], - markerString, line_str[line_column:]]) - return line_str.strip() - def __dir__(self): - return "loc msg pstr parserElement lineno col line " \ - "markInputLine __str__ __repr__".split() - -class ParseException(ParseBaseException): - """exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - pass - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like C{ParseFatalException}, but thrown internally when an - C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because - an unbacktrackable syntax error has been found""" - def __init__(self, pe): - super(ParseSyntaxException, self).__init__( - pe.pstr, pe.loc, pe.msg, pe.parserElement) - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by C{validate()} if the grammar could be improperly recursive""" - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup) - def setOffset(self,i): - self.tup = (self.tup[0],i) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to the parsed data: - - as a list (C{len(results)}) - - by list index (C{results[0], results[1]}, etc.) 
- - by attribute (C{results.}) - """ - #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" ) - def __new__(cls, toklist, name=None, asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist, name=None, asList=True, modal=True ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - if isinstance(toklist, list): - self.__toklist = toklist[:] - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not toklist in (None,'',[]): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError,IndexError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,int): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] - sub = v - if isinstance(sub,ParseResults): - sub.__parent = wkref(self) - - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i+1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name in self.__tokdict: - occurrences = self.__tokdict[name] - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__( self, k ): - return k in self.__tokdict - - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return len( self.__toklist ) > 0 - __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( reversed(self.__toklist) ) - def keys( self ): - """Returns all named result keys.""" - return self.__tokdict.keys() - - def pop( self, index=-1 ): - """Removes and returns item at specified index (default=last). 
- Will work with either numeric indices or dict-key indicies.""" - ret = self[index] - del self[index] - return ret - - def get(self, key, defaultValue=None): - """Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified.""" - if key in self: - return self[key] - else: - return defaultValue - - def insert( self, index, insStr ): - """Inserts new element at location index in the list of parsed tokens.""" - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name in self.__tokdict: - occurrences = self.__tokdict[name] - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def items( self ): - """Returns all named result keys and values as a list of tuples.""" - return [(k,self[k]) for k in self.__tokdict] - - def values( self ): - """Returns all named result values.""" - return [ v[-1][0] for v in self.__tokdict.values() ] - - def __getattr__( self, name ): - if True: #name not in self.__slots__: - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - return None - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = ( lambda a: (a<0 and offset) or (a+offset) ) - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - return self - - def __radd__(self, other): - if isinstance(other,int) and other == 0: - return self.copy() - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - out = "[" - sep = "" - for i in self.__toklist: - if isinstance(i, ParseResults): - out += sep + _ustr(i) - else: - out += sep + repr(i) - sep = ", " - out += "]" - return out - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """Returns the parse results as a nested list of matching tokens, all converted to strings.""" - out = [] - for res in self.__toklist: - if isinstance(res,ParseResults): - out.append( res.asList() ) - else: - out.append( res ) - return out - - def asDict( self ): - """Returns the named parse results as dictionary.""" - return dict( self.items() ) - - def copy( self ): - """Returns a new copy of a C{ParseResults} object.""" - ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """Returns the parse results as XML. 
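A brief sketch of the ParseResults access styles documented above (list index, attribute, get, asList/asDict); the date grammar is illustrative:

from pyparsing import Word, nums

date_expr = Word(nums)("year") + "/" + Word(nums)("month") + "/" + Word(nums)("day")
result = date_expr.parseString("2010/08/12")
print(result.asList())                                      # expected: ['2010', '/', '08', '/', '12']
print(result[0], result.month, result.get("hour", "n/a"))   # expected: 2010 08 n/a
print(result.asDict())   # expected: {'year': '2010', 'month': '08', 'day': '12'} (ordering may vary)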
Tags are created for tokens and lists that have defined results names.""" - nl = "\n" - out = [] - namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist ] ) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - worklist = self.__toklist - for i,res in enumerate(worklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "" ] - - out += [ nl, indent, "" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - """Returns the results name for this token expression.""" - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - self.__tokdict.values()[0][0][1] in (0,-1)): - return self.__tokdict.keys()[0] - else: - return None - - def dump(self,indent='',depth=0): - """Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data.""" - out = [] - out.append( indent+_ustr(self.asList()) ) - keys = self.items() - keys.sort() - for k,v in keys: - if out: - out.append('\n') - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v.keys(): - out.append( v.dump(indent,depth+1) ) - else: - out.append(_ustr(v)) - else: - out.append(_ustr(v)) - return "".join(out) - - # add support for pickle protocol - def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) - - def __setstate__(self,state): - self.__toklist = state[0] - self.__tokdict, \ - par, \ - inAccumNames, \ - self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __dir__(self): - return dir(super(ParseResults,self)) + self.keys() - -collections.MutableMapping.register(ParseResults) - -def col (loc,strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. 
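The body of col that follows has been mangled by extraction; as a hedge, this sketch shows how col, lineno and line are expected to behave per the docstrings (values assume the usual 1-based line/column convention):

from pyparsing import col, lineno, line

s = "abc\ndef"
loc = s.index("e")        # character offset 5, on the second line
print(lineno(loc, s))     # expected: 2
print(col(loc, s))        # expected: 2
print(line(loc, s))       # expected: def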
- """ - return (loc} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - return strg.count("\n",0,loc) + 1 - -def line( loc, strg ): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR+1:nextCR] - else: - return strg[lastCR+1:] - -def _defaultStartDebugAction( instring, loc, expr ): - print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - def setDefaultWhitespaceChars( chars ): - """Overrides the default whitespace chars - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars) - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element.""" - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """Define name for this expression, for use in debugging.""" - self.name = name - self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg - return self - - def setResultsName( self, name, listAllMatches=False ): - """Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. 
- - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. - """ - newself = self.copy() - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self,breakFlag = True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set C{breakFlag} to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse,"_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def _normalizeParseActionArgs( f ): - """Internal method used to decorate parse actions that take fewer than 3 arguments, - so that all parse actions can be called as C{f(s,l,t)}.""" - STAR_ARGS = 4 - - # special handling for single-argument builtins - if (f in singleArgBuiltins): - numargs = 1 - else: - try: - restore = None - if isinstance(f,type): - restore = f - f = f.__init__ - if not _PY3K: - codeObj = f.func_code - else: - codeObj = f.code - if codeObj.co_flags & STAR_ARGS: - return f - numargs = codeObj.co_argcount - if not _PY3K: - if hasattr(f,"im_self"): - numargs -= 1 - else: - if hasattr(f,"__self__"): - numargs -= 1 - if restore: - f = restore - except AttributeError: - try: - if not _PY3K: - call_im_func_code = f.__call__.im_func.func_code - else: - call_im_func_code = f.__code__ - - # not a function, must be a callable object, get info from the - # im_func binding of its bound __call__ method - if call_im_func_code.co_flags & STAR_ARGS: - return f - numargs = call_im_func_code.co_argcount - if not _PY3K: - if hasattr(f.__call__,"im_self"): - numargs -= 1 - else: - if hasattr(f.__call__,"__self__"): - numargs -= 0 - except AttributeError: - if not _PY3K: - call_func_code = f.__call__.func_code - else: - call_func_code = f.__call__.__code__ - # not a bound method, get info directly from __call__ method - if call_func_code.co_flags & STAR_ARGS: - return f - numargs = call_func_code.co_argcount - if not _PY3K: - if hasattr(f.__call__,"im_self"): - numargs -= 1 - else: - if hasattr(f.__call__,"__self__"): - numargs -= 1 - - - # print ("adding function %s with %d args" % (f.func_name,numargs)) - if numargs == 3: - return f - else: - if numargs > 3: - def tmp(s,l,t): - return f(s,l,t) - elif numargs == 2: - def tmp(s,l,t): - return f(l,t) - elif numargs == 1: - def tmp(s,l,t): - return f(t) - else: #~ numargs == 0: - def tmp(s,l,t): - return f() - try: - tmp.__name__ = f.__name__ - except (AttributeError,TypeError): - # no need for special handling if attribute doesnt exist - pass - try: - tmp.__doc__ = f.__doc__ - except (AttributeError,TypeError): - # no need for special handling if attribute doesnt exist - pass - try: - tmp.__dict__.update(f.__dict__) - except (AttributeError,TypeError): - # no need for special handling if attribute doesnt exist - pass - return tmp - _normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs) - - def setParseAction( self, *fns, **kwargs ): - """Define action to perform when successfully matching parse element definition. 
- Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a ParseResults object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - self.parseAction = list(map(self._normalizeParseActionArgs, list(fns))) - self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def addParseAction( self, *fns, **kwargs ): - """Add parse action to expression's list of parse actions. See L{I{setParseAction}}.""" - self.parseAction += list(map(self._normalizeParseActionArgs, list(fns))) - self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - C{fn(s,loc,expr,err)} where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. 
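For readers skimming this removal, a minimal sketch of the parse-action signatures described above (assuming this file is importable as the pyparsing module; integer is an illustrative name, not taken from the patch):

    from pyparsing import Word, nums

    integer = Word(nums)
    # single-argument form fn(toks): the returned value replaces the matched tokens
    integer.setParseAction(lambda toks: int(toks[0]))
    print(integer.parseString("42")[0] + 1)   # -> 43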
It may throw C{ParseFatalException} - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException as err: - #~ print ("Exception raised:", err) - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or loc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException as err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - - if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - try: - return self._parse( instring, loc, doActions=False )[0] - except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these 
arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - lookup = (self,instring,loc,callPreParse,doActions) - if lookup in ParserElement._exprArgCache: - value = ParserElement._exprArgCache[ lookup ] - if isinstance(value, Exception): - raise value - return value - else: - try: - value = self._parseNoCache( instring, loc, doActions, callPreParse ) - ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) - return value - except ParseBaseException as err: - err.__traceback__ = None - ParserElement._exprArgCache[ lookup ] = err - raise - - _parse = _parseNoCache - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - _exprArgCache = {} - def resetCache(): - ParserElement._exprArgCache.clear() - resetCache = staticmethod(resetCache) - - _packratEnabled = False - def enablePackrat(): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method C{ParserElement.enablePackrat()}. If - your program uses C{psyco} to "compile as you go", you must call - C{enablePackrat} before calling C{psyco.full()}. If you do not do this, - Python will crash. For best results, call C{enablePackrat()} immediately - after importing pyparsing. - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - ParserElement._parse = ParserElement._parseCache - enablePackrat = staticmethod(enablePackrat) - - def parseString( self, instring, parseAll=False ): - """Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{StringEnd()}). - - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. 
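A hedged sketch of the enablePackrat() workflow documented above, following the docstring's advice to enable memoizing right after import and before exercising the grammar (key, value, and pair are made-up names):

    from pyparsing import ParserElement, Word, alphas, nums

    ParserElement.enablePackrat()          # memoize repeated parse attempts

    key = Word(alphas)
    value = Word(nums)
    pair = key + "=" + value
    print(pair.parseString("answer=42").asList())   # -> ['answer', '=', '42']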
- If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - #~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse( instring, 0 ) - if parseAll: - #loc = self.preParse( instring, loc ) - se = StringEnd() - se._parse( instring, loc ) - except ParseBaseException as err: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise err - else: - return tokens - - def scanString( self, instring, maxMatches=_MAX_INT ): - """Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. - - Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}} for more information on parsing - strings with embedded tabs.""" - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) - except ParseException: - loc = preloc+1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - loc = nextLoc - else: - loc = preloc+1 - except ParseBaseException as err: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise err - - def transformString( self, instring ): - """Extension to C{scanString}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. 
C{transformString()} returns the resulting transformed string.""" - out = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) - if t: - if isinstance(t,ParseResults): - out += t.asList() - elif isinstance(t,list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - return "".join(map(_ustr,out)) - except ParseBaseException as err: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise err - - def searchString( self, instring, maxMatches=_MAX_INT ): - """Another extension to C{scanString}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. - """ - try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - except ParseBaseException as err: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise err - - def __add__(self, other ): - """Implementation of + operator - returns And""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, other ] ) - - def __radd__(self, other ): - """Implementation of + operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """Implementation of - operator, returns C{And} with error stop""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, And._ErrorStop(), other ] ) - - def __rsub__(self, other ): - """Implementation of - operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self,other): - """Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. 
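As a brief illustration of the scanString/transformString behaviour described above (a sketch only; number is a made-up name), a parse action's return value replaces the matched text while the rest of the input passes through unchanged:

    from pyparsing import Word, nums

    number = Word(nums)
    number.setParseAction(lambda toks: "#" * len(toks[0]))   # mask digits
    print(number.transformString("call 555 or 911"))         # -> 'call ### or ###'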
Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent - to C{expr*n + ZeroOrMore(expr)} - (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} - (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)} - - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} - - """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") - - if (optElements): - def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self]*minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self]*minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other ): - """Implementation of | operator - returns C{MatchFirst}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """Implementation of | operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other ): - """Implementation of ^ operator - returns C{Or}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """Implementation of ^ operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with 
ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other ): - """Implementation of & operator - returns C{Each}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each( [ self, other ] ) - - def __rand__(self, other ): - """Implementation of & operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__( self ): - """Implementation of ~ operator - returns C{NotAny}""" - return NotAny( self ) - - def __call__(self, name): - """Shortcut for C{setResultsName}, with C{listAllMatches=default}:: - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - could be written as:: - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - """ - return self.setResultsName(name) - - def suppress( self ): - """Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """Overrides default behavior to expand s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match characters.""" - self.keepTabs = True - return self - - def ignore( self, other ): - """Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - """ - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append( other.copy() ) - else: - self.ignoreExprs.append( Suppress( other.copy() ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """Enable display of debugging messages while doing pattern matching.""" - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching. 
- Set C{flag} to True to enable, False to disable.""" - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """Check defined expressions for valid structure, check for infinite recursive definitions.""" - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename, parseAll=False ): - """Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - f = open(file_or_filename, "rb") - file_contents = f.read() - f.close() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as err: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise err - - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or self.__dict__ == other.__dict__ - elif isinstance(other, basestring): - try: - self.parseString(_ustr(other), parseAll=True) - return True - except ParseBaseException: - return False - else: - return super(ParserElement,self)==other - - def __ne__(self,other): - return not (self == other) - - def __hash__(self): - return hash(id(self)) - - def __req__(self,other): - return self == other - - def __rne__(self,other): - return not (self == other) - - -class Token(ParserElement): - """Abstract C{ParserElement} subclass, for defining atomic matching patterns.""" - def __init__( self ): - super(Token,self).__init__( savelist=False ) - #self.myException = ParseException("",0,"",self) - - def setName(self, name): - s = super(Token,self).setName(name) - self.errmsg = "Expected " + self.name - #s.myException.msg = self.errmsg - return s - - -class Empty(Token): - """An empty token, will always match.""" - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match.""" - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """Token to exactly match a specified string.""" - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - #self.myException.msg = self.errmsg - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def 
parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - raise ParseException( instring, loc, self.errmsg, self ) -_L = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{Literal}:: - Literal("if") will match the leading 'if' in 'ifAndOnlyIf'. - Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)' - Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive - matching, default is False. - """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ): - super(Keyword,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException( instring, loc, self.errmsg, self ) - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - setDefaultKeywordChars = staticmethod(setDefaultKeywordChars) - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. 
- self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - raise ParseException( instring, loc, self.errmsg, self ) - -class CaselessKeyword(Keyword): - def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException( instring, loc, self.errmsg, self ) - -class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ): - super(Word,self).__init__() - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.bodyCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc,result.group() - - if not(instring[ loc ] in self.initChars): - raise ParseException( instring, loc, self.errmsg, self ) - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min( maxloc, instrlen ) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - if self.maxSpecified and loc < instrlen and 
instring[loc] in bodychars: - throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc4: - return s[:4]+"..." - else: - return s - - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - - -class Regex(Token): - """Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. - """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0): - """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.""" - super(Regex,self).__init__() - - if isinstance(pattern, basestring): - if len(pattern) == 0: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif isinstance(pattern, Regex.compiledREtype): - self.re = pattern - self.pattern = \ - self.reString = str(pattern) - self.flags = flags - - else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - d = result.groupdict() - ret = ParseResults(result.group()) - if d: - for k in d: - ret[k] = d[k] - return loc,ret - - def __str__( self ): - try: - return super(Regex,self).__str__() - except: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - -class QuotedString(Token): - """Token for matching strings that are delimited by quoting characters. 
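To illustrate the Word and Regex tokens defined above (a sketch under the assumption that the module imports as pyparsing; identifier and date are made-up names), note that named groups in a Regex pattern become results names:

    from pyparsing import Word, Regex, alphas, alphanums

    identifier = Word(alphas + "_", alphanums + "_")   # initChars, then bodyChars
    date = Regex(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})")

    print(identifier.parseString("_private1").asList())   # -> ['_private1']
    result = date.parseString("2012-10-08")
    print(result["year"], result["day"])                  # named groups -> results names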
- """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None): - """ - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=None) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - - multiline - boolean indicating whether quotes can span multiple lines (default=False) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar) - """ - super(QuotedString,self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if len(quoteChar) == 0: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if len(endQuoteChar) == 0: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')' - ) - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] - - if isinstance(ret,basestring): - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern,"\g<1>",ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def 
__str__( self ): - try: - return super(QuotedString,self).__str__() - except: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given set. - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) - #self.myException.msg = self.errmsg - self.mayIndexError = False - - def parseImpl( self, instring, loc, doActions=True ): - if instring[loc] in self.notChars: - raise ParseException( instring, loc, self.errmsg, self ) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < maxlen and \ - (instring[loc] not in notchars): - loc += 1 - - if loc - start < self.minLen: - raise ParseException( instring, loc, self.errmsg, self ) - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(CharsNotIn, self).__str__() - except: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. 
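A brief, illustrative sketch of the QuotedString parameters documented above (sql_string and raw are made-up names): escQuote handles doubled-quote escapes, and unquoteResults=False preserves the delimiters:

    from pyparsing import QuotedString

    sql_string = QuotedString("'", escQuote="''")
    print(sql_string.parseString("'it''s here'").asList())   # -> ["it's here"]

    raw = QuotedString('"', escChar="\\", unquoteResults=False)
    print(raw.parseString('"a \\"b\\" c"').asList())          # quotes kept in the result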
Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{Word} class.""" - whiteStrs = { - " " : "<SPC>", - "\t": "<TAB>", - "\n": "<LF>", - "\r": "<CR>", - "\f": "<FF>", - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() - self.matchWhite = ws - self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) ) - #~ self.leaveWhitespace() - self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite])) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): - raise ParseException( instring, loc, self.errmsg, self ) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException( instring, loc, self.errmsg, self ) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for tabular report scraping.""" - def __init__( self, colno ): - super(GoToColumn,self).__init__() - self.col = colno - - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) - if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) - newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] - return newloc, ret - -class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within the parse string""" - def __init__( self ): - super(LineStart,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected start of line" - #self.myException.msg = self.errmsg - - def preParse( self, instring, loc ): - preloc = super(LineStart,self).preParse(instring,loc) - if instring[preloc] == "\n": - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - if not( loc==0 or - (loc == self.preParse( instring, 0 )) or - (instring[loc-1] == "\n") ): #col(loc, instring) != 1: - raise ParseException( instring, loc, self.errmsg, self ) - return loc, [] - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the parse string""" - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected end of line" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if loc<len(instring): - if instring[loc] == "\n": - return loc+1, "\n" - else: - raise ParseException( instring, loc, self.errmsg, self ) - elif loc == len(instring): - return loc+1, [] - else: - return loc, [] - -class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse string""" - def __init__( self ): - super(StringStart,self).__init__() - self.errmsg = "Expected start of text" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse( instring, 0 ): - raise ParseException( instring, loc, self.errmsg, self ) - return loc, [] - -class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string""" - def __init__( self ): - super(StringEnd,self).__init__() - self.errmsg = "Expected end of text" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if loc < len(instring): - raise ParseException( instring, loc, self.errmsg, self ) - elif loc == len(instring): - return loc+1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException( instring, loc, self.errmsg, self ) - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, and - is not
preceded by any character in a given set of wordChars - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. - """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True ): - if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): - raise ParseException( instring, loc, self.errmsg, self ) - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of wordChars - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. - """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True ): - instrlen = len(instring) - if instrlen>0 and loc maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - else: - if loc2 > maxMatchLoc: - maxMatchLoc = loc2 - maxMatchExp = e - - if maxMatchLoc < 0: - if maxException is not None: - maxException.__traceback__ = None - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - return maxMatchExp._parse( instring, loc, doActions ) - - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = Literal( other ) - return self.append( other ) #Or( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class MatchFirst(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the '|' operator. 
- """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) - if exprs: - self.mayReturnEmpty = False - for e in self.exprs: - if e.mayReturnEmpty: - self.mayReturnEmpty = True - break - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse( instring, loc, doActions ) - return ret - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.__traceback__ = None - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other ): - if isinstance( other, basestring ): - other = Literal( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class Each(ParseExpression): - """Requires all given C{ParseExpressions} to be found, but in any order. - Expressions may be separated by whitespace. - May be constructed using the '&' operator. - """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) - self.mayReturnEmpty = True - for e in self.exprs: - if not e.mayReturnEmpty: - self.mayReturnEmpty = False - break - self.skipWhitespace = True - self.initExprGroups = True - - def parseImpl( self, instring, loc, doActions=True ): - if self.initExprGroups: - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ] - self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse( instring, tmpLoc ) - except ParseException: - failed.append(e) - else: - matchOrder.append(e) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join( [ _ustr(e) for e in tmpReqd ] ) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += list(e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt) - - resultlist = [] - for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) - resultlist.append(results) - - 
finalResults = ParseResults([]) - for r in resultlist: - dups = {} - for k in r.keys(): - if k in finalResults.keys(): - tmp = ParseResults(finalResults[k]) - tmp += ParseResults(r[k]) - dups[k] = tmp - finalResults += ParseResults(r) - for k,v in dups.items(): - finalResults[k] = v - return loc, finalResults - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.""" - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - expr = Literal(expr) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) - else: - raise ParseException("",loc,self.errmsg,self) - - def leaveWhitespace( self ): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - else: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - return self - - def streamline( self ): - super(ParseElementEnhance,self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion( self, parseElementList ): - if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] - if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion( [] ) - - def __str__( self ): - try: - return super(ParseElementEnhance,self).__str__() - except: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. C{FollowedBy} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list.""" - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - self.expr.tryParse( instring, loc ) - return loc, [] - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. 
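The practical difference between '^' (Or, longest match) and '|' (MatchFirst, first listed match) described above can be sketched as follows (integer and real are illustrative names):

    from pyparsing import Word, nums

    integer = Word(nums)
    real = Word(nums) + "." + Word(nums)

    print((integer | real).parseString("3.1415").asList())   # first match wins: ['3']
    print((integer ^ real).parseString("3.1415").asList())   # longest match wins: ['3', '.', '1415']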
C{NotAny} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression does *not* match at the current - position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator.""" - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) - #self.myException = ParseException("",0,self.errmsg,self) - - def parseImpl( self, instring, loc, doActions=True ): - try: - self.expr.tryParse( instring, loc ) - except (ParseException,IndexError): - pass - else: - raise ParseException( instring, loc, self.errmsg, self ) - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - - -class ZeroOrMore(ParseElementEnhance): - """Optional repetition of zero or more of the given expression.""" - def __init__( self, expr ): - super(ZeroOrMore,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - tokens = [] - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.keys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - - -class OneOrMore(ParseElementEnhance): - """Repetition of one or more of the given expression.""" - def parseImpl( self, instring, loc, doActions=True ): - # must be at least one - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - try: - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.keys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(OneOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -_optionalNotMatched = _NullToken() -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - A default return string can also be specified, if the optional expression - is not found. 
- """ - def __init__( self, exprs, default=_optionalNotMatched ): - super(Optional,self).__init__( exprs, savelist=False ) - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [ self.defaultValue ] - else: - tokens = [] - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched expression is found. - If C{include} is set to true, the matched expression is also parsed (the skipped text - and matched expression are returned as a 2-element list). The C{ignore} - argument is used to define grammars (typically quoted strings and comments) that - might contain false matches. - """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.asList = False - if failOn is not None and isinstance(failOn, basestring): - self.failOn = Literal(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) - #self.myException = ParseException("",0,self.errmsg,self) - - def parseImpl( self, instring, loc, doActions=True ): - startLoc = loc - instrlen = len(instring) - expr = self.expr - failParse = False - while loc <= instrlen: - try: - if self.failOn: - try: - self.failOn.tryParse(instring, loc) - except ParseBaseException: - pass - else: - failParse = True - raise ParseException(instring, loc, "Found expression " + str(self.failOn)) - failParse = False - if self.ignoreExpr is not None: - while 1: - try: - loc = self.ignoreExpr.tryParse(instring,loc) - # print("found ignoreExpr, advance to", loc) - except ParseBaseException: - break - expr._parse( instring, loc, doActions=False, callPreParse=False ) - skipText = instring[startLoc:loc] - if self.includeMatch: - loc,mat = expr._parse(instring,loc,doActions,callPreParse=False) - if mat: - skipRes = ParseResults( skipText ) - skipRes += mat - return loc, [ skipRes ] - else: - return loc, [ skipText ] - else: - return loc, [ skipText ] - except (ParseException,IndexError): - if failParse: - raise - else: - loc += 1 - raise ParseException( instring, loc, self.errmsg, self ) - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. - - Note: take care when assigning to C{Forward} not to overlook precedence of operators. - Specifically, '|' has a lower precedence than '<<', so that:: - fwdExpr << a | b | c - will actually be evaluated as:: - (fwdExpr << a) | b | c - thereby leaving b and c out as parseable alternatives. 
It is recommended that you - explicitly group the values inserted into the C{Forward}:: - fwdExpr << (a | b | c) - """ - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) - - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = Literal(other) - self.expr = other - self.mayReturnEmpty = other.mayReturnEmpty - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return None - - def leaveWhitespace( self ): - self.skipWhitespace = False - return self - - def streamline( self ): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate( self, validateTrace=[] ): - if self not in validateTrace: - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse - try: - if self.expr is not None: - retString = _ustr(self.expr) - else: - retString = "None" - finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString - - def copy(self): - if self.expr is not None: - return super(Forward,self).copy() - else: - ret = Forward() - ret << self - return ret - -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." - -class TokenConverter(ParseElementEnhance): - """Abstract subclass of ParseExpression, for converting parsed results.""" - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) - self.saveAsList = False - -class Upcase(TokenConverter): - """Converter to upper case all matching tokens.""" - def __init__(self, *args): - super(Upcase,self).__init__(*args) - warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead", - DeprecationWarning,stacklevel=2) - - def postParse( self, instring, loc, tokenlist ): - return list(map( string.upper, tokenlist )) - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. 
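Following the grouping advice above, a minimal recursive-grammar sketch using C{Forward} (assuming this pyparsing module is importable)::

    from pyparsing import Forward, Group, Suppress, Word, ZeroOrMore, nums

    # An item is either a number or a parenthesized list of items; note the
    # explicit parentheses around the alternatives so '|' binds before '<<'.
    item = Forward()
    item << (Word(nums) | Group(Suppress("(") + ZeroOrMore(item) + Suppress(")")))

    print(item.parseString("(1 (2 3) 4)"))   # -> [['1', ['2', '3'], '4']]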
- """ - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore( self, other ): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super( Combine, self).ignore( other ) - return self - - def postParse( self, instring, loc, tokenlist ): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) - - if self.resultsName and len(retToks.keys())>0: - return [ retToks ] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions.""" - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. - Useful for tabular report scraping when the first column can be used as a item key. - """ - def __init__( self, exprs ): - super(Dict,self).__init__( exprs ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey,int): - ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) - else: - dictvalue = tok.copy() #ParseResults(i) - del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) - - if self.resultsName: - return [ tokenlist ] - else: - return tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.""" - def postParse( self, instring, loc, tokenlist ): - return [] - - def suppress( self ): - return self - - -class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once.""" - def __init__(self, methodCall): - self.callable = ParserElement._normalizeParseActionArgs(methodCall) - self.called = False - def __call__(self,s,l,t): - if not self.called: - results = self.callable(s,l,t) - self.called = True - return results - raise ParseException(s,l,"") - def reset(self): - self.called = False - -def traceParseAction(f): - """Decorator for debugging parse actions.""" - f = ParserElement._normalizeParseActionArgs(f) - def z(*paArgs): - thisFunc = f.func_name - s,l,t = paArgs[-3:] - if len(paArgs)>3: - thisFunc = paArgs[0].__class__.__name__ + '.' 
+ thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write( "<", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) - try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) ) - else: - return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) ) - except: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - - # last resort, just use MatchFirst - return MatchFirst( [ parseElementClass(sym) for sym in symbols ] ) - -def dictOf( key, value ): - """Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. - """ - return Dict( ZeroOrMore( Group ( key + value ) ) ) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given expression. Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. Simpler to use than the parse action C{keepOriginalText}, and does not - require the inspect module to chase up the call stack. By default, returns a - string containing the original parsed text. - - If the optional C{asString} argument is passed as False, then the return value is a - C{ParseResults} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. 
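A short sketch of C{dictOf} in use, under the same assumption that this pyparsing module is importable; the key/value grammar is invented for illustration::

    from pyparsing import Suppress, Word, alphas, nums, dictOf

    # dictOf(key, value) wraps Dict(ZeroOrMore(Group(key + value))), so parsed
    # entries can be read back by key as well as by position.
    attrs = dictOf(Word(alphas) + Suppress(":"), Word(nums))
    result = attrs.parseString("width: 80 height: 24")

    print(result["width"])    # prints: 80
    print(result["height"])   # prints: 24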
So if - the expression passed to C{originalTextFor} contains expressions with defined - results names, you must set C{asString} to False if you want to preserve those - results name values.""" - locMarker = Empty().setParseAction(lambda s,loc,t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] - else: - def extractText(s,l,t): - del t[:] - t.insert(0, s[t._original_start:t._original_end]) - del t["_original_start"] - del t["_original_end"] - matchExpr.setParseAction(extractText) - return matchExpr - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ]) -_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16))) -_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" - -_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p) - -def srange(s): - r"""Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be:: - a single character - an escaped character with a leading backslash (such as \- or \]) - an escaped hex character with a leading '\0x' (\0x21, which is a '!' character) - an escaped octal character with a leading '\0' (\041, which is a '!' character) - a range of any of the above, separated by a dash ('a-z', etc.) - any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.) - """ - try: - return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body]) - except: - return "" - -def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at a specific - column in the input text. - """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{transformString()}. - """ - def _replFunc(*args): - return [replStr] - return _replFunc - -def removeQuotes(s,l,t): - """Helper parse action for removing quotation marks from parsed quoted strings. 
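For clarity, a hedged sketch of C{srange} feeding a C{Word} definition, again assuming this module is importable::

    from pyparsing import Word, srange

    # srange expands a regex-style character class into the literal character set
    # that Word expects.
    hexchars = srange("[0-9a-fA-F]")
    print(hexchars)                                  # -> 0123456789abcdefABCDEF
    print(Word(hexchars).parseString("deadBEEF"))    # -> ['deadBEEF']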
- To use, add this parse action to quoted string using:: - quotedString.setParseAction( removeQuotes ) - """ - return t[0][1:-1] - -def upcaseTokens(s,l,t): - """Helper parse action to convert tokens to upper case.""" - return [ tt.upper() for tt in map(_ustr,t) ] - -def downcaseTokens(s,l,t): - """Helper parse action to convert tokens to lower case.""" - return [ tt.lower() for tt in map(_ustr,t) ] - -def keepOriginalText(s,startLoc,t): - """DEPRECATED - use new helper method C{originalTextFor}. - Helper parse action to preserve original parsed text, - overriding any nested parse actions.""" - try: - endloc = getTokensEndLoc() - except ParseException: - raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action") - del t[:] - t += ParseResults(s[startLoc:endloc]) - return t - -def getTokensEndLoc(): - """Method to be called from within a parse action to determine the end - location of the parsed tokens.""" - import inspect - fstack = inspect.stack() - try: - # search up the stack (through intervening argument normalizers) for correct calling routine - for f in fstack[2:]: - if f[3] == "_parseNoCache": - endloc = f[0].f_locals["loc"] - return endloc - else: - raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") - finally: - del fstack - -def _makeTags(tagStr, xml): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - else: - printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] ) - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % tagStr) - openTag.tag = resname - closeTag.tag = resname - return openTag, closeTag - -def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, given a tag name""" - return _makeTags( tagStr, False ) - -def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, given a tag name""" - return _makeTags( tagStr, True ) - -def withAttribute(*args,**attrDict): - """Helper to create a validating parse action to be used with start tags created - with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - or
. - - Call withAttribute with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in (class="Customer",align="right"), or - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. - - To verify that the attribute exists, but without specifying a value, pass - withAttribute.ANY_VALUE as the value. - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: - if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -opAssoc = _Constants() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def operatorPrecedence( baseExpr, opList ): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. - - Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants opAssoc.RIGHT and opAssoc.LEFT. 
- - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - """ - ret = Forward() - lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward()#.setName("expr%d" % i) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) - else: - matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) - else: - matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - matchExpr.setParseAction( pa ) - thisExpr << ( matchExpr | lastExpr ) - lastExpr = thisExpr - ret << lastExpr - return ret - -dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes") -sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes") -quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()) - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list (default="("); can also be a pyparsing expression - - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - - content - expression for items within the nested lists (default=None) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. 
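A compact sketch of C{operatorPrecedence} building an arithmetic grammar, assuming this pyparsing module is importable; the two-level operator table is illustrative::

    from pyparsing import Literal, Word, nums, operatorPrecedence, opAssoc

    integer = Word(nums)
    # Levels are listed from tightest-binding to loosest; '*' groups before '+'.
    arith = operatorPrecedence(integer, [
        (Literal("*"), 2, opAssoc.LEFT),
        (Literal("+"), 2, opAssoc.LEFT),
    ])

    print(arith.parseString("1 + 2 * 3"))   # -> [['1', '+', ['2', '*', '3']]]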
- - Use the ignoreExpr argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an Or or MatchFirst. - The default is quotedString, but if no expressions are to be ignored, - then pass None for this argument. - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) - else: - ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. - - Parameters: - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single grammar - should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the - the current level; set to False for block of left-most statements - (default=True) - - A valid block must contain at least one blockStatement. 
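As a usage note for C{nestedExpr} above, a minimal sketch (assuming this pyparsing module is importable)::

    from pyparsing import nestedExpr

    # With no content expression, nestedExpr captures whitespace-delimited items
    # between the delimiters, nesting lists to mirror the nesting of the input.
    sexp = nestedExpr("(", ")")
    print(sexp.parseString("(define (square x) (* x x))"))
    # -> [['define', ['square', 'x'], ['*', 'x', 'x']]]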
- """ - def checkPeerIndent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") - - def checkSubIndent(s,l,t): - curCol = col(l,s) - if curCol > indentStack[-1]: - indentStack.append( curCol ) - else: - raise ParseException(s,l,"not a subentry") - - def checkUnindent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = Empty() + Empty().setParseAction(checkSubIndent) - PEER = Empty().setParseAction(checkPeerIndent) - UNDENT = Empty().setParseAction(checkUnindent) - if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) - else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:")) -commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline() -_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "')) -replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment") - -htmlComment = Regex(r"") -restOfLine = Regex(r".*").leaveWhitespace() -dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment") -cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?" 
+ str(tokenlist)) - print ("tokens = " + str(tokens)) - print ("tokens.columns = " + str(tokens.columns)) - print ("tokens.tables = " + str(tokens.tables)) - print (tokens.asXML("SQL",True)) - except ParseBaseException as err: - print (teststring + "->") - print (err.line) - print (" "*(err.column-1) + "^") - print (err) - print() - - selectToken = CaselessLiteral( "select" ) - fromToken = CaselessLiteral( "from" ) - - ident = Word( alphas, alphanums + "_$" ) - columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - columnNameList = Group( delimitedList( columnName ) )#.setName("columns") - tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - tableNameList = Group( delimitedList( tableName ) )#.setName("tables") - simpleSQL = ( selectToken + \ - ( '*' | columnNameList ).setResultsName( "columns" ) + \ - fromToken + \ - tableNameList.setResultsName( "tables" ) ) - - test( "SELECT * from XYZZY, ABC" ) - test( "select * from SYS.XYZZY" ) - test( "Select A from Sys.dual" ) - test( "Select AA,BB,CC from Sys.dual" ) - test( "Select A, B, C from Sys.dual" ) - test( "Select A, B, C from Sys.dual" ) - test( "Xelect A, B, C from Sys.dual" ) - test( "Select A, B, C frox Sys.dual" ) - test( "Select" ) - test( "Select ^^^ frox Sys.dual" ) - test( "Select A, B, C from Sys.dual, Table2 " ) diff --git a/lib/matplotlib/tests/test_simplification.py b/lib/matplotlib/tests/test_simplification.py index 03a5d84597a7..50b025db01b5 100644 --- a/lib/matplotlib/tests/test_simplification.py +++ b/lib/matplotlib/tests/test_simplification.py @@ -168,7 +168,7 @@ def test_throw_rendering_complexity_exceeded(): ax = fig.add_subplot(111) ax.plot(xx, yy) try: - fig.savefig(io.StringIO()) + fig.savefig(io.BytesIO()) finally: rcParams['path.simplify'] = True diff --git a/lib/matplotlib/ticker.py b/lib/matplotlib/ticker.py index 267e5c485651..646e917d45d4 100644 --- a/lib/matplotlib/ticker.py +++ b/lib/matplotlib/ticker.py @@ -808,13 +808,13 @@ def format_eng(self, num): """ Formats a number in engineering notation, appending a letter representing the power of 1000 of the original number. Some examples: - >>> format_eng(0) for self.places = 0 + >>> format_eng(0) # for self.places = 0 '0' - >>> format_eng(1000000) for self.places = 1 + >>> format_eng(1000000) # for self.places = 1 '1.0 M' - >>> format_eng("-1e-6") for self.places = 2 + >>> format_eng("-1e-6") # for self.places = 2 u'-1.00 \u03bc' @param num: the value to represent diff --git a/lib/pytz/CHANGES.txt b/lib/pytz/CHANGES.txt deleted file mode 100644 index 7cb8b1bb3576..000000000000 --- a/lib/pytz/CHANGES.txt +++ /dev/null @@ -1,54 +0,0 @@ -2004-07-25 - - - Improved localtime handling, and added a localize() method enabling - correct creation of local times. - -2005-02-16 - - - Made available under the Zope Public Licence 2.1 (ZPL) and checked - into the Zope3 project. pytz may now be used and redistributed - under either the original MIT license or the ZPL 2.1. - -2005-05-13 - - - Move UTC into the top level pytz module and provide special - case pickle support for this singleton. - -2005-08-14 - - - Ensure all tzinfo instances are efficiently picklable. - -2005-12-31 - - - Add fixed offset timezone classes required by Zope 3 - - Generate and distribute a PO template file listing all timezone - names. Translations are not yet available. 
- -2007-03-03 - - - Import work by James Henstridge, making pytz load timezone - information from zic compiled binaries at runtime rather than - processing them into Python classes. - -2007-03-26 - - - Update database to version 2007d - - Fix windows incompatibilities, working around limitations on that - platform. - - Fix 2.3 incompatibilities. Installation now requires distutils. - - Passing an invalid timezone name to timezone() now raises an - UnknownTimezoneError, which is a KeyError subclass for backwards - compatibility. - -2007-03-27 - - - Ensure API can accept Unicode strings (Bug #96957) - -2009-09-29 - - - Fix test_zdump tests and bugs the fixed tests picked up, including - the fix for Bug #427444. - -2011-02-08 - - - Python 3.1 support. diff --git a/lib/pytz/LICENSE.txt b/lib/pytz/LICENSE.txt deleted file mode 100644 index 5e12fcca67ce..000000000000 --- a/lib/pytz/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2003-2009 Stuart Bishop - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/lib/pytz/README.txt b/lib/pytz/README.txt deleted file mode 100644 index 3a174dda94a0..000000000000 --- a/lib/pytz/README.txt +++ /dev/null @@ -1,552 +0,0 @@ -pytz - World Timezone Definitions for Python -============================================ - -:Author: Stuart Bishop - -Introduction -~~~~~~~~~~~~ - -pytz brings the Olson tz database into Python. This library allows -accurate and cross platform timezone calculations using Python 2.4 -or higher. It also solves the issue of ambiguous times at the end -of daylight savings, which you can read more about in the Python -Library Reference (``datetime.tzinfo``). - -Amost all of the Olson timezones are supported. - -Note that this library differs from the documented Python API for -tzinfo implementations; if you want to create local wallclock -times you need to use the ``localize()`` method documented in this -document. In addition, if you perform date arithmetic on local -times that cross DST boundaries, the results may be in an incorrect -timezone (ie. subtract 1 minute from 2002-10-27 1:00 EST and you get -2002-10-27 0:59 EST instead of the correct 2002-10-27 1:59 EDT). A -``normalize()`` method is provided to correct this. Unfortunatly these -issues cannot be resolved without modifying the Python datetime -implementation. - - -Installation -~~~~~~~~~~~~ - -This package can either be installed from a .egg file using setuptools, -or from the tarball using the standard Python distutils. 
- -If you are installing from a tarball, run the following command as an -administrative user:: - - python setup.py install - -If you are installing using setuptools, you don't even need to download -anything as the latest version will be downloaded for you -from the Python package index:: - - easy_install --upgrade pytz - -If you already have the .egg file, you can use that too:: - - easy_install pytz-2008g-py2.6.egg - - -Example & Usage -~~~~~~~~~~~~~~~ - -Localized times and date arithmetic ------------------------------------ - ->>> from datetime import datetime, timedelta ->>> from pytz import timezone ->>> import pytz ->>> utc = pytz.utc ->>> utc.zone -'UTC' ->>> eastern = timezone('US/Eastern') ->>> eastern.zone -'US/Eastern' ->>> amsterdam = timezone('Europe/Amsterdam') ->>> fmt = '%Y-%m-%d %H:%M:%S %Z%z' - -This library only supports two ways of building a localized time. The -first is to use the ``localize()`` method provided by the pytz library. -This is used to localize a naive datetime (datetime with no timezone -information): - ->>> loc_dt = eastern.localize(datetime(2002, 10, 27, 6, 0, 0)) ->>> print(loc_dt.strftime(fmt)) -2002-10-27 06:00:00 EST-0500 - -The second way of building a localized time is by converting an existing -localized time using the standard ``astimezone()`` method: - ->>> ams_dt = loc_dt.astimezone(amsterdam) ->>> ams_dt.strftime(fmt) -'2002-10-27 12:00:00 CET+0100' - -Unfortunately using the tzinfo argument of the standard datetime -constructors ''does not work'' with pytz for many timezones. - ->>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=amsterdam).strftime(fmt) -'2002-10-27 12:00:00 AMT+0020' - -It is safe for timezones without daylight savings trasitions though, such -as UTC: - ->>> datetime(2002, 10, 27, 12, 0, 0, tzinfo=pytz.utc).strftime(fmt) -'2002-10-27 12:00:00 UTC+0000' - -The preferred way of dealing with times is to always work in UTC, -converting to localtime only when generating output to be read -by humans. - ->>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) ->>> loc_dt = utc_dt.astimezone(eastern) ->>> loc_dt.strftime(fmt) -'2002-10-27 01:00:00 EST-0500' - -This library also allows you to do date arithmetic using local -times, although it is more complicated than working in UTC as you -need to use the ``normalize()`` method to handle daylight savings time -and other timezone transitions. In this example, ``loc_dt`` is set -to the instant when daylight savings time ends in the US/Eastern -timezone. - ->>> before = loc_dt - timedelta(minutes=10) ->>> before.strftime(fmt) -'2002-10-27 00:50:00 EST-0500' ->>> eastern.normalize(before).strftime(fmt) -'2002-10-27 01:50:00 EDT-0400' ->>> after = eastern.normalize(before + timedelta(minutes=20)) ->>> after.strftime(fmt) -'2002-10-27 01:10:00 EST-0500' - -Creating localtimes is also tricky, and the reason why working with -local times is not recommended. Unfortunately, you cannot just pass -a ``tzinfo`` argument when constructing a datetime (see the next -section for more details) - ->>> dt = datetime(2002, 10, 27, 1, 30, 0) ->>> dt1 = eastern.localize(dt, is_dst=True) ->>> dt1.strftime(fmt) -'2002-10-27 01:30:00 EDT-0400' ->>> dt2 = eastern.localize(dt, is_dst=False) ->>> dt2.strftime(fmt) -'2002-10-27 01:30:00 EST-0500' - -Converting between timezones also needs special attention. This also -needs to use the ``normalize()`` method to ensure the conversion is -correct. 
- ->>> utc_dt = utc.localize(datetime.utcfromtimestamp(1143408899)) ->>> utc_dt.strftime(fmt) -'2006-03-26 21:34:59 UTC+0000' ->>> au_tz = timezone('Australia/Sydney') ->>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz)) ->>> au_dt.strftime(fmt) -'2006-03-27 08:34:59 EST+1100' ->>> utc_dt2 = utc.normalize(au_dt.astimezone(utc)) ->>> utc_dt2.strftime(fmt) -'2006-03-26 21:34:59 UTC+0000' - -You can take shortcuts when dealing with the UTC side of timezone -conversions. ``normalize()`` and ``localize()`` are not really -necessary when there are no daylight savings time transitions to -deal with. - ->>> utc_dt = datetime.utcfromtimestamp(1143408899).replace(tzinfo=utc) ->>> utc_dt.strftime(fmt) -'2006-03-26 21:34:59 UTC+0000' ->>> au_tz = timezone('Australia/Sydney') ->>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz)) ->>> au_dt.strftime(fmt) -'2006-03-27 08:34:59 EST+1100' ->>> utc_dt2 = au_dt.astimezone(utc) ->>> utc_dt2.strftime(fmt) -'2006-03-26 21:34:59 UTC+0000' - - -``tzinfo`` API --------------- - -The ``tzinfo`` instances returned by the ``timezone()`` function have -been extended to cope with ambiguous times by adding an ``is_dst`` -parameter to the ``utcoffset()``, ``dst()`` && ``tzname()`` methods. - ->>> tz = timezone('America/St_Johns') - ->>> normal = datetime(2009, 9, 1) ->>> ambiguous = datetime(2009, 10, 31, 23, 30) - -The ``is_dst`` parameter is ignored for most timestamps. It is only used -during DST transition ambiguous periods to resulve that ambiguity. - ->>> tz.utcoffset(normal, is_dst=True) -datetime.timedelta(-1, 77400) ->>> tz.dst(normal, is_dst=True) -datetime.timedelta(0, 3600) ->>> tz.tzname(normal, is_dst=True) -'NDT' - ->>> tz.utcoffset(ambiguous, is_dst=True) -datetime.timedelta(-1, 77400) ->>> tz.dst(ambiguous, is_dst=True) -datetime.timedelta(0, 3600) ->>> tz.tzname(ambiguous, is_dst=True) -'NDT' - ->>> tz.utcoffset(normal, is_dst=False) -datetime.timedelta(-1, 77400) ->>> tz.dst(normal, is_dst=False) -datetime.timedelta(0, 3600) ->>> tz.tzname(normal, is_dst=False) -'NDT' - ->>> tz.utcoffset(ambiguous, is_dst=False) -datetime.timedelta(-1, 73800) ->>> tz.dst(ambiguous, is_dst=False) -datetime.timedelta(0) ->>> tz.tzname(ambiguous, is_dst=False) -'NST' - -If ``is_dst`` is not specified, ambiguous timestamps will raise -an ``pytz.exceptions.AmbiguousTimeError`` exception. - ->>> tz.utcoffset(normal) -datetime.timedelta(-1, 77400) ->>> tz.dst(normal) -datetime.timedelta(0, 3600) ->>> tz.tzname(normal) -'NDT' - ->>> import pytz.exceptions ->>> try: -... tz.utcoffset(ambiguous) -... except pytz.exceptions.AmbiguousTimeError: -... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) -pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 ->>> try: -... tz.dst(ambiguous) -... except pytz.exceptions.AmbiguousTimeError: -... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) -pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 ->>> try: -... tz.tzname(ambiguous) -... except pytz.exceptions.AmbiguousTimeError: -... print('pytz.exceptions.AmbiguousTimeError: %s' % ambiguous) -pytz.exceptions.AmbiguousTimeError: 2009-10-31 23:30:00 - - -Problems with Localtime -~~~~~~~~~~~~~~~~~~~~~~~ - -The major problem we have to deal with is that certain datetimes -may occur twice in a year. 
For example, in the US/Eastern timezone -on the last Sunday morning in October, the following sequence -happens: - - - 01:00 EDT occurs - - 1 hour later, instead of 2:00am the clock is turned back 1 hour - and 01:00 happens again (this time 01:00 EST) - -In fact, every instant between 01:00 and 02:00 occurs twice. This means -that if you try and create a time in the 'US/Eastern' timezone using -the standard datetime syntax, there is no way to specify if you meant -before of after the end-of-daylight-savings-time transition. - ->>> loc_dt = datetime(2002, 10, 27, 1, 30, 00, tzinfo=eastern) ->>> loc_dt.strftime(fmt) -'2002-10-27 01:30:00 EST-0500' - -As you can see, the system has chosen one for you and there is a 50% -chance of it being out by one hour. For some applications, this does -not matter. However, if you are trying to schedule meetings with people -in different timezones or analyze log files it is not acceptable. - -The best and simplest solution is to stick with using UTC. The pytz -package encourages using UTC for internal timezone representation by -including a special UTC implementation based on the standard Python -reference implementation in the Python documentation. - -The UTC timezone unpickles to be the same instance, and pickles to a -smaller size than other pytz tzinfo instances. The UTC implementation -can be obtained as pytz.utc, pytz.UTC, or pytz.timezone('UTC'). - ->>> import pickle, pytz ->>> dt = datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) ->>> naive = dt.replace(tzinfo=None) ->>> p = pickle.dumps(dt, 1) ->>> naive_p = pickle.dumps(naive, 1) ->>> len(p) - len(naive_p) -17 ->>> new = pickle.loads(p) ->>> new == dt -True ->>> new is dt -False ->>> new.tzinfo is dt.tzinfo -True ->>> pytz.utc is pytz.UTC is pytz.timezone('UTC') -True - -Note that this instance is not the same instance (or implementation) as -other timezones with the same meaning (GMT, Greenwich, Universal, etc.). - ->>> utc is pytz.timezone('GMT') -False - -If you insist on working with local times, this library provides a -facility for constructing them unambiguously: - ->>> loc_dt = datetime(2002, 10, 27, 1, 30, 00) ->>> est_dt = eastern.localize(loc_dt, is_dst=True) ->>> edt_dt = eastern.localize(loc_dt, is_dst=False) ->>> print(est_dt.strftime(fmt) + ' / ' + edt_dt.strftime(fmt)) -2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500 - -If you pass None as the is_dst flag to localize(), pytz will refuse to -guess and raise exceptions if you try to build ambiguous or non-existent -times. - -For example, 1:30am on 27th Oct 2002 happened twice in the US/Eastern -timezone when the clocks where put back at the end of Daylight Savings -Time: - ->>> dt = datetime(2002, 10, 27, 1, 30, 00) ->>> try: -... eastern.localize(dt, is_dst=None) -... except pytz.exceptions.AmbiguousTimeError: -... print('pytz.exceptions.AmbiguousTimeError: %s' % dt) -pytz.exceptions.AmbiguousTimeError: 2002-10-27 01:30:00 - -Similarly, 2:30am on 7th April 2002 never happened at all in the -US/Eastern timezone, as the clocks where put forward at 2:00am skipping -the entire hour: - ->>> dt = datetime(2002, 4, 7, 2, 30, 00) ->>> try: -... eastern.localize(dt, is_dst=None) -... except pytz.exceptions.NonExistentTimeError: -... 
print('pytz.exceptions.NonExistentTimeError: %s' % dt) -pytz.exceptions.NonExistentTimeError: 2002-04-07 02:30:00 - -Both of these exceptions share a common base class to make error handling -easier: - ->>> isinstance(pytz.AmbiguousTimeError(), pytz.InvalidTimeError) -True ->>> isinstance(pytz.NonExistentTimeError(), pytz.InvalidTimeError) -True - -Although ``localize()`` handles many cases, it is still not possible -to handle all. In cases where countries change their timezone definitions, -cases like the end-of-daylight-savings-time occur with no way of resolving -the ambiguity. For example, in 1915 Warsaw switched from Warsaw time to -Central European time. So at the stroke of midnight on August 5th 1915 -the clocks were wound back 24 minutes creating an ambiguous time period -that cannot be specified without referring to the timezone abbreviation -or the actual UTC offset. In this case midnight happened twice, neither -time during a daylight savings time period: - ->>> warsaw = pytz.timezone('Europe/Warsaw') ->>> loc_dt1 = warsaw.localize(datetime(1915, 8, 4, 23, 59, 59), is_dst=False) ->>> loc_dt1.strftime(fmt) -'1915-08-04 23:59:59 WMT+0124' ->>> loc_dt2 = warsaw.localize(datetime(1915, 8, 5, 00, 00, 00), is_dst=False) ->>> loc_dt2.strftime(fmt) -'1915-08-05 00:00:00 CET+0100' ->>> str(loc_dt2 - loc_dt1) -'0:24:01' - -The only way of creating a time during the missing 24 minutes is -converting from another timezone - because neither of the timezones -involved where in daylight savings mode the API simply provides no way -to express it: - ->>> utc_dt = datetime(1915, 8, 4, 22, 36, tzinfo=pytz.utc) ->>> utc_dt.astimezone(warsaw).strftime(fmt) -'1915-08-04 23:36:00 CET+0100' - -The standard Python way of handling all these ambiguities is not to -handle them, such as demonstrated in this example using the US/Eastern -timezone definition from the Python documentation (Note that this -implementation only works for dates between 1987 and 2006 - it is -included for tests only!): - ->>> from pytz.reference import Eastern # pytz.reference only for tests ->>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern) ->>> str(dt) -'2002-10-27 00:30:00-04:00' ->>> str(dt + timedelta(hours=1)) -'2002-10-27 01:30:00-05:00' ->>> str(dt + timedelta(hours=2)) -'2002-10-27 02:30:00-05:00' ->>> str(dt + timedelta(hours=3)) -'2002-10-27 03:30:00-05:00' - -Notice the first two results? At first glance you might think they are -correct, but taking the UTC offset into account you find that they are -actually two hours appart instead of the 1 hour we asked for. - ->>> from pytz.reference import UTC # pytz.reference only for tests ->>> str(dt.astimezone(UTC)) -'2002-10-27 04:30:00+00:00' ->>> str((dt + timedelta(hours=1)).astimezone(UTC)) -'2002-10-27 06:30:00+00:00' - - -Country Information -~~~~~~~~~~~~~~~~~~~ - -A mechanism is provided to access the timezones commonly in use -for a particular country, looked up using the ISO 3166 country code. -It returns a list of strings that can be used to retrieve the relevant -tzinfo instance using ``pytz.timezone()``: - ->>> print(' '.join(pytz.country_timezones['nz'])) -Pacific/Auckland Pacific/Chatham - -The Olson database comes with a ISO 3166 country code to English country -name mapping that pytz exposes as a dictionary: - ->>> print(pytz.country_names['nz']) -New Zealand - - -What is UTC -~~~~~~~~~~~ - -'UTC' is Universal Time, also known as Greenwich Mean Time or GMT -in the United Kingdom. All other timezones are given as offsets from -UTC. 
No daylight savings time occurs in UTC, making it a useful timezone -to perform date arithmetic without worrying about the confusion and -ambiguities caused by daylight savings time transitions, your country -changing its timezone, or mobile computers that move roam through -multiple timezones. - - -Helpers -~~~~~~~ - -There are two lists of timezones provided. - -``all_timezones`` is the exhaustive list of the timezone names that can -be used. - ->>> from pytz import all_timezones ->>> len(all_timezones) >= 500 -True ->>> 'Etc/Greenwich' in all_timezones -True - -``common_timezones`` is a list of useful, current timezones. It doesn't -contain deprecated zones or historical zones, except for a few I've -deemed in common usage, such as US/Eastern (open a bug report if you -think other timezones are deserving of being included here). It is also -a sequence of strings. - ->>> from pytz import common_timezones ->>> len(common_timezones) < len(all_timezones) -True ->>> 'Etc/Greenwich' in common_timezones -False ->>> 'Australia/Melbourne' in common_timezones -True ->>> 'US/Eastern' in common_timezones -True ->>> 'Canada/Eastern' in common_timezones -True ->>> 'US/Pacific-New' in all_timezones -True ->>> 'US/Pacific-New' in common_timezones -False - -Both ``common_timezones`` and ``all_timezones`` are alphabetically -sorted: - ->>> common_timezones_dupe = common_timezones[:] ->>> common_timezones_dupe.sort() ->>> common_timezones == common_timezones_dupe -True ->>> all_timezones_dupe = all_timezones[:] ->>> all_timezones_dupe.sort() ->>> all_timezones == all_timezones_dupe -True - -``all_timezones`` and ``common_timezones`` are also available as sets. - ->>> from pytz import all_timezones_set, common_timezones_set ->>> 'US/Eastern' in all_timezones_set -True ->>> 'US/Eastern' in common_timezones_set -True ->>> 'Australia/Victoria' in common_timezones_set -False - -You can also retrieve lists of timezones used by particular countries -using the ``country_timezones()`` function. It requires an ISO-3166 -two letter country code. - ->>> from pytz import country_timezones ->>> print(' '.join(country_timezones('ch'))) -Europe/Zurich ->>> print(' '.join(country_timezones('CH'))) -Europe/Zurich - - -License -~~~~~~~ - -MIT license. - -This code is also available as part of Zope 3 under the Zope Public -License, Version 2.1 (ZPL). - -I'm happy to relicense this code if necessary for inclusion in other -open source projects. - - -Latest Versions -~~~~~~~~~~~~~~~ - -This package will be updated after releases of the Olson timezone -database. The latest version can be downloaded from the `Python Package -Index `_. The code that is used -to generate this distribution is hosted on launchpad.net and available -using the `Bazaar version control system `_ -using:: - - bzr branch lp:pytz - - -Bugs, Feature Requests & Patches -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Bugs can be reported using `Launchpad `_. - - -Issues & Limitations -~~~~~~~~~~~~~~~~~~~~ - -- Offsets from UTC are rounded to the nearest whole minute, so timezones - such as Europe/Amsterdam pre 1937 will be up to 30 seconds out. This - is a limitation of the Python datetime library. - -- If you think a timezone definition is incorrect, I probably can't fix - it. pytz is a direct translation of the Olson timezone database, and - changes to the timezone definitions need to be made to this source. 
- If you find errors they should be reported to the time zone mailing - list, linked from http://www.twinsun.com/tz/tz-link.htm - - -Further Reading -~~~~~~~~~~~~~~~ - -More info than you want to know about timezones: -http://www.twinsun.com/tz/tz-link.htm - - -Contact -~~~~~~~ - -Stuart Bishop - - diff --git a/lib/pytz/__init__.py b/lib/pytz/__init__.py deleted file mode 100644 index 17149b45111a..000000000000 --- a/lib/pytz/__init__.py +++ /dev/null @@ -1,1537 +0,0 @@ -''' -datetime.tzinfo timezone definitions generated from the -Olson timezone database: - - ftp://elsie.nci.nih.gov/pub/tz*.tar.gz - -See the datetime section of the Python Library Reference for information -on how to use these modules. -''' - -# The Olson database is updated several times a year. -OLSON_VERSION = '2012d' -VERSION = OLSON_VERSION -# Version format for a patch release - only one so far. -#VERSION = OLSON_VERSION + '.2' -__version__ = OLSON_VERSION + "-mpl" - -OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling - -__all__ = [ - 'timezone', 'utc', 'country_timezones', 'country_names', - 'AmbiguousTimeError', 'InvalidTimeError', - 'NonExistentTimeError', 'UnknownTimeZoneError', - 'all_timezones', 'all_timezones_set', - 'common_timezones', 'common_timezones_set', - ] - -import sys, datetime, os.path, gettext -try: - from UserDict import DictMixin -except ImportError: - from collections import Mapping as DictMixin - -try: - from pkg_resources import resource_stream -except ImportError: - resource_stream = None - -from pytz.exceptions import AmbiguousTimeError -from pytz.exceptions import InvalidTimeError -from pytz.exceptions import NonExistentTimeError -from pytz.exceptions import UnknownTimeZoneError -from pytz.tzinfo import unpickler -from pytz.tzfile import build_tzinfo, _byte_string - - -try: - unicode - -except NameError: # Python 3.x - - # Python 3.x doesn't have unicode(), making writing code - # for Python 2.3 and Python 3.x a pain. - unicode = str - - def ascii(s): - r""" - >>> ascii('Hello') - 'Hello' - >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - UnicodeEncodeError: ... - """ - s.encode('US-ASCII') # Raise an exception if not ASCII - return s # But return the original string - not a byte string. - -else: # Python 2.x - - def ascii(s): - r""" - >>> ascii('Hello') - 'Hello' - >>> ascii(u'Hello') - 'Hello' - >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - UnicodeEncodeError: ... - """ - return s.encode('US-ASCII') - - -def open_resource(name): - """Open a resource from the zoneinfo subdir for reading. - - Uses the pkg_resources module if available and no standard file - found at the calculated location. - """ - name_parts = name.lstrip('/').split('/') - for part in name_parts: - if part == os.path.pardir or os.path.sep in part: - raise ValueError('Bad path segment: %r' % part) - filename = os.path.join(os.path.dirname(__file__), - 'zoneinfo', *name_parts) - if not os.path.exists(filename) and resource_stream is not None: - # http://bugs.launchpad.net/bugs/383171 - we avoid using this - # unless absolutely necessary to help when a broken version of - # pkg_resources is installed. 
- return resource_stream(__name__, 'zoneinfo/' + name) - return open(filename, 'rb') - - -def resource_exists(name): - """Return true if the given resource exists""" - try: - open_resource(name).close() - return True - except IOError: - return False - - -# Enable this when we get some translations? -# We want an i18n API that is useful to programs using Python's gettext -# module, as well as the Zope3 i18n package. Perhaps we should just provide -# the POT file and translations, and leave it up to callers to make use -# of them. -# -# t = gettext.translation( -# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'), -# fallback=True -# ) -# def _(timezone_name): -# """Translate a timezone name using the current locale, returning Unicode""" -# return t.ugettext(timezone_name) - - -_tzinfo_cache = {} - -def timezone(zone): - r''' Return a datetime.tzinfo implementation for the given timezone - - >>> from datetime import datetime, timedelta - >>> utc = timezone('UTC') - >>> eastern = timezone('US/Eastern') - >>> eastern.zone - 'US/Eastern' - >>> timezone(unicode('US/Eastern')) is eastern - True - >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) - >>> loc_dt = utc_dt.astimezone(eastern) - >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' - >>> loc_dt.strftime(fmt) - '2002-10-27 01:00:00 EST (-0500)' - >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) - '2002-10-27 00:50:00 EST (-0500)' - >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) - '2002-10-27 01:50:00 EDT (-0400)' - >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) - '2002-10-27 01:10:00 EST (-0500)' - - Raises UnknownTimeZoneError if passed an unknown zone. - - >>> try: - ... timezone('Asia/Shangri-La') - ... except UnknownTimeZoneError: - ... print('Unknown') - Unknown - - >>> try: - ... timezone(unicode('\N{TRADE MARK SIGN}')) - ... except UnknownTimeZoneError: - ... print('Unknown') - Unknown - - ''' - if zone.upper() == 'UTC': - return utc - - try: - zone = ascii(zone) - except UnicodeEncodeError: - # All valid timezones are ASCII - raise UnknownTimeZoneError(zone) - - zone = _unmunge_zone(zone) - if zone not in _tzinfo_cache: - if zone in all_timezones_set: - fp = open_resource(zone) - try: - _tzinfo_cache[zone] = build_tzinfo(zone, fp) - finally: - fp.close() - else: - raise UnknownTimeZoneError(zone) - - return _tzinfo_cache[zone] - - -def _unmunge_zone(zone): - """Undo the time zone name munging done by older versions of pytz.""" - return zone.replace('_plus_', '+').replace('_minus_', '-') - - -ZERO = datetime.timedelta(0) -HOUR = datetime.timedelta(hours=1) - - -class UTC(datetime.tzinfo): - """UTC - - Optimized UTC implementation. It unpickles using the single module global - instance defined beneath this class declaration. 
- """ - zone = "UTC" - - _utcoffset = ZERO - _dst = ZERO - _tzname = zone - - def fromutc(self, dt): - if dt.tzinfo is None: - return self.localize(dt) - return super(utc.__class__, self).fromutc(dt) - - def utcoffset(self, dt): - return ZERO - - def tzname(self, dt): - return "UTC" - - def dst(self, dt): - return ZERO - - def __reduce__(self): - return _UTC, () - - def localize(self, dt, is_dst=False): - '''Convert naive time to local time''' - if dt.tzinfo is not None: - raise ValueError('Not naive datetime (tzinfo is already set)') - return dt.replace(tzinfo=self) - - def normalize(self, dt, is_dst=False): - '''Correct the timezone information on the given datetime''' - if dt.tzinfo is self: - return dt - if dt.tzinfo is None: - raise ValueError('Naive time - no tzinfo set') - return dt.astimezone(self) - - def __repr__(self): - return "" - - def __str__(self): - return "UTC" - - -UTC = utc = UTC() # UTC is a singleton - - -def _UTC(): - """Factory function for utc unpickling. - - Makes sure that unpickling a utc instance always returns the same - module global. - - These examples belong in the UTC class above, but it is obscured; or in - the README.txt, but we are not depending on Python 2.4 so integrating - the README.txt examples with the unit tests is not trivial. - - >>> import datetime, pickle - >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) - >>> naive = dt.replace(tzinfo=None) - >>> p = pickle.dumps(dt, 1) - >>> naive_p = pickle.dumps(naive, 1) - >>> len(p) - len(naive_p) - 17 - >>> new = pickle.loads(p) - >>> new == dt - True - >>> new is dt - False - >>> new.tzinfo is dt.tzinfo - True - >>> utc is UTC is timezone('UTC') - True - >>> utc is timezone('GMT') - False - """ - return utc -_UTC.__safe_for_unpickling__ = True - - -def _p(*args): - """Factory function for unpickling pytz tzinfo instances. - - Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle - by shortening the path. - """ - return unpickler(*args) -_p.__safe_for_unpickling__ = True - - -class _LazyDict(DictMixin): - """Dictionary populated on first use.""" - data = None - def __getitem__(self, key): - if self.data is None: - self._fill() - return self.data[key.upper()] - - def __contains__(self, key): - if self.data is None: - self._fill() - return key in self.data - - def __iter__(self): - if self.data is None: - self._fill() - return iter(self.data) - - def __len__(self): - if self.data is None: - self._fill() - return len(self.data) - - def keys(self): - if self.data is None: - self._fill() - return self.data.keys() - - -class _CountryTimezoneDict(_LazyDict): - """Map ISO 3166 country code to a list of timezone names commonly used - in that country. - - iso3166_code is the two letter code used to identify the country. - - >>> def print_list(list_of_strings): - ... 'We use a helper so doctests work under Python 2.3 -> 3.x' - ... for s in list_of_strings: - ... print(s) - - >>> print_list(country_timezones['nz']) - Pacific/Auckland - Pacific/Chatham - >>> print_list(country_timezones['ch']) - Europe/Zurich - >>> print_list(country_timezones['CH']) - Europe/Zurich - >>> print_list(country_timezones[unicode('ch')]) - Europe/Zurich - >>> print_list(country_timezones['XXX']) - Traceback (most recent call last): - ... - KeyError: 'XXX' - - Previously, this information was exposed as a function rather than a - dictionary. 
This is still supported:: - - >>> print_list(country_timezones('nz')) - Pacific/Auckland - Pacific/Chatham - """ - def __call__(self, iso3166_code): - """Backwards compatibility.""" - return self[iso3166_code] - - def _fill(self): - data = {} - zone_tab = open_resource('zone.tab') - try: - for line in zone_tab: - line = line.decode('US-ASCII') - if line.startswith('#'): - continue - code, coordinates, zone = line.split(None, 4)[:3] - if zone not in all_timezones_set: - continue - try: - data[code].append(zone) - except KeyError: - data[code] = [zone] - self.data = data - finally: - zone_tab.close() - -country_timezones = _CountryTimezoneDict() - - -class _CountryNameDict(_LazyDict): - '''Dictionary proving ISO3166 code -> English name. - - >>> print(country_names['au']) - Australia - ''' - def _fill(self): - data = {} - zone_tab = open_resource('iso3166.tab') - try: - for line in zone_tab.readlines(): - line = line.decode('US-ASCII') - if line.startswith('#'): - continue - code, name = line.split(None, 1) - data[code] = name.strip() - self.data = data - finally: - zone_tab.close() - -country_names = _CountryNameDict() - - -# Time-zone info based solely on fixed offsets - -class _FixedOffset(datetime.tzinfo): - - zone = None # to match the standard pytz API - - def __init__(self, minutes): - if abs(minutes) >= 1440: - raise ValueError("absolute offset is too large", minutes) - self._minutes = minutes - self._offset = datetime.timedelta(minutes=minutes) - - def utcoffset(self, dt): - return self._offset - - def __reduce__(self): - return FixedOffset, (self._minutes, ) - - def dst(self, dt): - return ZERO - - def tzname(self, dt): - return None - - def __repr__(self): - return 'pytz.FixedOffset(%d)' % self._minutes - - def localize(self, dt, is_dst=False): - '''Convert naive time to local time''' - if dt.tzinfo is not None: - raise ValueError('Not naive datetime (tzinfo is already set)') - return dt.replace(tzinfo=self) - - def normalize(self, dt, is_dst=False): - '''Correct the timezone information on the given datetime''' - if dt.tzinfo is None: - raise ValueError('Naive time - no tzinfo set') - return dt.replace(tzinfo=self) - - -def FixedOffset(offset, _tzinfos = {}): - """return a fixed-offset timezone based off a number of minutes. - - >>> one = FixedOffset(-330) - >>> one - pytz.FixedOffset(-330) - >>> one.utcoffset(datetime.datetime.now()) - datetime.timedelta(-1, 66600) - >>> one.dst(datetime.datetime.now()) - datetime.timedelta(0) - - >>> two = FixedOffset(1380) - >>> two - pytz.FixedOffset(1380) - >>> two.utcoffset(datetime.datetime.now()) - datetime.timedelta(0, 82800) - >>> two.dst(datetime.datetime.now()) - datetime.timedelta(0) - - The datetime.timedelta must be between the range of -1 and 1 day, - non-inclusive. - - >>> FixedOffset(1440) - Traceback (most recent call last): - ... - ValueError: ('absolute offset is too large', 1440) - - >>> FixedOffset(-1440) - Traceback (most recent call last): - ... - ValueError: ('absolute offset is too large', -1440) - - An offset of 0 is special-cased to return UTC. - - >>> FixedOffset(0) is UTC - True - - There should always be only one instance of a FixedOffset per timedelta. - This should be true for multiple creation calls. - - >>> FixedOffset(-330) is one - True - >>> FixedOffset(1380) is two - True - - It should also be true for pickling. 
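# The FixedOffset() factory above keeps a single instance per distinct offset
# by combining a mutable default argument with dict.setdefault(). A small
# stand-alone sketch of the same caching idiom (class and function names here
# are illustrative only):
import datetime

class _Offset(datetime.tzinfo):
    def __init__(self, minutes):
        self._offset = datetime.timedelta(minutes=minutes)
    def utcoffset(self, dt):
        return self._offset
    def dst(self, dt):
        return datetime.timedelta(0)

def fixed_offset(minutes, _cache={}):
    # setdefault returns the already-stored instance if one exists, so every
    # call with the same offset yields the same object
    return _cache.setdefault(minutes, _Offset(minutes))

assert fixed_offset(-330) is fixed_offset(-330)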
- - >>> import pickle - >>> pickle.loads(pickle.dumps(one)) is one - True - >>> pickle.loads(pickle.dumps(two)) is two - True - """ - if offset == 0: - return UTC - - info = _tzinfos.get(offset) - if info is None: - # We haven't seen this one before. we need to save it. - - # Use setdefault to avoid a race condition and make sure we have - # only one - info = _tzinfos.setdefault(offset, _FixedOffset(offset)) - - return info - -FixedOffset.__safe_for_unpickling__ = True - - -def _test(): - import doctest, os, sys - sys.path.insert(0, os.pardir) - import pytz - return doctest.testmod(pytz) - -if __name__ == '__main__': - _test() - -all_timezones = \ -['Africa/Abidjan', - 'Africa/Accra', - 'Africa/Addis_Ababa', - 'Africa/Algiers', - 'Africa/Asmara', - 'Africa/Asmera', - 'Africa/Bamako', - 'Africa/Bangui', - 'Africa/Banjul', - 'Africa/Bissau', - 'Africa/Blantyre', - 'Africa/Brazzaville', - 'Africa/Bujumbura', - 'Africa/Cairo', - 'Africa/Casablanca', - 'Africa/Ceuta', - 'Africa/Conakry', - 'Africa/Dakar', - 'Africa/Dar_es_Salaam', - 'Africa/Djibouti', - 'Africa/Douala', - 'Africa/El_Aaiun', - 'Africa/Freetown', - 'Africa/Gaborone', - 'Africa/Harare', - 'Africa/Johannesburg', - 'Africa/Juba', - 'Africa/Kampala', - 'Africa/Khartoum', - 'Africa/Kigali', - 'Africa/Kinshasa', - 'Africa/Lagos', - 'Africa/Libreville', - 'Africa/Lome', - 'Africa/Luanda', - 'Africa/Lubumbashi', - 'Africa/Lusaka', - 'Africa/Malabo', - 'Africa/Maputo', - 'Africa/Maseru', - 'Africa/Mbabane', - 'Africa/Mogadishu', - 'Africa/Monrovia', - 'Africa/Nairobi', - 'Africa/Ndjamena', - 'Africa/Niamey', - 'Africa/Nouakchott', - 'Africa/Ouagadougou', - 'Africa/Porto-Novo', - 'Africa/Sao_Tome', - 'Africa/Timbuktu', - 'Africa/Tripoli', - 'Africa/Tunis', - 'Africa/Windhoek', - 'America/Adak', - 'America/Anchorage', - 'America/Anguilla', - 'America/Antigua', - 'America/Araguaina', - 'America/Argentina/Buenos_Aires', - 'America/Argentina/Catamarca', - 'America/Argentina/ComodRivadavia', - 'America/Argentina/Cordoba', - 'America/Argentina/Jujuy', - 'America/Argentina/La_Rioja', - 'America/Argentina/Mendoza', - 'America/Argentina/Rio_Gallegos', - 'America/Argentina/Salta', - 'America/Argentina/San_Juan', - 'America/Argentina/San_Luis', - 'America/Argentina/Tucuman', - 'America/Argentina/Ushuaia', - 'America/Aruba', - 'America/Asuncion', - 'America/Atikokan', - 'America/Atka', - 'America/Bahia', - 'America/Bahia_Banderas', - 'America/Barbados', - 'America/Belem', - 'America/Belize', - 'America/Blanc-Sablon', - 'America/Boa_Vista', - 'America/Bogota', - 'America/Boise', - 'America/Buenos_Aires', - 'America/Cambridge_Bay', - 'America/Campo_Grande', - 'America/Cancun', - 'America/Caracas', - 'America/Catamarca', - 'America/Cayenne', - 'America/Cayman', - 'America/Chicago', - 'America/Chihuahua', - 'America/Coral_Harbour', - 'America/Cordoba', - 'America/Costa_Rica', - 'America/Creston', - 'America/Cuiaba', - 'America/Curacao', - 'America/Danmarkshavn', - 'America/Dawson', - 'America/Dawson_Creek', - 'America/Denver', - 'America/Detroit', - 'America/Dominica', - 'America/Edmonton', - 'America/Eirunepe', - 'America/El_Salvador', - 'America/Ensenada', - 'America/Fort_Wayne', - 'America/Fortaleza', - 'America/Glace_Bay', - 'America/Godthab', - 'America/Goose_Bay', - 'America/Grand_Turk', - 'America/Grenada', - 'America/Guadeloupe', - 'America/Guatemala', - 'America/Guayaquil', - 'America/Guyana', - 'America/Halifax', - 'America/Havana', - 'America/Hermosillo', - 'America/Indiana/Indianapolis', - 'America/Indiana/Knox', - 'America/Indiana/Marengo', - 
'America/Indiana/Petersburg', - 'America/Indiana/Tell_City', - 'America/Indiana/Vevay', - 'America/Indiana/Vincennes', - 'America/Indiana/Winamac', - 'America/Indianapolis', - 'America/Inuvik', - 'America/Iqaluit', - 'America/Jamaica', - 'America/Jujuy', - 'America/Juneau', - 'America/Kentucky/Louisville', - 'America/Kentucky/Monticello', - 'America/Knox_IN', - 'America/Kralendijk', - 'America/La_Paz', - 'America/Lima', - 'America/Los_Angeles', - 'America/Louisville', - 'America/Lower_Princes', - 'America/Maceio', - 'America/Managua', - 'America/Manaus', - 'America/Marigot', - 'America/Martinique', - 'America/Matamoros', - 'America/Mazatlan', - 'America/Mendoza', - 'America/Menominee', - 'America/Merida', - 'America/Metlakatla', - 'America/Mexico_City', - 'America/Miquelon', - 'America/Moncton', - 'America/Monterrey', - 'America/Montevideo', - 'America/Montreal', - 'America/Montserrat', - 'America/Nassau', - 'America/New_York', - 'America/Nipigon', - 'America/Nome', - 'America/Noronha', - 'America/North_Dakota/Beulah', - 'America/North_Dakota/Center', - 'America/North_Dakota/New_Salem', - 'America/Ojinaga', - 'America/Panama', - 'America/Pangnirtung', - 'America/Paramaribo', - 'America/Phoenix', - 'America/Port-au-Prince', - 'America/Port_of_Spain', - 'America/Porto_Acre', - 'America/Porto_Velho', - 'America/Puerto_Rico', - 'America/Rainy_River', - 'America/Rankin_Inlet', - 'America/Recife', - 'America/Regina', - 'America/Resolute', - 'America/Rio_Branco', - 'America/Rosario', - 'America/Santa_Isabel', - 'America/Santarem', - 'America/Santiago', - 'America/Santo_Domingo', - 'America/Sao_Paulo', - 'America/Scoresbysund', - 'America/Shiprock', - 'America/Sitka', - 'America/St_Barthelemy', - 'America/St_Johns', - 'America/St_Kitts', - 'America/St_Lucia', - 'America/St_Thomas', - 'America/St_Vincent', - 'America/Swift_Current', - 'America/Tegucigalpa', - 'America/Thule', - 'America/Thunder_Bay', - 'America/Tijuana', - 'America/Toronto', - 'America/Tortola', - 'America/Vancouver', - 'America/Virgin', - 'America/Whitehorse', - 'America/Winnipeg', - 'America/Yakutat', - 'America/Yellowknife', - 'Antarctica/Casey', - 'Antarctica/Davis', - 'Antarctica/DumontDUrville', - 'Antarctica/Macquarie', - 'Antarctica/Mawson', - 'Antarctica/McMurdo', - 'Antarctica/Palmer', - 'Antarctica/Rothera', - 'Antarctica/South_Pole', - 'Antarctica/Syowa', - 'Antarctica/Vostok', - 'Arctic/Longyearbyen', - 'Asia/Aden', - 'Asia/Almaty', - 'Asia/Amman', - 'Asia/Anadyr', - 'Asia/Aqtau', - 'Asia/Aqtobe', - 'Asia/Ashgabat', - 'Asia/Ashkhabad', - 'Asia/Baghdad', - 'Asia/Bahrain', - 'Asia/Baku', - 'Asia/Bangkok', - 'Asia/Beirut', - 'Asia/Bishkek', - 'Asia/Brunei', - 'Asia/Calcutta', - 'Asia/Choibalsan', - 'Asia/Chongqing', - 'Asia/Chungking', - 'Asia/Colombo', - 'Asia/Dacca', - 'Asia/Damascus', - 'Asia/Dhaka', - 'Asia/Dili', - 'Asia/Dubai', - 'Asia/Dushanbe', - 'Asia/Gaza', - 'Asia/Harbin', - 'Asia/Hebron', - 'Asia/Ho_Chi_Minh', - 'Asia/Hong_Kong', - 'Asia/Hovd', - 'Asia/Irkutsk', - 'Asia/Istanbul', - 'Asia/Jakarta', - 'Asia/Jayapura', - 'Asia/Jerusalem', - 'Asia/Kabul', - 'Asia/Kamchatka', - 'Asia/Karachi', - 'Asia/Kashgar', - 'Asia/Kathmandu', - 'Asia/Katmandu', - 'Asia/Kolkata', - 'Asia/Krasnoyarsk', - 'Asia/Kuala_Lumpur', - 'Asia/Kuching', - 'Asia/Kuwait', - 'Asia/Macao', - 'Asia/Macau', - 'Asia/Magadan', - 'Asia/Makassar', - 'Asia/Manila', - 'Asia/Muscat', - 'Asia/Nicosia', - 'Asia/Novokuznetsk', - 'Asia/Novosibirsk', - 'Asia/Omsk', - 'Asia/Oral', - 'Asia/Phnom_Penh', - 'Asia/Pontianak', - 'Asia/Pyongyang', - 'Asia/Qatar', 
- 'Asia/Qyzylorda', - 'Asia/Rangoon', - 'Asia/Riyadh', - 'Asia/Saigon', - 'Asia/Sakhalin', - 'Asia/Samarkand', - 'Asia/Seoul', - 'Asia/Shanghai', - 'Asia/Singapore', - 'Asia/Taipei', - 'Asia/Tashkent', - 'Asia/Tbilisi', - 'Asia/Tehran', - 'Asia/Tel_Aviv', - 'Asia/Thimbu', - 'Asia/Thimphu', - 'Asia/Tokyo', - 'Asia/Ujung_Pandang', - 'Asia/Ulaanbaatar', - 'Asia/Ulan_Bator', - 'Asia/Urumqi', - 'Asia/Vientiane', - 'Asia/Vladivostok', - 'Asia/Yakutsk', - 'Asia/Yekaterinburg', - 'Asia/Yerevan', - 'Atlantic/Azores', - 'Atlantic/Bermuda', - 'Atlantic/Canary', - 'Atlantic/Cape_Verde', - 'Atlantic/Faeroe', - 'Atlantic/Faroe', - 'Atlantic/Jan_Mayen', - 'Atlantic/Madeira', - 'Atlantic/Reykjavik', - 'Atlantic/South_Georgia', - 'Atlantic/St_Helena', - 'Atlantic/Stanley', - 'Australia/ACT', - 'Australia/Adelaide', - 'Australia/Brisbane', - 'Australia/Broken_Hill', - 'Australia/Canberra', - 'Australia/Currie', - 'Australia/Darwin', - 'Australia/Eucla', - 'Australia/Hobart', - 'Australia/LHI', - 'Australia/Lindeman', - 'Australia/Lord_Howe', - 'Australia/Melbourne', - 'Australia/NSW', - 'Australia/North', - 'Australia/Perth', - 'Australia/Queensland', - 'Australia/South', - 'Australia/Sydney', - 'Australia/Tasmania', - 'Australia/Victoria', - 'Australia/West', - 'Australia/Yancowinna', - 'Brazil/Acre', - 'Brazil/DeNoronha', - 'Brazil/East', - 'Brazil/West', - 'CET', - 'CST6CDT', - 'Canada/Atlantic', - 'Canada/Central', - 'Canada/East-Saskatchewan', - 'Canada/Eastern', - 'Canada/Mountain', - 'Canada/Newfoundland', - 'Canada/Pacific', - 'Canada/Saskatchewan', - 'Canada/Yukon', - 'Chile/Continental', - 'Chile/EasterIsland', - 'Cuba', - 'EET', - 'EST', - 'EST5EDT', - 'Egypt', - 'Eire', - 'Etc/GMT', - 'Etc/GMT+0', - 'Etc/GMT+1', - 'Etc/GMT+10', - 'Etc/GMT+11', - 'Etc/GMT+12', - 'Etc/GMT+2', - 'Etc/GMT+3', - 'Etc/GMT+4', - 'Etc/GMT+5', - 'Etc/GMT+6', - 'Etc/GMT+7', - 'Etc/GMT+8', - 'Etc/GMT+9', - 'Etc/GMT-0', - 'Etc/GMT-1', - 'Etc/GMT-10', - 'Etc/GMT-11', - 'Etc/GMT-12', - 'Etc/GMT-13', - 'Etc/GMT-14', - 'Etc/GMT-2', - 'Etc/GMT-3', - 'Etc/GMT-4', - 'Etc/GMT-5', - 'Etc/GMT-6', - 'Etc/GMT-7', - 'Etc/GMT-8', - 'Etc/GMT-9', - 'Etc/GMT0', - 'Etc/Greenwich', - 'Etc/UCT', - 'Etc/UTC', - 'Etc/Universal', - 'Etc/Zulu', - 'Europe/Amsterdam', - 'Europe/Andorra', - 'Europe/Athens', - 'Europe/Belfast', - 'Europe/Belgrade', - 'Europe/Berlin', - 'Europe/Bratislava', - 'Europe/Brussels', - 'Europe/Bucharest', - 'Europe/Budapest', - 'Europe/Chisinau', - 'Europe/Copenhagen', - 'Europe/Dublin', - 'Europe/Gibraltar', - 'Europe/Guernsey', - 'Europe/Helsinki', - 'Europe/Isle_of_Man', - 'Europe/Istanbul', - 'Europe/Jersey', - 'Europe/Kaliningrad', - 'Europe/Kiev', - 'Europe/Lisbon', - 'Europe/Ljubljana', - 'Europe/London', - 'Europe/Luxembourg', - 'Europe/Madrid', - 'Europe/Malta', - 'Europe/Mariehamn', - 'Europe/Minsk', - 'Europe/Monaco', - 'Europe/Moscow', - 'Europe/Nicosia', - 'Europe/Oslo', - 'Europe/Paris', - 'Europe/Podgorica', - 'Europe/Prague', - 'Europe/Riga', - 'Europe/Rome', - 'Europe/Samara', - 'Europe/San_Marino', - 'Europe/Sarajevo', - 'Europe/Simferopol', - 'Europe/Skopje', - 'Europe/Sofia', - 'Europe/Stockholm', - 'Europe/Tallinn', - 'Europe/Tirane', - 'Europe/Tiraspol', - 'Europe/Uzhgorod', - 'Europe/Vaduz', - 'Europe/Vatican', - 'Europe/Vienna', - 'Europe/Vilnius', - 'Europe/Volgograd', - 'Europe/Warsaw', - 'Europe/Zagreb', - 'Europe/Zaporozhye', - 'Europe/Zurich', - 'GB', - 'GB-Eire', - 'GMT', - 'GMT+0', - 'GMT-0', - 'GMT0', - 'Greenwich', - 'HST', - 'Hongkong', - 'Iceland', - 'Indian/Antananarivo', - 
'Indian/Chagos', - 'Indian/Christmas', - 'Indian/Cocos', - 'Indian/Comoro', - 'Indian/Kerguelen', - 'Indian/Mahe', - 'Indian/Maldives', - 'Indian/Mauritius', - 'Indian/Mayotte', - 'Indian/Reunion', - 'Iran', - 'Israel', - 'Jamaica', - 'Japan', - 'Kwajalein', - 'Libya', - 'MET', - 'MST', - 'MST7MDT', - 'Mexico/BajaNorte', - 'Mexico/BajaSur', - 'Mexico/General', - 'NZ', - 'NZ-CHAT', - 'Navajo', - 'PRC', - 'PST8PDT', - 'Pacific/Apia', - 'Pacific/Auckland', - 'Pacific/Chatham', - 'Pacific/Chuuk', - 'Pacific/Easter', - 'Pacific/Efate', - 'Pacific/Enderbury', - 'Pacific/Fakaofo', - 'Pacific/Fiji', - 'Pacific/Funafuti', - 'Pacific/Galapagos', - 'Pacific/Gambier', - 'Pacific/Guadalcanal', - 'Pacific/Guam', - 'Pacific/Honolulu', - 'Pacific/Johnston', - 'Pacific/Kiritimati', - 'Pacific/Kosrae', - 'Pacific/Kwajalein', - 'Pacific/Majuro', - 'Pacific/Marquesas', - 'Pacific/Midway', - 'Pacific/Nauru', - 'Pacific/Niue', - 'Pacific/Norfolk', - 'Pacific/Noumea', - 'Pacific/Pago_Pago', - 'Pacific/Palau', - 'Pacific/Pitcairn', - 'Pacific/Pohnpei', - 'Pacific/Ponape', - 'Pacific/Port_Moresby', - 'Pacific/Rarotonga', - 'Pacific/Saipan', - 'Pacific/Samoa', - 'Pacific/Tahiti', - 'Pacific/Tarawa', - 'Pacific/Tongatapu', - 'Pacific/Truk', - 'Pacific/Wake', - 'Pacific/Wallis', - 'Pacific/Yap', - 'Poland', - 'Portugal', - 'ROC', - 'ROK', - 'Singapore', - 'Turkey', - 'UCT', - 'US/Alaska', - 'US/Aleutian', - 'US/Arizona', - 'US/Central', - 'US/East-Indiana', - 'US/Eastern', - 'US/Hawaii', - 'US/Indiana-Starke', - 'US/Michigan', - 'US/Mountain', - 'US/Pacific', - 'US/Pacific-New', - 'US/Samoa', - 'UTC', - 'Universal', - 'W-SU', - 'WET', - 'Zulu'] -all_timezones = [ - tz for tz in all_timezones if resource_exists(tz)] - -all_timezones_set = set(all_timezones) -common_timezones = \ -['Africa/Abidjan', - 'Africa/Accra', - 'Africa/Addis_Ababa', - 'Africa/Algiers', - 'Africa/Asmara', - 'Africa/Bamako', - 'Africa/Bangui', - 'Africa/Banjul', - 'Africa/Bissau', - 'Africa/Blantyre', - 'Africa/Brazzaville', - 'Africa/Bujumbura', - 'Africa/Cairo', - 'Africa/Casablanca', - 'Africa/Ceuta', - 'Africa/Conakry', - 'Africa/Dakar', - 'Africa/Dar_es_Salaam', - 'Africa/Djibouti', - 'Africa/Douala', - 'Africa/El_Aaiun', - 'Africa/Freetown', - 'Africa/Gaborone', - 'Africa/Harare', - 'Africa/Johannesburg', - 'Africa/Juba', - 'Africa/Kampala', - 'Africa/Khartoum', - 'Africa/Kigali', - 'Africa/Kinshasa', - 'Africa/Lagos', - 'Africa/Libreville', - 'Africa/Lome', - 'Africa/Luanda', - 'Africa/Lubumbashi', - 'Africa/Lusaka', - 'Africa/Malabo', - 'Africa/Maputo', - 'Africa/Maseru', - 'Africa/Mbabane', - 'Africa/Mogadishu', - 'Africa/Monrovia', - 'Africa/Nairobi', - 'Africa/Ndjamena', - 'Africa/Niamey', - 'Africa/Nouakchott', - 'Africa/Ouagadougou', - 'Africa/Porto-Novo', - 'Africa/Sao_Tome', - 'Africa/Tripoli', - 'Africa/Tunis', - 'Africa/Windhoek', - 'America/Adak', - 'America/Anchorage', - 'America/Anguilla', - 'America/Antigua', - 'America/Araguaina', - 'America/Argentina/Buenos_Aires', - 'America/Argentina/Catamarca', - 'America/Argentina/Cordoba', - 'America/Argentina/Jujuy', - 'America/Argentina/La_Rioja', - 'America/Argentina/Mendoza', - 'America/Argentina/Rio_Gallegos', - 'America/Argentina/Salta', - 'America/Argentina/San_Juan', - 'America/Argentina/San_Luis', - 'America/Argentina/Tucuman', - 'America/Argentina/Ushuaia', - 'America/Aruba', - 'America/Asuncion', - 'America/Atikokan', - 'America/Bahia', - 'America/Bahia_Banderas', - 'America/Barbados', - 'America/Belem', - 'America/Belize', - 'America/Blanc-Sablon', - 'America/Boa_Vista', - 
'America/Bogota', - 'America/Boise', - 'America/Cambridge_Bay', - 'America/Campo_Grande', - 'America/Cancun', - 'America/Caracas', - 'America/Cayenne', - 'America/Cayman', - 'America/Chicago', - 'America/Chihuahua', - 'America/Costa_Rica', - 'America/Creston', - 'America/Cuiaba', - 'America/Curacao', - 'America/Danmarkshavn', - 'America/Dawson', - 'America/Dawson_Creek', - 'America/Denver', - 'America/Detroit', - 'America/Dominica', - 'America/Edmonton', - 'America/Eirunepe', - 'America/El_Salvador', - 'America/Fortaleza', - 'America/Glace_Bay', - 'America/Godthab', - 'America/Goose_Bay', - 'America/Grand_Turk', - 'America/Grenada', - 'America/Guadeloupe', - 'America/Guatemala', - 'America/Guayaquil', - 'America/Guyana', - 'America/Halifax', - 'America/Havana', - 'America/Hermosillo', - 'America/Indiana/Indianapolis', - 'America/Indiana/Knox', - 'America/Indiana/Marengo', - 'America/Indiana/Petersburg', - 'America/Indiana/Tell_City', - 'America/Indiana/Vevay', - 'America/Indiana/Vincennes', - 'America/Indiana/Winamac', - 'America/Inuvik', - 'America/Iqaluit', - 'America/Jamaica', - 'America/Juneau', - 'America/Kentucky/Louisville', - 'America/Kentucky/Monticello', - 'America/Kralendijk', - 'America/La_Paz', - 'America/Lima', - 'America/Los_Angeles', - 'America/Lower_Princes', - 'America/Maceio', - 'America/Managua', - 'America/Manaus', - 'America/Marigot', - 'America/Martinique', - 'America/Matamoros', - 'America/Mazatlan', - 'America/Menominee', - 'America/Merida', - 'America/Metlakatla', - 'America/Mexico_City', - 'America/Miquelon', - 'America/Moncton', - 'America/Monterrey', - 'America/Montevideo', - 'America/Montreal', - 'America/Montserrat', - 'America/Nassau', - 'America/New_York', - 'America/Nipigon', - 'America/Nome', - 'America/Noronha', - 'America/North_Dakota/Beulah', - 'America/North_Dakota/Center', - 'America/North_Dakota/New_Salem', - 'America/Ojinaga', - 'America/Panama', - 'America/Pangnirtung', - 'America/Paramaribo', - 'America/Phoenix', - 'America/Port-au-Prince', - 'America/Port_of_Spain', - 'America/Porto_Velho', - 'America/Puerto_Rico', - 'America/Rainy_River', - 'America/Rankin_Inlet', - 'America/Recife', - 'America/Regina', - 'America/Resolute', - 'America/Rio_Branco', - 'America/Santa_Isabel', - 'America/Santarem', - 'America/Santiago', - 'America/Santo_Domingo', - 'America/Sao_Paulo', - 'America/Scoresbysund', - 'America/Shiprock', - 'America/Sitka', - 'America/St_Barthelemy', - 'America/St_Johns', - 'America/St_Kitts', - 'America/St_Lucia', - 'America/St_Thomas', - 'America/St_Vincent', - 'America/Swift_Current', - 'America/Tegucigalpa', - 'America/Thule', - 'America/Thunder_Bay', - 'America/Tijuana', - 'America/Toronto', - 'America/Tortola', - 'America/Vancouver', - 'America/Whitehorse', - 'America/Winnipeg', - 'America/Yakutat', - 'America/Yellowknife', - 'Antarctica/Casey', - 'Antarctica/Davis', - 'Antarctica/DumontDUrville', - 'Antarctica/Macquarie', - 'Antarctica/Mawson', - 'Antarctica/McMurdo', - 'Antarctica/Palmer', - 'Antarctica/Rothera', - 'Antarctica/South_Pole', - 'Antarctica/Syowa', - 'Antarctica/Vostok', - 'Arctic/Longyearbyen', - 'Asia/Aden', - 'Asia/Almaty', - 'Asia/Amman', - 'Asia/Anadyr', - 'Asia/Aqtau', - 'Asia/Aqtobe', - 'Asia/Ashgabat', - 'Asia/Baghdad', - 'Asia/Bahrain', - 'Asia/Baku', - 'Asia/Bangkok', - 'Asia/Beirut', - 'Asia/Bishkek', - 'Asia/Brunei', - 'Asia/Choibalsan', - 'Asia/Chongqing', - 'Asia/Colombo', - 'Asia/Damascus', - 'Asia/Dhaka', - 'Asia/Dili', - 'Asia/Dubai', - 'Asia/Dushanbe', - 'Asia/Gaza', - 'Asia/Harbin', - 
'Asia/Hebron', - 'Asia/Ho_Chi_Minh', - 'Asia/Hong_Kong', - 'Asia/Hovd', - 'Asia/Irkutsk', - 'Asia/Jakarta', - 'Asia/Jayapura', - 'Asia/Jerusalem', - 'Asia/Kabul', - 'Asia/Kamchatka', - 'Asia/Karachi', - 'Asia/Kashgar', - 'Asia/Kathmandu', - 'Asia/Kolkata', - 'Asia/Krasnoyarsk', - 'Asia/Kuala_Lumpur', - 'Asia/Kuching', - 'Asia/Kuwait', - 'Asia/Macau', - 'Asia/Magadan', - 'Asia/Makassar', - 'Asia/Manila', - 'Asia/Muscat', - 'Asia/Nicosia', - 'Asia/Novokuznetsk', - 'Asia/Novosibirsk', - 'Asia/Omsk', - 'Asia/Oral', - 'Asia/Phnom_Penh', - 'Asia/Pontianak', - 'Asia/Pyongyang', - 'Asia/Qatar', - 'Asia/Qyzylorda', - 'Asia/Rangoon', - 'Asia/Riyadh', - 'Asia/Sakhalin', - 'Asia/Samarkand', - 'Asia/Seoul', - 'Asia/Shanghai', - 'Asia/Singapore', - 'Asia/Taipei', - 'Asia/Tashkent', - 'Asia/Tbilisi', - 'Asia/Tehran', - 'Asia/Thimphu', - 'Asia/Tokyo', - 'Asia/Ulaanbaatar', - 'Asia/Urumqi', - 'Asia/Vientiane', - 'Asia/Vladivostok', - 'Asia/Yakutsk', - 'Asia/Yekaterinburg', - 'Asia/Yerevan', - 'Atlantic/Azores', - 'Atlantic/Bermuda', - 'Atlantic/Canary', - 'Atlantic/Cape_Verde', - 'Atlantic/Faroe', - 'Atlantic/Madeira', - 'Atlantic/Reykjavik', - 'Atlantic/South_Georgia', - 'Atlantic/St_Helena', - 'Atlantic/Stanley', - 'Australia/Adelaide', - 'Australia/Brisbane', - 'Australia/Broken_Hill', - 'Australia/Currie', - 'Australia/Darwin', - 'Australia/Eucla', - 'Australia/Hobart', - 'Australia/Lindeman', - 'Australia/Lord_Howe', - 'Australia/Melbourne', - 'Australia/Perth', - 'Australia/Sydney', - 'Canada/Atlantic', - 'Canada/Central', - 'Canada/Eastern', - 'Canada/Mountain', - 'Canada/Newfoundland', - 'Canada/Pacific', - 'Europe/Amsterdam', - 'Europe/Andorra', - 'Europe/Athens', - 'Europe/Belgrade', - 'Europe/Berlin', - 'Europe/Bratislava', - 'Europe/Brussels', - 'Europe/Bucharest', - 'Europe/Budapest', - 'Europe/Chisinau', - 'Europe/Copenhagen', - 'Europe/Dublin', - 'Europe/Gibraltar', - 'Europe/Guernsey', - 'Europe/Helsinki', - 'Europe/Isle_of_Man', - 'Europe/Istanbul', - 'Europe/Jersey', - 'Europe/Kaliningrad', - 'Europe/Kiev', - 'Europe/Lisbon', - 'Europe/Ljubljana', - 'Europe/London', - 'Europe/Luxembourg', - 'Europe/Madrid', - 'Europe/Malta', - 'Europe/Mariehamn', - 'Europe/Minsk', - 'Europe/Monaco', - 'Europe/Moscow', - 'Europe/Oslo', - 'Europe/Paris', - 'Europe/Podgorica', - 'Europe/Prague', - 'Europe/Riga', - 'Europe/Rome', - 'Europe/Samara', - 'Europe/San_Marino', - 'Europe/Sarajevo', - 'Europe/Simferopol', - 'Europe/Skopje', - 'Europe/Sofia', - 'Europe/Stockholm', - 'Europe/Tallinn', - 'Europe/Tirane', - 'Europe/Uzhgorod', - 'Europe/Vaduz', - 'Europe/Vatican', - 'Europe/Vienna', - 'Europe/Vilnius', - 'Europe/Volgograd', - 'Europe/Warsaw', - 'Europe/Zagreb', - 'Europe/Zaporozhye', - 'Europe/Zurich', - 'GMT', - 'Indian/Antananarivo', - 'Indian/Chagos', - 'Indian/Christmas', - 'Indian/Cocos', - 'Indian/Comoro', - 'Indian/Kerguelen', - 'Indian/Mahe', - 'Indian/Maldives', - 'Indian/Mauritius', - 'Indian/Mayotte', - 'Indian/Reunion', - 'Pacific/Apia', - 'Pacific/Auckland', - 'Pacific/Chatham', - 'Pacific/Chuuk', - 'Pacific/Easter', - 'Pacific/Efate', - 'Pacific/Enderbury', - 'Pacific/Fakaofo', - 'Pacific/Fiji', - 'Pacific/Funafuti', - 'Pacific/Galapagos', - 'Pacific/Gambier', - 'Pacific/Guadalcanal', - 'Pacific/Guam', - 'Pacific/Honolulu', - 'Pacific/Johnston', - 'Pacific/Kiritimati', - 'Pacific/Kosrae', - 'Pacific/Kwajalein', - 'Pacific/Majuro', - 'Pacific/Marquesas', - 'Pacific/Midway', - 'Pacific/Nauru', - 'Pacific/Niue', - 'Pacific/Norfolk', - 'Pacific/Noumea', - 'Pacific/Pago_Pago', - 'Pacific/Palau', 
- 'Pacific/Pitcairn', - 'Pacific/Pohnpei', - 'Pacific/Port_Moresby', - 'Pacific/Rarotonga', - 'Pacific/Saipan', - 'Pacific/Tahiti', - 'Pacific/Tarawa', - 'Pacific/Tongatapu', - 'Pacific/Wake', - 'Pacific/Wallis', - 'US/Alaska', - 'US/Arizona', - 'US/Central', - 'US/Eastern', - 'US/Hawaii', - 'US/Mountain', - 'US/Pacific', - 'UTC'] -common_timezones = [ - tz for tz in common_timezones if tz in all_timezones] - -common_timezones_set = set(common_timezones) diff --git a/lib/pytz/exceptions.py b/lib/pytz/exceptions.py deleted file mode 100644 index 0376108e14bb..000000000000 --- a/lib/pytz/exceptions.py +++ /dev/null @@ -1,48 +0,0 @@ -''' -Custom exceptions raised by pytz. -''' - -__all__ = [ - 'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError', - 'NonExistentTimeError', - ] - - -class UnknownTimeZoneError(KeyError): - '''Exception raised when pytz is passed an unknown timezone. - - >>> isinstance(UnknownTimeZoneError(), LookupError) - True - - This class is actually a subclass of KeyError to provide backwards - compatibility with code relying on the undocumented behavior of earlier - pytz releases. - - >>> isinstance(UnknownTimeZoneError(), KeyError) - True - ''' - pass - - -class InvalidTimeError(Exception): - '''Base class for invalid time exceptions.''' - - -class AmbiguousTimeError(InvalidTimeError): - '''Exception raised when attempting to create an ambiguous wallclock time. - - At the end of a DST transition period, a particular wallclock time will - occur twice (once before the clocks are set back, once after). Both - possibilities may be correct, unless further information is supplied. - - See DstTzInfo.normalize() for more info - ''' - - -class NonExistentTimeError(InvalidTimeError): - '''Exception raised when attempting to create a wallclock time that - cannot exist. - - At the start of a DST transition period, the wallclock time jumps forward. - The instants jumped over never occur. - ''' diff --git a/lib/pytz/reference.py b/lib/pytz/reference.py deleted file mode 100644 index 3dda13e75cd4..000000000000 --- a/lib/pytz/reference.py +++ /dev/null @@ -1,127 +0,0 @@ -''' -Reference tzinfo implementations from the Python docs. -Used for testing against as they are only correct for the years -1987 to 2006. Do not use these for real code. -''' - -from datetime import tzinfo, timedelta, datetime -from pytz import utc, UTC, HOUR, ZERO - -# A class building tzinfo objects for fixed-offset time zones. -# Note that FixedOffset(0, "UTC") is a different way to build a -# UTC tzinfo object. - -class FixedOffset(tzinfo): - """Fixed offset in minutes east from UTC.""" - - def __init__(self, offset, name): - self.__offset = timedelta(minutes = offset) - self.__name = name - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return self.__name - - def dst(self, dt): - return ZERO - -# A class capturing the platform's idea of local time. 
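# Quick usage sketch for the reference FixedOffset class defined just above
# (offset is minutes east of UTC; importing it from the upstream pytz package
# is assumed here purely to keep the example self-contained):
from datetime import datetime
from pytz.reference import FixedOffset

ist = FixedOffset(330, "IST")                 # UTC+05:30, no DST
dt = datetime(2012, 7, 1, 12, 0, tzinfo=ist)
print(dt.utcoffset())                         # 5:30:00
print(dt.tzname())                            # IST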
- -import time as _time - -STDOFFSET = timedelta(seconds = -_time.timezone) -if _time.daylight: - DSTOFFSET = timedelta(seconds = -_time.altzone) -else: - DSTOFFSET = STDOFFSET - -DSTDIFF = DSTOFFSET - STDOFFSET - -class LocalTimezone(tzinfo): - - def utcoffset(self, dt): - if self._isdst(dt): - return DSTOFFSET - else: - return STDOFFSET - - def dst(self, dt): - if self._isdst(dt): - return DSTDIFF - else: - return ZERO - - def tzname(self, dt): - return _time.tzname[self._isdst(dt)] - - def _isdst(self, dt): - tt = (dt.year, dt.month, dt.day, - dt.hour, dt.minute, dt.second, - dt.weekday(), 0, -1) - stamp = _time.mktime(tt) - tt = _time.localtime(stamp) - return tt.tm_isdst > 0 - -Local = LocalTimezone() - -# A complete implementation of current DST rules for major US time zones. - -def first_sunday_on_or_after(dt): - days_to_go = 6 - dt.weekday() - if days_to_go: - dt += timedelta(days_to_go) - return dt - -# In the US, DST starts at 2am (standard time) on the first Sunday in April. -DSTSTART = datetime(1, 4, 1, 2) -# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct. -# which is the first Sunday on or after Oct 25. -DSTEND = datetime(1, 10, 25, 1) - -class USTimeZone(tzinfo): - - def __init__(self, hours, reprname, stdname, dstname): - self.stdoffset = timedelta(hours=hours) - self.reprname = reprname - self.stdname = stdname - self.dstname = dstname - - def __repr__(self): - return self.reprname - - def tzname(self, dt): - if self.dst(dt): - return self.dstname - else: - return self.stdname - - def utcoffset(self, dt): - return self.stdoffset + self.dst(dt) - - def dst(self, dt): - if dt is None or dt.tzinfo is None: - # An exception may be sensible here, in one or both cases. - # It depends on how you want to treat them. The default - # fromutc() implementation (called by the default astimezone() - # implementation) passes a datetime with dt.tzinfo is self. - return ZERO - assert dt.tzinfo is self - - # Find first Sunday in April & the last in October. - start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year)) - end = first_sunday_on_or_after(DSTEND.replace(year=dt.year)) - - # Can't compare naive to aware objects, so strip the timezone from - # dt first. 
- if start <= dt.replace(tzinfo=None) < end: - return HOUR - else: - return ZERO - -Eastern = USTimeZone(-5, "Eastern", "EST", "EDT") -Central = USTimeZone(-6, "Central", "CST", "CDT") -Mountain = USTimeZone(-7, "Mountain", "MST", "MDT") -Pacific = USTimeZone(-8, "Pacific", "PST", "PDT") - diff --git a/lib/pytz/tests/test_docs.py b/lib/pytz/tests/test_docs.py deleted file mode 100644 index 4302dcab233c..000000000000 --- a/lib/pytz/tests/test_docs.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: ascii -*- - -from doctest import DocTestSuite -import unittest, os, os.path, sys -import warnings - -# We test the documentation this way instead of using DocFileSuite so -# we can run the tests under Python 2.3 -def test_README(): - pass - -this_dir = os.path.dirname(__file__) -locs = [ - os.path.join(this_dir, os.pardir, 'README.txt'), - os.path.join(this_dir, os.pardir, os.pardir, 'README.txt'), - ] -for loc in locs: - if os.path.exists(loc): - test_README.__doc__ = open(loc).read() - break -if test_README.__doc__ is None: - raise RuntimeError('README.txt not found') - - -def test_suite(): - "For the Z3 test runner" - return DocTestSuite() - - -if __name__ == '__main__': - sys.path.insert(0, os.path.abspath(os.path.join( - this_dir, os.pardir, os.pardir - ))) - unittest.main(defaultTest='test_suite') - - diff --git a/lib/pytz/tests/test_tzinfo.py b/lib/pytz/tests/test_tzinfo.py deleted file mode 100644 index 24abb91ee844..000000000000 --- a/lib/pytz/tests/test_tzinfo.py +++ /dev/null @@ -1,813 +0,0 @@ -# -*- coding: ascii -*- - -import sys, os, os.path -import unittest, doctest -try: - import cPickle as pickle -except ImportError: - import pickle -from datetime import datetime, time, timedelta, tzinfo -import warnings - -if __name__ == '__main__': - # Only munge path if invoked as a script. Testrunners should have setup - # the paths already - sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, os.pardir))) - -import pytz -from pytz import reference -from pytz.tzfile import _byte_string -from pytz.tzinfo import DstTzInfo, StaticTzInfo - -# I test for expected version to ensure the correct version of pytz is -# actually being tested. -EXPECTED_VERSION='2012d' - -fmt = '%Y-%m-%d %H:%M:%S %Z%z' - -NOTIME = timedelta(0) - -# GMT is a tzinfo.StaticTzInfo--the class we primarily want to test--while -# UTC is reference implementation. They both have the same timezone meaning. -UTC = pytz.timezone('UTC') -GMT = pytz.timezone('GMT') -assert isinstance(GMT, StaticTzInfo), 'GMT is no longer a StaticTzInfo' - -def prettydt(dt): - """datetime as a string using a known format. - - We don't use strftime as it doesn't handle years earlier than 1900 - per http://bugs.python.org/issue1777412 - """ - if dt.utcoffset() >= timedelta(0): - offset = '+%s' % (dt.utcoffset(),) - else: - offset = '-%s' % (-1 * dt.utcoffset(),) - return '%04d-%02d-%02d %02d:%02d:%02d %s %s' % ( - dt.year, dt.month, dt.day, - dt.hour, dt.minute, dt.second, - dt.tzname(), offset) - - -try: - unicode -except NameError: - # Python 3.x doesn't have unicode(), making writing code - # for Python 2.3 and Python 3.x a pain. - unicode = str - - -class BasicTest(unittest.TestCase): - - def testVersion(self): - # Ensuring the correct version of pytz has been loaded - self.assertEqual(EXPECTED_VERSION, pytz.__version__, - 'Incorrect pytz version loaded. Import path is stuffed ' - 'or this test needs updating. 
(Wanted %s, got %s)' - % (EXPECTED_VERSION, pytz.__version__) - ) - - def testGMT(self): - now = datetime.now(tz=GMT) - self.assertTrue(now.utcoffset() == NOTIME) - self.assertTrue(now.dst() == NOTIME) - self.assertTrue(now.timetuple() == now.utctimetuple()) - self.assertTrue(now==now.replace(tzinfo=UTC)) - - def testReferenceUTC(self): - now = datetime.now(tz=UTC) - self.assertTrue(now.utcoffset() == NOTIME) - self.assertTrue(now.dst() == NOTIME) - self.assertTrue(now.timetuple() == now.utctimetuple()) - - def testUnknownOffsets(self): - # This tzinfo behavior is required to make - # datetime.time.{utcoffset, dst, tzname} work as documented. - - dst_tz = pytz.timezone('US/Eastern') - - # This information is not known when we don't have a date, - # so return None per API. - self.assertTrue(dst_tz.utcoffset(None) is None) - self.assertTrue(dst_tz.dst(None) is None) - # We don't know the abbreviation, but this is still a valid - # tzname per the Python documentation. - self.assertEqual(dst_tz.tzname(None), 'US/Eastern') - - def clearCache(self): - pytz._tzinfo_cache.clear() - - def testUnicodeTimezone(self): - # We need to ensure that cold lookups work for both Unicode - # and traditional strings, and that the desired singleton is - # returned. - self.clearCache() - eastern = pytz.timezone(unicode('US/Eastern')) - self.assertTrue(eastern is pytz.timezone('US/Eastern')) - - self.clearCache() - eastern = pytz.timezone('US/Eastern') - self.assertTrue(eastern is pytz.timezone(unicode('US/Eastern'))) - - -class PicklingTest(unittest.TestCase): - - def _roundtrip_tzinfo(self, tz): - p = pickle.dumps(tz) - unpickled_tz = pickle.loads(p) - self.assertTrue(tz is unpickled_tz, '%s did not roundtrip' % tz.zone) - - def _roundtrip_datetime(self, dt): - # Ensure that the tzinfo attached to a datetime instance - # is identical to the one returned. This is important for - # DST timezones, as some state is stored in the tzinfo. - tz = dt.tzinfo - p = pickle.dumps(dt) - unpickled_dt = pickle.loads(p) - unpickled_tz = unpickled_dt.tzinfo - self.assertTrue(tz is unpickled_tz, '%s did not roundtrip' % tz.zone) - - def testDst(self): - tz = pytz.timezone('Europe/Amsterdam') - dt = datetime(2004, 2, 1, 0, 0, 0) - - for localized_tz in tz._tzinfos.values(): - self._roundtrip_tzinfo(localized_tz) - self._roundtrip_datetime(dt.replace(tzinfo=localized_tz)) - - def testRoundtrip(self): - dt = datetime(2004, 2, 1, 0, 0, 0) - for zone in pytz.all_timezones: - tz = pytz.timezone(zone) - self._roundtrip_tzinfo(tz) - - def testDatabaseFixes(self): - # Hack the pickle to make it refer to a timezone abbreviation - # that does not match anything. The unpickler should be able - # to repair this case - tz = pytz.timezone('Australia/Melbourne') - p = pickle.dumps(tz) - tzname = tz._tzname - hacked_p = p.replace(_byte_string(tzname), _byte_string('???')) - self.assertNotEqual(p, hacked_p) - unpickled_tz = pickle.loads(hacked_p) - self.assertTrue(tz is unpickled_tz) - - # Simulate a database correction. In this case, the incorrect - # data will continue to be used. - p = pickle.dumps(tz) - new_utcoffset = tz._utcoffset.seconds + 42 - - # Python 3 introduced a new pickle protocol where numbers are stored in - # hexadecimal representation. Here we extract the pickle - # representation of the number for the current Python version. 
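# Sketch of the trick described in the comment above: the bytes that encode an
# integer inside a pickle depend on the interpreter and protocol, so the test
# derives a searchable pattern from a pickle of the number itself. The offset
# values below are illustrative.
import pickle

old_offset, new_offset = 18000, 18000 + 42
old_pattern = pickle.dumps(old_offset)[3:-1]
new_pattern = pickle.dumps(new_offset)[3:-1]
# the test substitutes new_pattern for old_pattern inside a pickled tzinfo
# byte string before loading it again
print(old_pattern, new_pattern)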
- old_pickle_pattern = pickle.dumps(tz._utcoffset.seconds)[3:-1] - new_pickle_pattern = pickle.dumps(new_utcoffset)[3:-1] - hacked_p = p.replace(old_pickle_pattern, new_pickle_pattern) - - self.assertNotEqual(p, hacked_p) - unpickled_tz = pickle.loads(hacked_p) - self.assertEqual(unpickled_tz._utcoffset.seconds, new_utcoffset) - self.assertTrue(tz is not unpickled_tz) - - def testOldPickles(self): - # Ensure that applications serializing pytz instances as pickles - # have no troubles upgrading to a new pytz release. These pickles - # where created with pytz2006j - east1 = pickle.loads(_byte_string( - "cpytz\n_p\np1\n(S'US/Eastern'\np2\nI-18000\n" - "I0\nS'EST'\np3\ntRp4\n." - )) - east2 = pytz.timezone('US/Eastern') - self.assertTrue(east1 is east2) - - # Confirm changes in name munging between 2006j and 2007c cause - # no problems. - pap1 = pickle.loads(_byte_string( - "cpytz\n_p\np1\n(S'America/Port_minus_au_minus_Prince'" - "\np2\nI-17340\nI0\nS'PPMT'\np3\ntRp4\n.")) - pap2 = pytz.timezone('America/Port-au-Prince') - self.assertTrue(pap1 is pap2) - - gmt1 = pickle.loads(_byte_string( - "cpytz\n_p\np1\n(S'Etc/GMT_plus_10'\np2\ntRp3\n.")) - gmt2 = pytz.timezone('Etc/GMT+10') - self.assertTrue(gmt1 is gmt2) - - -class USEasternDSTStartTestCase(unittest.TestCase): - tzinfo = pytz.timezone('US/Eastern') - - # 24 hours before DST changeover - transition_time = datetime(2002, 4, 7, 7, 0, 0, tzinfo=UTC) - - # Increase for 'flexible' DST transitions due to 1 minute granularity - # of Python's datetime library - instant = timedelta(seconds=1) - - # before transition - before = { - 'tzname': 'EST', - 'utcoffset': timedelta(hours = -5), - 'dst': timedelta(hours = 0), - } - - # after transition - after = { - 'tzname': 'EDT', - 'utcoffset': timedelta(hours = -4), - 'dst': timedelta(hours = 1), - } - - def _test_tzname(self, utc_dt, wanted): - tzname = wanted['tzname'] - dt = utc_dt.astimezone(self.tzinfo) - self.assertEqual(dt.tzname(), tzname, - 'Expected %s as tzname for %s. Got %s' % ( - tzname, str(utc_dt), dt.tzname() - ) - ) - - def _test_utcoffset(self, utc_dt, wanted): - utcoffset = wanted['utcoffset'] - dt = utc_dt.astimezone(self.tzinfo) - self.assertEqual( - dt.utcoffset(), wanted['utcoffset'], - 'Expected %s as utcoffset for %s. Got %s' % ( - utcoffset, utc_dt, dt.utcoffset() - ) - ) - - def _test_dst(self, utc_dt, wanted): - dst = wanted['dst'] - dt = utc_dt.astimezone(self.tzinfo) - self.assertEqual(dt.dst(),dst, - 'Expected %s as dst for %s. Got %s' % ( - dst, utc_dt, dt.dst() - ) - ) - - def test_arithmetic(self): - utc_dt = self.transition_time - - for days in range(-420, 720, 20): - delta = timedelta(days=days) - - # Make sure we can get back where we started - dt = utc_dt.astimezone(self.tzinfo) - dt2 = dt + delta - dt2 = dt2 - delta - self.assertEqual(dt, dt2) - - # Make sure arithmetic crossing DST boundaries ends - # up in the correct timezone after normalization - utc_plus_delta = (utc_dt + delta).astimezone(self.tzinfo) - local_plus_delta = self.tzinfo.normalize(dt + delta) - self.assertEqual( - prettydt(utc_plus_delta), - prettydt(local_plus_delta), - 'Incorrect result for delta==%d days. Wanted %r. 
Got %r'%( - days, - prettydt(utc_plus_delta), - prettydt(local_plus_delta), - ) - ) - - def _test_all(self, utc_dt, wanted): - self._test_utcoffset(utc_dt, wanted) - self._test_tzname(utc_dt, wanted) - self._test_dst(utc_dt, wanted) - - def testDayBefore(self): - self._test_all( - self.transition_time - timedelta(days=1), self.before - ) - - def testTwoHoursBefore(self): - self._test_all( - self.transition_time - timedelta(hours=2), self.before - ) - - def testHourBefore(self): - self._test_all( - self.transition_time - timedelta(hours=1), self.before - ) - - def testInstantBefore(self): - self._test_all( - self.transition_time - self.instant, self.before - ) - - def testTransition(self): - self._test_all( - self.transition_time, self.after - ) - - def testInstantAfter(self): - self._test_all( - self.transition_time + self.instant, self.after - ) - - def testHourAfter(self): - self._test_all( - self.transition_time + timedelta(hours=1), self.after - ) - - def testTwoHoursAfter(self): - self._test_all( - self.transition_time + timedelta(hours=1), self.after - ) - - def testDayAfter(self): - self._test_all( - self.transition_time + timedelta(days=1), self.after - ) - - -class USEasternDSTEndTestCase(USEasternDSTStartTestCase): - tzinfo = pytz.timezone('US/Eastern') - transition_time = datetime(2002, 10, 27, 6, 0, 0, tzinfo=UTC) - before = { - 'tzname': 'EDT', - 'utcoffset': timedelta(hours = -4), - 'dst': timedelta(hours = 1), - } - after = { - 'tzname': 'EST', - 'utcoffset': timedelta(hours = -5), - 'dst': timedelta(hours = 0), - } - - -class USEasternEPTStartTestCase(USEasternDSTStartTestCase): - transition_time = datetime(1945, 8, 14, 23, 0, 0, tzinfo=UTC) - before = { - 'tzname': 'EWT', - 'utcoffset': timedelta(hours = -4), - 'dst': timedelta(hours = 1), - } - after = { - 'tzname': 'EPT', - 'utcoffset': timedelta(hours = -4), - 'dst': timedelta(hours = 1), - } - - -class USEasternEPTEndTestCase(USEasternDSTStartTestCase): - transition_time = datetime(1945, 9, 30, 6, 0, 0, tzinfo=UTC) - before = { - 'tzname': 'EPT', - 'utcoffset': timedelta(hours = -4), - 'dst': timedelta(hours = 1), - } - after = { - 'tzname': 'EST', - 'utcoffset': timedelta(hours = -5), - 'dst': timedelta(hours = 0), - } - - -class WarsawWMTEndTestCase(USEasternDSTStartTestCase): - # In 1915, Warsaw changed from Warsaw to Central European time. - # This involved the clocks being set backwards, causing a end-of-DST - # like situation without DST being involved. - tzinfo = pytz.timezone('Europe/Warsaw') - transition_time = datetime(1915, 8, 4, 22, 36, 0, tzinfo=UTC) - before = { - 'tzname': 'WMT', - 'utcoffset': timedelta(hours=1, minutes=24), - 'dst': timedelta(0), - } - after = { - 'tzname': 'CET', - 'utcoffset': timedelta(hours=1), - 'dst': timedelta(0), - } - - -class VilniusWMTEndTestCase(USEasternDSTStartTestCase): - # At the end of 1916, Vilnius changed timezones putting its clock - # forward by 11 minutes 35 seconds. Neither timezone was in DST mode. 
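# Generic sketch of how these transition test cases probe a change: take the
# UTC instant of the transition, step one tick either side, and inspect the
# local tzname/utcoffset/dst. The instant and step mirror the Vilnius case
# defined here; the exact values printed depend on the tz database release in
# use, and the upstream pytz package is assumed to be importable.
from datetime import datetime, timedelta
import pytz

tz = pytz.timezone('Europe/Vilnius')
transition = datetime(1916, 12, 31, 22, 36, 0, tzinfo=pytz.utc)
for probe in (transition - timedelta(seconds=31),
              transition + timedelta(seconds=31)):
    local = probe.astimezone(tz)
    print(local.tzname(), local.utcoffset(), local.dst())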
- tzinfo = pytz.timezone('Europe/Vilnius') - instant = timedelta(seconds=31) - transition_time = datetime(1916, 12, 31, 22, 36, 00, tzinfo=UTC) - before = { - 'tzname': 'WMT', - 'utcoffset': timedelta(hours=1, minutes=24), - 'dst': timedelta(0), - } - after = { - 'tzname': 'KMT', - 'utcoffset': timedelta(hours=1, minutes=36), # Really 1:35:36 - 'dst': timedelta(0), - } - - -class VilniusCESTStartTestCase(USEasternDSTStartTestCase): - # In 1941, Vilnius changed from MSG to CEST, switching to summer - # time while simultaneously reducing its UTC offset by two hours, - # causing the clocks to go backwards for this summer time - # switchover. - tzinfo = pytz.timezone('Europe/Vilnius') - transition_time = datetime(1941, 6, 23, 21, 00, 00, tzinfo=UTC) - before = { - 'tzname': 'MSK', - 'utcoffset': timedelta(hours=3), - 'dst': timedelta(0), - } - after = { - 'tzname': 'CEST', - 'utcoffset': timedelta(hours=2), - 'dst': timedelta(hours=1), - } - - -class LondonHistoryStartTestCase(USEasternDSTStartTestCase): - # The first known timezone transition in London was in 1847 when - # clocks where synchronized to GMT. However, we currently only - # understand v1 format tzfile(5) files which does handle years - # this far in the past, so our earliest known transition is in - # 1916. - tzinfo = pytz.timezone('Europe/London') - # transition_time = datetime(1847, 12, 1, 1, 15, 00, tzinfo=UTC) - # before = { - # 'tzname': 'LMT', - # 'utcoffset': timedelta(minutes=-75), - # 'dst': timedelta(0), - # } - # after = { - # 'tzname': 'GMT', - # 'utcoffset': timedelta(0), - # 'dst': timedelta(0), - # } - transition_time = datetime(1916, 5, 21, 2, 00, 00, tzinfo=UTC) - before = { - 'tzname': 'GMT', - 'utcoffset': timedelta(0), - 'dst': timedelta(0), - } - after = { - 'tzname': 'BST', - 'utcoffset': timedelta(hours=1), - 'dst': timedelta(hours=1), - } - - -class LondonHistoryEndTestCase(USEasternDSTStartTestCase): - # Timezone switchovers are projected into the future, even - # though no official statements exist or could be believed even - # if they did exist. We currently only check the last known - # transition in 2037, as we are still using v1 format tzfile(5) - # files. - tzinfo = pytz.timezone('Europe/London') - # transition_time = datetime(2499, 10, 25, 1, 0, 0, tzinfo=UTC) - transition_time = datetime(2037, 10, 25, 1, 0, 0, tzinfo=UTC) - before = { - 'tzname': 'BST', - 'utcoffset': timedelta(hours=1), - 'dst': timedelta(hours=1), - } - after = { - 'tzname': 'GMT', - 'utcoffset': timedelta(0), - 'dst': timedelta(0), - } - - -class NoumeaHistoryStartTestCase(USEasternDSTStartTestCase): - # Noumea adopted a whole hour offset in 1912. Previously - # it was 11 hours, 5 minutes and 48 seconds off UTC. However, - # due to limitations of the Python datetime library, we need - # to round that to 11 hours 6 minutes. - tzinfo = pytz.timezone('Pacific/Noumea') - transition_time = datetime(1912, 1, 12, 12, 54, 12, tzinfo=UTC) - before = { - 'tzname': 'LMT', - 'utcoffset': timedelta(hours=11, minutes=6), - 'dst': timedelta(0), - } - after = { - 'tzname': 'NCT', - 'utcoffset': timedelta(hours=11), - 'dst': timedelta(0), - } - - -class NoumeaDSTEndTestCase(USEasternDSTStartTestCase): - # Noumea dropped DST in 1997. 
- tzinfo = pytz.timezone('Pacific/Noumea') - transition_time = datetime(1997, 3, 1, 15, 00, 00, tzinfo=UTC) - before = { - 'tzname': 'NCST', - 'utcoffset': timedelta(hours=12), - 'dst': timedelta(hours=1), - } - after = { - 'tzname': 'NCT', - 'utcoffset': timedelta(hours=11), - 'dst': timedelta(0), - } - - -class NoumeaNoMoreDSTTestCase(NoumeaDSTEndTestCase): - # Noumea dropped DST in 1997. Here we test that it stops occuring. - transition_time = ( - NoumeaDSTEndTestCase.transition_time + timedelta(days=365*10)) - before = NoumeaDSTEndTestCase.after - after = NoumeaDSTEndTestCase.after - - -class TahitiTestCase(USEasternDSTStartTestCase): - # Tahiti has had a single transition in its history. - tzinfo = pytz.timezone('Pacific/Tahiti') - transition_time = datetime(1912, 10, 1, 9, 58, 16, tzinfo=UTC) - before = { - 'tzname': 'LMT', - 'utcoffset': timedelta(hours=-9, minutes=-58), - 'dst': timedelta(0), - } - after = { - 'tzname': 'TAHT', - 'utcoffset': timedelta(hours=-10), - 'dst': timedelta(0), - } - - -class SamoaInternationalDateLineChange(USEasternDSTStartTestCase): - # At the end of 2011, Samoa will switch from being east of the - # international dateline to the west. There will be no Dec 30th - # 2011 and it will switch from UTC-10 to UTC+14. - tzinfo = pytz.timezone('Pacific/Apia') - transition_time = datetime(2011, 12, 30, 10, 0, 0, tzinfo=UTC) - before = { - 'tzname': 'WSDT', - 'utcoffset': timedelta(hours=-10), - 'dst': timedelta(hours=1), - } - after = { - 'tzname': 'WSDT', - 'utcoffset': timedelta(hours=14), - 'dst': timedelta(hours=1), - } - - -class ReferenceUSEasternDSTStartTestCase(USEasternDSTStartTestCase): - tzinfo = reference.Eastern - def test_arithmetic(self): - # Reference implementation cannot handle this - pass - - -class ReferenceUSEasternDSTEndTestCase(USEasternDSTEndTestCase): - tzinfo = reference.Eastern - - def testHourBefore(self): - # Python's datetime library has a bug, where the hour before - # a daylight savings transition is one hour out. For example, - # at the end of US/Eastern daylight savings time, 01:00 EST - # occurs twice (once at 05:00 UTC and once at 06:00 UTC), - # whereas the first should actually be 01:00 EDT. - # Note that this bug is by design - by accepting this ambiguity - # for one hour one hour per year, an is_dst flag on datetime.time - # became unnecessary. 
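# The ambiguity described in the comment above is what pytz's is_dst flag
# resolves: at the end of US/Eastern DST the wall clock repeats an hour, so
# 01:30 occurs twice. A short sketch (the upstream pytz package is assumed to
# be importable):
from datetime import datetime
import pytz

eastern = pytz.timezone('US/Eastern')
wall = datetime(2002, 10, 27, 1, 30, 0)
print(eastern.localize(wall, is_dst=True).strftime('%Z%z'))    # EDT-0400
print(eastern.localize(wall, is_dst=False).strftime('%Z%z'))   # EST-0500
try:
    eastern.localize(wall, is_dst=None)
except pytz.AmbiguousTimeError:
    print('ambiguous without an is_dst hint')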
- self._test_all( - self.transition_time - timedelta(hours=1), self.after - ) - - def testInstantBefore(self): - self._test_all( - self.transition_time - timedelta(seconds=1), self.after - ) - - def test_arithmetic(self): - # Reference implementation cannot handle this - pass - - -class LocalTestCase(unittest.TestCase): - def testLocalize(self): - loc_tz = pytz.timezone('Europe/Amsterdam') - - loc_time = loc_tz.localize(datetime(1930, 5, 10, 0, 0, 0)) - # Actually +00:19:32, but Python datetime rounds this - self.assertEqual(loc_time.strftime('%Z%z'), 'AMT+0020') - - loc_time = loc_tz.localize(datetime(1930, 5, 20, 0, 0, 0)) - # Actually +00:19:32, but Python datetime rounds this - self.assertEqual(loc_time.strftime('%Z%z'), 'NST+0120') - - loc_time = loc_tz.localize(datetime(1940, 5, 10, 0, 0, 0)) - self.assertEqual(loc_time.strftime('%Z%z'), 'NET+0020') - - loc_time = loc_tz.localize(datetime(1940, 5, 20, 0, 0, 0)) - self.assertEqual(loc_time.strftime('%Z%z'), 'CEST+0200') - - loc_time = loc_tz.localize(datetime(2004, 2, 1, 0, 0, 0)) - self.assertEqual(loc_time.strftime('%Z%z'), 'CET+0100') - - loc_time = loc_tz.localize(datetime(2004, 4, 1, 0, 0, 0)) - self.assertEqual(loc_time.strftime('%Z%z'), 'CEST+0200') - - tz = pytz.timezone('Europe/Amsterdam') - loc_time = loc_tz.localize(datetime(1943, 3, 29, 1, 59, 59)) - self.assertEqual(loc_time.strftime('%Z%z'), 'CET+0100') - - - # Switch to US - loc_tz = pytz.timezone('US/Eastern') - - # End of DST ambiguity check - loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=1) - self.assertEqual(loc_time.strftime('%Z%z'), 'EDT-0400') - - loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=0) - self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500') - - self.assertRaises(pytz.AmbiguousTimeError, - loc_tz.localize, datetime(1918, 10, 27, 1, 59, 59), is_dst=None - ) - - # Start of DST non-existent times - loc_time = loc_tz.localize(datetime(1918, 3, 31, 2, 0, 0), is_dst=0) - self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500') - - loc_time = loc_tz.localize(datetime(1918, 3, 31, 2, 0, 0), is_dst=1) - self.assertEqual(loc_time.strftime('%Z%z'), 'EDT-0400') - - self.assertRaises(pytz.NonExistentTimeError, - loc_tz.localize, datetime(1918, 3, 31, 2, 0, 0), is_dst=None - ) - - # Weird changes - war time and peace time both is_dst==True - - loc_time = loc_tz.localize(datetime(1942, 2, 9, 3, 0, 0)) - self.assertEqual(loc_time.strftime('%Z%z'), 'EWT-0400') - - loc_time = loc_tz.localize(datetime(1945, 8, 14, 19, 0, 0)) - self.assertEqual(loc_time.strftime('%Z%z'), 'EPT-0400') - - loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=1) - self.assertEqual(loc_time.strftime('%Z%z'), 'EPT-0400') - - loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=0) - self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500') - - def testNormalize(self): - tz = pytz.timezone('US/Eastern') - dt = datetime(2004, 4, 4, 7, 0, 0, tzinfo=UTC).astimezone(tz) - dt2 = dt - timedelta(minutes=10) - self.assertEqual( - dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'), - '2004-04-04 02:50:00 EDT-0400' - ) - - dt2 = tz.normalize(dt2) - self.assertEqual( - dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'), - '2004-04-04 01:50:00 EST-0500' - ) - - def testPartialMinuteOffsets(self): - # utcoffset in Amsterdam was not a whole minute until 1937 - # However, we fudge this by rounding them, as the Python - # datetime library - tz = pytz.timezone('Europe/Amsterdam') - utc_dt = datetime(1914, 1, 1, 13, 40, 28, tzinfo=UTC) # correct - utc_dt = 
utc_dt.replace(second=0) # But we need to fudge it - loc_dt = utc_dt.astimezone(tz) - self.assertEqual( - loc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'), - '1914-01-01 14:00:00 AMT+0020' - ) - - # And get back... - utc_dt = loc_dt.astimezone(UTC) - self.assertEqual( - utc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'), - '1914-01-01 13:40:00 UTC+0000' - ) - - def no_testCreateLocaltime(self): - # It would be nice if this worked, but it doesn't. - tz = pytz.timezone('Europe/Amsterdam') - dt = datetime(2004, 10, 31, 2, 0, 0, tzinfo=tz) - self.assertEqual( - dt.strftime(fmt), - '2004-10-31 02:00:00 CET+0100' - ) - - -class CommonTimezonesTestCase(unittest.TestCase): - def test_bratislava(self): - # Bratislava is the default timezone for Slovakia, but our - # heuristics where not adding it to common_timezones. Ideally, - # common_timezones should be populated from zone.tab at runtime, - # but I'm hesitant to pay the startup cost as loading the list - # on demand whilst remaining backwards compatible seems - # difficult. - self.assertTrue('Europe/Bratislava' in pytz.common_timezones) - self.assertTrue('Europe/Bratislava' in pytz.common_timezones_set) - - def test_us_eastern(self): - self.assertTrue('US/Eastern' in pytz.common_timezones) - self.assertTrue('US/Eastern' in pytz.common_timezones_set) - - def test_belfast(self): - # Belfast uses London time. - self.assertTrue('Europe/Belfast' in pytz.all_timezones_set) - self.assertFalse('Europe/Belfast' in pytz.common_timezones) - self.assertFalse('Europe/Belfast' in pytz.common_timezones_set) - - -class BaseTzInfoTestCase: - '''Ensure UTC, StaticTzInfo and DstTzInfo work consistently. - - These tests are run for each type of tzinfo. - ''' - tz = None # override - tz_class = None # override - - def test_expectedclass(self): - self.assertTrue(isinstance(self.tz, self.tz_class)) - - def test_fromutc(self): - # naive datetime. - dt1 = datetime(2011, 10, 31) - - # localized datetime, same timezone. - dt2 = self.tz.localize(dt1) - - # Both should give the same results. Note that the standard - # Python tzinfo.fromutc() only supports the second. - for dt in [dt1, dt2]: - loc_dt = self.tz.fromutc(dt) - loc_dt2 = pytz.utc.localize(dt1).astimezone(self.tz) - self.assertEqual(loc_dt, loc_dt2) - - # localized datetime, different timezone. - new_tz = pytz.timezone('Europe/Paris') - self.assertTrue(self.tz is not new_tz) - dt3 = new_tz.localize(dt1) - self.assertRaises(ValueError, self.tz.fromutc, dt3) - - def test_normalize(self): - other_tz = pytz.timezone('Europe/Paris') - self.assertTrue(self.tz is not other_tz) - - dt = datetime(2012, 3, 26, 12, 0) - other_dt = other_tz.localize(dt) - - local_dt = self.tz.normalize(other_dt) - - self.assertTrue(local_dt.tzinfo is not other_dt.tzinfo) - self.assertNotEqual( - local_dt.replace(tzinfo=None), other_dt.replace(tzinfo=None)) - - def test_astimezone(self): - other_tz = pytz.timezone('Europe/Paris') - self.assertTrue(self.tz is not other_tz) - - dt = datetime(2012, 3, 26, 12, 0) - other_dt = other_tz.localize(dt) - - local_dt = other_dt.astimezone(self.tz) - - self.assertTrue(local_dt.tzinfo is not other_dt.tzinfo) - self.assertNotEqual( - local_dt.replace(tzinfo=None), other_dt.replace(tzinfo=None)) - - -class OptimizedUTCTestCase(unittest.TestCase, BaseTzInfoTestCase): - tz = pytz.utc - tz_class = tz.__class__ - - -class LegacyUTCTestCase(unittest.TestCase, BaseTzInfoTestCase): - # Deprecated timezone, but useful for comparison tests. 
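# Sketch of the normalize() behaviour exercised in testNormalize above: plain
# datetime arithmetic on an aware value can land on the wrong side of a DST
# boundary, and tz.normalize() repairs the tzinfo. Values mirror the test; the
# upstream pytz package is assumed to be importable.
from datetime import datetime, timedelta
import pytz

eastern = pytz.timezone('US/Eastern')
dt = datetime(2004, 4, 4, 7, 0, 0, tzinfo=pytz.utc).astimezone(eastern)
shifted = dt - timedelta(minutes=10)        # still labelled EDT, but invalid
print(shifted.strftime('%Y-%m-%d %H:%M:%S %Z%z'))
print(eastern.normalize(shifted).strftime('%Y-%m-%d %H:%M:%S %Z%z'))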
- tz = pytz.timezone('Etc/UTC') - tz_class = StaticTzInfo - - -class StaticTzInfoTestCase(unittest.TestCase, BaseTzInfoTestCase): - tz = pytz.timezone('GMT') - tz_class = StaticTzInfo - - -class DstTzInfoTestCase(unittest.TestCase, BaseTzInfoTestCase): - tz = pytz.timezone('Australia/Melbourne') - tz_class = DstTzInfo - - -def test_suite(): - suite = unittest.TestSuite() - suite.addTest(doctest.DocTestSuite('pytz')) - suite.addTest(doctest.DocTestSuite('pytz.tzinfo')) - import test_tzinfo - suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_tzinfo)) - return suite - - -if __name__ == '__main__': - warnings.simplefilter("error") # Warnings should be fatal in tests. - unittest.main(defaultTest='test_suite') - diff --git a/lib/pytz/tzfile.py b/lib/pytz/tzfile.py deleted file mode 100644 index 9c007c80995a..000000000000 --- a/lib/pytz/tzfile.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python -''' -$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $ -''' - -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO -from datetime import datetime, timedelta -from struct import unpack, calcsize - -from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo -from pytz.tzinfo import memorized_datetime, memorized_timedelta - -def _byte_string(s): - """Cast a string or byte string to an ASCII byte string.""" - return s.encode('US-ASCII') - -_NULL = _byte_string('\0') - -def _std_string(s): - """Cast a string or byte string to an ASCII string.""" - return str(s.decode('US-ASCII')) - -def build_tzinfo(zone, fp): - head_fmt = '>4s c 15x 6l' - head_size = calcsize(head_fmt) - (magic, format, ttisgmtcnt, ttisstdcnt,leapcnt, timecnt, - typecnt, charcnt) = unpack(head_fmt, fp.read(head_size)) - - # Make sure it is a tzfile(5) file - assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic) - - # Read out the transition times, localtime indices and ttinfo structures. - data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict( - timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt) - data_size = calcsize(data_fmt) - data = unpack(data_fmt, fp.read(data_size)) - - # make sure we unpacked the right number of values - assert len(data) == 2 * timecnt + 3 * typecnt + 1 - transitions = [memorized_datetime(trans) - for trans in data[:timecnt]] - lindexes = list(data[timecnt:2 * timecnt]) - ttinfo_raw = data[2 * timecnt:-1] - tznames_raw = data[-1] - del data - - # Process ttinfo into separate structs - ttinfo = [] - tznames = {} - i = 0 - while i < len(ttinfo_raw): - # have we looked up this timezone name yet? 
- tzname_offset = ttinfo_raw[i+2] - if tzname_offset not in tznames: - nul = tznames_raw.find(_NULL, tzname_offset) - if nul < 0: - nul = len(tznames_raw) - tznames[tzname_offset] = _std_string( - tznames_raw[tzname_offset:nul]) - ttinfo.append((ttinfo_raw[i], - bool(ttinfo_raw[i+1]), - tznames[tzname_offset])) - i += 3 - - # Now build the timezone object - if len(transitions) == 0: - ttinfo[0][0], ttinfo[0][2] - cls = type(zone, (StaticTzInfo,), dict( - zone=zone, - _utcoffset=memorized_timedelta(ttinfo[0][0]), - _tzname=ttinfo[0][2])) - else: - # Early dates use the first standard time ttinfo - i = 0 - while ttinfo[i][1]: - i += 1 - if ttinfo[i] == ttinfo[lindexes[0]]: - transitions[0] = datetime.min - else: - transitions.insert(0, datetime.min) - lindexes.insert(0, i) - - # calculate transition info - transition_info = [] - for i in range(len(transitions)): - inf = ttinfo[lindexes[i]] - utcoffset = inf[0] - if not inf[1]: - dst = 0 - else: - for j in range(i-1, -1, -1): - prev_inf = ttinfo[lindexes[j]] - if not prev_inf[1]: - break - dst = inf[0] - prev_inf[0] # dst offset - - # Bad dst? Look further. DST > 24 hours happens when - # a timzone has moved across the international dateline. - if dst <= 0 or dst > 3600*3: - for j in range(i+1, len(transitions)): - stdinf = ttinfo[lindexes[j]] - if not stdinf[1]: - dst = inf[0] - stdinf[0] - if dst > 0: - break # Found a useful std time. - - tzname = inf[2] - - # Round utcoffset and dst to the nearest minute or the - # datetime library will complain. Conversions to these timezones - # might be up to plus or minus 30 seconds out, but it is - # the best we can do. - utcoffset = int((utcoffset + 30) // 60) * 60 - dst = int((dst + 30) // 60) * 60 - transition_info.append(memorized_ttinfo(utcoffset, dst, tzname)) - - cls = type(zone, (DstTzInfo,), dict( - zone=zone, - _utc_transition_times=transitions, - _transition_info=transition_info)) - - return cls() - -if __name__ == '__main__': - import os.path - from pprint import pprint - base = os.path.join(os.path.dirname(__file__), 'zoneinfo') - tz = build_tzinfo('Australia/Melbourne', - open(os.path.join(base,'Australia','Melbourne'), 'rb')) - tz = build_tzinfo('US/Eastern', - open(os.path.join(base,'US','Eastern'), 'rb')) - pprint(tz._utc_transition_times) - #print tz.asPython(4) - #print tz.transitions_mapping diff --git a/lib/pytz/tzinfo.py b/lib/pytz/tzinfo.py deleted file mode 100644 index a1e43cdf0c01..000000000000 --- a/lib/pytz/tzinfo.py +++ /dev/null @@ -1,563 +0,0 @@ -'''Base classes and helpers for building zone specific tzinfo classes''' - -from datetime import datetime, timedelta, tzinfo -from bisect import bisect_right -try: - set -except NameError: - from sets import Set as set - -import pytz -from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError - -__all__ = [] - -_timedelta_cache = {} -def memorized_timedelta(seconds): - '''Create only one instance of each distinct timedelta''' - try: - return _timedelta_cache[seconds] - except KeyError: - delta = timedelta(seconds=seconds) - _timedelta_cache[seconds] = delta - return delta - -_epoch = datetime.utcfromtimestamp(0) -_datetime_cache = {0: _epoch} -def memorized_datetime(seconds): - '''Create only one instance of each distinct datetime''' - try: - return _datetime_cache[seconds] - except KeyError: - # NB. 
We can't just do datetime.utcfromtimestamp(seconds) as this - # fails with negative values under Windows (Bug #90096) - dt = _epoch + timedelta(seconds=seconds) - _datetime_cache[seconds] = dt - return dt - -_ttinfo_cache = {} -def memorized_ttinfo(*args): - '''Create only one instance of each distinct tuple''' - try: - return _ttinfo_cache[args] - except KeyError: - ttinfo = ( - memorized_timedelta(args[0]), - memorized_timedelta(args[1]), - args[2] - ) - _ttinfo_cache[args] = ttinfo - return ttinfo - -_notime = memorized_timedelta(0) - -def _to_seconds(td): - '''Convert a timedelta to seconds''' - return td.seconds + td.days * 24 * 60 * 60 - - -class BaseTzInfo(tzinfo): - # Overridden in subclass - _utcoffset = None - _tzname = None - zone = None - - def __str__(self): - return self.zone - - -class StaticTzInfo(BaseTzInfo): - '''A timezone that has a constant offset from UTC - - These timezones are rare, as most locations have changed their - offset at some point in their history - ''' - def fromutc(self, dt): - '''See datetime.tzinfo.fromutc''' - if dt.tzinfo is not None and dt.tzinfo is not self: - raise ValueError('fromutc: dt.tzinfo is not self') - return (dt + self._utcoffset).replace(tzinfo=self) - - def utcoffset(self, dt, is_dst=None): - '''See datetime.tzinfo.utcoffset - - is_dst is ignored for StaticTzInfo, and exists only to - retain compatibility with DstTzInfo. - ''' - return self._utcoffset - - def dst(self, dt, is_dst=None): - '''See datetime.tzinfo.dst - - is_dst is ignored for StaticTzInfo, and exists only to - retain compatibility with DstTzInfo. - ''' - return _notime - - def tzname(self, dt, is_dst=None): - '''See datetime.tzinfo.tzname - - is_dst is ignored for StaticTzInfo, and exists only to - retain compatibility with DstTzInfo. - ''' - return self._tzname - - def localize(self, dt, is_dst=False): - '''Convert naive time to local time''' - if dt.tzinfo is not None: - raise ValueError('Not naive datetime (tzinfo is already set)') - return dt.replace(tzinfo=self) - - def normalize(self, dt, is_dst=False): - '''Correct the timezone information on the given datetime. - - This is normally a no-op, as StaticTzInfo timezones never have - ambiguous cases to correct: - - >>> from pytz import timezone - >>> gmt = timezone('GMT') - >>> isinstance(gmt, StaticTzInfo) - True - >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt) - >>> gmt.normalize(dt) is dt - True - - The supported method of converting between timezones is to use - datetime.astimezone(). Currently normalize() also works: - - >>> la = timezone('America/Los_Angeles') - >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3)) - >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' - >>> gmt.normalize(dt).strftime(fmt) - '2011-05-07 08:02:03 GMT (+0000)' - ''' - if dt.tzinfo is self: - return dt - if dt.tzinfo is None: - raise ValueError('Naive time - no tzinfo set') - return dt.astimezone(self) - - def __repr__(self): - return '<StaticTzInfo %r>' % (self.zone,) - - def __reduce__(self): - # Special pickle to zone remains a singleton and to cope with - # database changes. - return pytz._p, (self.zone,) - - -class DstTzInfo(BaseTzInfo): - '''A timezone that has a variable offset from UTC - - The offset might change if daylight savings time comes into effect, - or at a point in history when the region decides to change their - timezone definition. 
- ''' - # Overridden in subclass - _utc_transition_times = None # Sorted list of DST transition times in UTC - _transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding - # to _utc_transition_times entries - zone = None - - # Set in __init__ - _tzinfos = None - _dst = None # DST offset - - def __init__(self, _inf=None, _tzinfos=None): - if _inf: - self._tzinfos = _tzinfos - self._utcoffset, self._dst, self._tzname = _inf - else: - _tzinfos = {} - self._tzinfos = _tzinfos - self._utcoffset, self._dst, self._tzname = self._transition_info[0] - _tzinfos[self._transition_info[0]] = self - for inf in self._transition_info[1:]: - if inf not in _tzinfos: - _tzinfos[inf] = self.__class__(inf, _tzinfos) - - def fromutc(self, dt): - '''See datetime.tzinfo.fromutc''' - if (dt.tzinfo is not None - and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos): - raise ValueError('fromutc: dt.tzinfo is not self') - dt = dt.replace(tzinfo=None) - idx = max(0, bisect_right(self._utc_transition_times, dt) - 1) - inf = self._transition_info[idx] - return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf]) - - def normalize(self, dt): - '''Correct the timezone information on the given datetime - - If date arithmetic crosses DST boundaries, the tzinfo - is not magically adjusted. This method normalizes the - tzinfo to the correct one. - - To test, first we need to do some setup - - >>> from pytz import timezone - >>> utc = timezone('UTC') - >>> eastern = timezone('US/Eastern') - >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' - - We next create a datetime right on an end-of-DST transition point, - the instant when the wallclocks are wound back one hour. - - >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) - >>> loc_dt = utc_dt.astimezone(eastern) - >>> loc_dt.strftime(fmt) - '2002-10-27 01:00:00 EST (-0500)' - - Now, if we subtract a few minutes from it, note that the timezone - information has not changed. - - >>> before = loc_dt - timedelta(minutes=10) - >>> before.strftime(fmt) - '2002-10-27 00:50:00 EST (-0500)' - - But we can fix that by calling the normalize method - - >>> before = eastern.normalize(before) - >>> before.strftime(fmt) - '2002-10-27 01:50:00 EDT (-0400)' - - The supported method of converting between timezones is to use - datetime.astimezone(). Currently, normalize() also works: - - >>> th = timezone('Asia/Bangkok') - >>> am = timezone('Europe/Amsterdam') - >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3)) - >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' - >>> am.normalize(dt).strftime(fmt) - '2011-05-06 20:02:03 CEST (+0200)' - ''' - if dt.tzinfo is None: - raise ValueError('Naive time - no tzinfo set') - - # Convert dt in localtime to UTC - offset = dt.tzinfo._utcoffset - dt = dt.replace(tzinfo=None) - dt = dt - offset - # convert it back, and return it - return self.fromutc(dt) - - def localize(self, dt, is_dst=False): - '''Convert naive time to local time. - - This method should be used to construct localtimes, rather - than passing a tzinfo argument to a datetime constructor. - - is_dst is used to determine the correct timezone in the ambigous - period at the end of daylight savings time. 
- - >>> from pytz import timezone - >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' - >>> amdam = timezone('Europe/Amsterdam') - >>> dt = datetime(2004, 10, 31, 2, 0, 0) - >>> loc_dt1 = amdam.localize(dt, is_dst=True) - >>> loc_dt2 = amdam.localize(dt, is_dst=False) - >>> loc_dt1.strftime(fmt) - '2004-10-31 02:00:00 CEST (+0200)' - >>> loc_dt2.strftime(fmt) - '2004-10-31 02:00:00 CET (+0100)' - >>> str(loc_dt2 - loc_dt1) - '1:00:00' - - Use is_dst=None to raise an AmbiguousTimeError for ambiguous - times at the end of daylight savings - - >>> try: - ... loc_dt1 = amdam.localize(dt, is_dst=None) - ... except AmbiguousTimeError: - ... print('Ambiguous') - Ambiguous - - is_dst defaults to False - - >>> amdam.localize(dt) == amdam.localize(dt, False) - True - - is_dst is also used to determine the correct timezone in the - wallclock times jumped over at the start of daylight savings time. - - >>> pacific = timezone('US/Pacific') - >>> dt = datetime(2008, 3, 9, 2, 0, 0) - >>> ploc_dt1 = pacific.localize(dt, is_dst=True) - >>> ploc_dt2 = pacific.localize(dt, is_dst=False) - >>> ploc_dt1.strftime(fmt) - '2008-03-09 02:00:00 PDT (-0700)' - >>> ploc_dt2.strftime(fmt) - '2008-03-09 02:00:00 PST (-0800)' - >>> str(ploc_dt2 - ploc_dt1) - '1:00:00' - - Use is_dst=None to raise a NonExistentTimeError for these skipped - times. - - >>> try: - ... loc_dt1 = pacific.localize(dt, is_dst=None) - ... except NonExistentTimeError: - ... print('Non-existent') - Non-existent - ''' - if dt.tzinfo is not None: - raise ValueError('Not naive datetime (tzinfo is already set)') - - # Find the two best possibilities. - possible_loc_dt = set() - for delta in [timedelta(days=-1), timedelta(days=1)]: - loc_dt = dt + delta - idx = max(0, bisect_right( - self._utc_transition_times, loc_dt) - 1) - inf = self._transition_info[idx] - tzinfo = self._tzinfos[inf] - loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo)) - if loc_dt.replace(tzinfo=None) == dt: - possible_loc_dt.add(loc_dt) - - if len(possible_loc_dt) == 1: - return possible_loc_dt.pop() - - # If there are no possibly correct timezones, we are attempting - # to convert a time that never happened - the time period jumped - # during the start-of-DST transition period. - if len(possible_loc_dt) == 0: - # If we refuse to guess, raise an exception. - if is_dst is None: - raise NonExistentTimeError(dt) - - # If we are forcing the pre-DST side of the DST transition, we - # obtain the correct timezone by winding the clock forward a few - # hours. - elif is_dst: - return self.localize( - dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6) - - # If we are forcing the post-DST side of the DST transition, we - # obtain the correct timezone by winding the clock back. - else: - return self.localize( - dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6) - - - # If we get this far, we have multiple possible timezones - this - # is an ambiguous case occuring during the end-of-DST transition. - - # If told to be strict, raise an exception since we have an - # ambiguous case - if is_dst is None: - raise AmbiguousTimeError(dt) - - # Filter out the possiblilities that don't match the requested - # is_dst - filtered_possible_loc_dt = [ - p for p in possible_loc_dt - if bool(p.tzinfo._dst) == is_dst - ] - - # Hopefully we only have one possibility left. Return it. 
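(Aside for readers of this hunk: the localize() machinery being removed here resolves wall-clock times that are ambiguous, repeated when DST ends, or non-existent, skipped when DST starts, via the is_dst flag, exactly as the doctests above show. A small usage sketch, assuming a separately installed pytz; the zone and timestamp are just one example of an end-of-DST fold.)

from datetime import datetime

import pytz

eastern = pytz.timezone('US/Eastern')
fold = datetime(2002, 10, 27, 1, 30)  # occurs twice: DST ended at 02:00 that morning

# Pick a side of the transition explicitly...
print(eastern.localize(fold, is_dst=True).strftime('%Z%z'))   # EDT-0400
print(eastern.localize(fold, is_dst=False).strftime('%Z%z'))  # EST-0500

# ...or pass is_dst=None to refuse to guess.
try:
    eastern.localize(fold, is_dst=None)
except pytz.exceptions.AmbiguousTimeError:
    print('ambiguous wall-clock time')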
- if len(filtered_possible_loc_dt) == 1: - return filtered_possible_loc_dt[0] - - if len(filtered_possible_loc_dt) == 0: - filtered_possible_loc_dt = list(possible_loc_dt) - - # If we get this far, we have in a wierd timezone transition - # where the clocks have been wound back but is_dst is the same - # in both (eg. Europe/Warsaw 1915 when they switched to CET). - # At this point, we just have to guess unless we allow more - # hints to be passed in (such as the UTC offset or abbreviation), - # but that is just getting silly. - # - # Choose the earliest (by UTC) applicable timezone. - sorting_keys = {} - for local_dt in filtered_possible_loc_dt: - key = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset - sorting_keys[key] = local_dt - first_key = sorted(sorting_keys)[0] - return sorting_keys[first_key] - - def utcoffset(self, dt, is_dst=None): - '''See datetime.tzinfo.utcoffset - - The is_dst parameter may be used to remove ambiguity during DST - transitions. - - >>> from pytz import timezone - >>> tz = timezone('America/St_Johns') - >>> ambiguous = datetime(2009, 10, 31, 23, 30) - - >>> tz.utcoffset(ambiguous, is_dst=False) - datetime.timedelta(-1, 73800) - - >>> tz.utcoffset(ambiguous, is_dst=True) - datetime.timedelta(-1, 77400) - - >>> try: - ... tz.utcoffset(ambiguous) - ... except AmbiguousTimeError: - ... print('Ambiguous') - Ambiguous - - ''' - if dt is None: - return None - elif dt.tzinfo is not self: - dt = self.localize(dt, is_dst) - return dt.tzinfo._utcoffset - else: - return self._utcoffset - - def dst(self, dt, is_dst=None): - '''See datetime.tzinfo.dst - - The is_dst parameter may be used to remove ambiguity during DST - transitions. - - >>> from pytz import timezone - >>> tz = timezone('America/St_Johns') - - >>> normal = datetime(2009, 9, 1) - - >>> tz.dst(normal) - datetime.timedelta(0, 3600) - >>> tz.dst(normal, is_dst=False) - datetime.timedelta(0, 3600) - >>> tz.dst(normal, is_dst=True) - datetime.timedelta(0, 3600) - - >>> ambiguous = datetime(2009, 10, 31, 23, 30) - - >>> tz.dst(ambiguous, is_dst=False) - datetime.timedelta(0) - >>> tz.dst(ambiguous, is_dst=True) - datetime.timedelta(0, 3600) - >>> try: - ... tz.dst(ambiguous) - ... except AmbiguousTimeError: - ... print('Ambiguous') - Ambiguous - - ''' - if dt is None: - return None - elif dt.tzinfo is not self: - dt = self.localize(dt, is_dst) - return dt.tzinfo._dst - else: - return self._dst - - def tzname(self, dt, is_dst=None): - '''See datetime.tzinfo.tzname - - The is_dst parameter may be used to remove ambiguity during DST - transitions. - - >>> from pytz import timezone - >>> tz = timezone('America/St_Johns') - - >>> normal = datetime(2009, 9, 1) - - >>> tz.tzname(normal) - 'NDT' - >>> tz.tzname(normal, is_dst=False) - 'NDT' - >>> tz.tzname(normal, is_dst=True) - 'NDT' - - >>> ambiguous = datetime(2009, 10, 31, 23, 30) - - >>> tz.tzname(ambiguous, is_dst=False) - 'NST' - >>> tz.tzname(ambiguous, is_dst=True) - 'NDT' - >>> try: - ... tz.tzname(ambiguous) - ... except AmbiguousTimeError: - ... 
print('Ambiguous') - Ambiguous - ''' - if dt is None: - return self.zone - elif dt.tzinfo is not self: - dt = self.localize(dt, is_dst) - return dt.tzinfo._tzname - else: - return self._tzname - - def __repr__(self): - if self._dst: - dst = 'DST' - else: - dst = 'STD' - if self._utcoffset > _notime: - return '<DstTzInfo %r %s+%s %s>' % ( - self.zone, self._tzname, self._utcoffset, dst - ) - else: - return '<DstTzInfo %r %s%s %s>' % ( - self.zone, self._tzname, self._utcoffset, dst - ) - - def __reduce__(self): - # Special pickle to zone remains a singleton and to cope with - # database changes. - return pytz._p, ( - self.zone, - _to_seconds(self._utcoffset), - _to_seconds(self._dst), - self._tzname - ) - - - -def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None): - """Factory function for unpickling pytz tzinfo instances. - - This is shared for both StaticTzInfo and DstTzInfo instances, because - database changes could cause a zones implementation to switch between - these two base classes and we can't break pickles on a pytz version - upgrade. - """ - # Raises a KeyError if zone no longer exists, which should never happen - # and would be a bug. - tz = pytz.timezone(zone) - - # A StaticTzInfo - just return it - if utcoffset is None: - return tz - - # This pickle was created from a DstTzInfo. We need to - # determine which of the list of tzinfo instances for this zone - # to use in order to restore the state of any datetime instances using - # it correctly. - utcoffset = memorized_timedelta(utcoffset) - dstoffset = memorized_timedelta(dstoffset) - try: - return tz._tzinfos[(utcoffset, dstoffset, tzname)] - except KeyError: - # The particular state requested in this timezone no longer exists. - # This indicates a corrupt pickle, or the timezone database has been - # corrected violently enough to make this particular - # (utcoffset,dstoffset) no longer exist in the zone, or the - # abbreviation has been changed. - pass - - # See if we can find an entry differing only by tzname. Abbreviations - # get changed from the initial guess by the database maintainers to - # match reality when this information is discovered. - for localized_tz in tz._tzinfos.values(): - if (localized_tz._utcoffset == utcoffset - and localized_tz._dst == dstoffset): - return localized_tz - - # This (utcoffset, dstoffset) information has been removed from the - # zone. Add it back. This might occur when the database maintainers have - # corrected incorrect information. datetime instances using this - # incorrect information will continue to do so, exactly as they were - # before being pickled. This is purely an overly paranoid safety net - I - # doubt this will ever been needed in real life. 
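(Aside for readers of this hunk: DstTzInfo.__reduce__ and the unpickler() fallback chain here exist so that a pickled localized datetime can be restored even after the shipped timezone database changes: the exact (utcoffset, dst, tzname) state is looked up first, then a match on offsets alone, and finally the state is re-added. A brief sketch of the round trip this protects, assuming pytz is importable at unpickling time; the zone and timestamp are arbitrary.)

import pickle
from datetime import datetime

import pytz

melbourne = pytz.timezone('Australia/Melbourne')
stamp = melbourne.localize(datetime(2012, 1, 15, 9, 30))

# The datetime pickles normally; its tzinfo pickles as the zone name plus
# the (utcoffset, dst, tzname) state, and unpickling routes through
# pytz's unpickler to find the matching tzinfo for the current database.
restored = pickle.loads(pickle.dumps(stamp))

assert restored == stamp
assert restored.tzinfo.zone == 'Australia/Melbourne'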
- inf = (utcoffset, dstoffset, tzname) - tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos) - return tz._tzinfos[inf] - diff --git a/lib/pytz/zoneinfo/Africa/Abidjan b/lib/pytz/zoneinfo/Africa/Abidjan deleted file mode 100644 index 65d19ec2651a..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Abidjan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Accra b/lib/pytz/zoneinfo/Africa/Accra deleted file mode 100644 index 8c473eda0b70..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Accra and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Addis_Ababa b/lib/pytz/zoneinfo/Africa/Addis_Ababa deleted file mode 100644 index 5a95ab66625c..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Addis_Ababa and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Algiers b/lib/pytz/zoneinfo/Africa/Algiers deleted file mode 100644 index c88883117117..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Algiers and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Asmara b/lib/pytz/zoneinfo/Africa/Asmara deleted file mode 100644 index d1e876e2ff93..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Asmara and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Asmera b/lib/pytz/zoneinfo/Africa/Asmera deleted file mode 100644 index d1e876e2ff93..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Asmera and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Bamako b/lib/pytz/zoneinfo/Africa/Bamako deleted file mode 100644 index da18d7137740..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Bamako and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Bangui b/lib/pytz/zoneinfo/Africa/Bangui deleted file mode 100644 index 883e597eb0f3..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Bangui and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Banjul b/lib/pytz/zoneinfo/Africa/Banjul deleted file mode 100644 index a85a7d87261f..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Banjul and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Bissau b/lib/pytz/zoneinfo/Africa/Bissau deleted file mode 100644 index ab4a195a8804..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Bissau and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Blantyre b/lib/pytz/zoneinfo/Africa/Blantyre deleted file mode 100644 index 2972580dad6a..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Blantyre and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Brazzaville b/lib/pytz/zoneinfo/Africa/Brazzaville deleted file mode 100644 index abb0c08700a2..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Brazzaville and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Bujumbura b/lib/pytz/zoneinfo/Africa/Bujumbura deleted file mode 100644 index cac565245744..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Bujumbura and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Cairo b/lib/pytz/zoneinfo/Africa/Cairo deleted file mode 100644 index d3730dfbc1d2..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Cairo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Casablanca b/lib/pytz/zoneinfo/Africa/Casablanca deleted file mode 100644 index d3d6da917d16..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Casablanca and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Ceuta b/lib/pytz/zoneinfo/Africa/Ceuta deleted file mode 100644 index c9b0c08bc86c..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Ceuta and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Conakry b/lib/pytz/zoneinfo/Africa/Conakry deleted file mode 
100644 index 75b8523fb299..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Conakry and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Dakar b/lib/pytz/zoneinfo/Africa/Dakar deleted file mode 100644 index 31104133c293..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Dakar and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Dar_es_Salaam b/lib/pytz/zoneinfo/Africa/Dar_es_Salaam deleted file mode 100644 index 720d76c8300f..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Dar_es_Salaam and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Djibouti b/lib/pytz/zoneinfo/Africa/Djibouti deleted file mode 100644 index 297d93a3e464..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Djibouti and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Douala b/lib/pytz/zoneinfo/Africa/Douala deleted file mode 100644 index 8627f2e05e95..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Douala and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/El_Aaiun b/lib/pytz/zoneinfo/Africa/El_Aaiun deleted file mode 100644 index 3275161b17a1..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/El_Aaiun and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Freetown b/lib/pytz/zoneinfo/Africa/Freetown deleted file mode 100644 index 720b8e3c905e..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Freetown and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Gaborone b/lib/pytz/zoneinfo/Africa/Gaborone deleted file mode 100644 index ffc77950e1dc..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Gaborone and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Harare b/lib/pytz/zoneinfo/Africa/Harare deleted file mode 100644 index 258b39363729..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Harare and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Johannesburg b/lib/pytz/zoneinfo/Africa/Johannesburg deleted file mode 100644 index d1bec7381574..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Johannesburg and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Juba b/lib/pytz/zoneinfo/Africa/Juba deleted file mode 100644 index 20284ff94b9b..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Juba and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Kampala b/lib/pytz/zoneinfo/Africa/Kampala deleted file mode 100644 index b018ba26f2f3..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Kampala and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Khartoum b/lib/pytz/zoneinfo/Africa/Khartoum deleted file mode 100644 index 6f62fd764cef..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Khartoum and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Kigali b/lib/pytz/zoneinfo/Africa/Kigali deleted file mode 100644 index c9623c56e853..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Kigali and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Kinshasa b/lib/pytz/zoneinfo/Africa/Kinshasa deleted file mode 100644 index e8481f3476aa..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Kinshasa and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Lagos b/lib/pytz/zoneinfo/Africa/Lagos deleted file mode 100644 index cbdc0450fc3b..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Lagos and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Libreville b/lib/pytz/zoneinfo/Africa/Libreville deleted file mode 100644 index d7691ae56f56..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Libreville and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Lome b/lib/pytz/zoneinfo/Africa/Lome deleted file 
mode 100644 index 297ec5dae378..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Lome and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Luanda b/lib/pytz/zoneinfo/Africa/Luanda deleted file mode 100644 index 576b2043cfb3..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Luanda and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Lubumbashi b/lib/pytz/zoneinfo/Africa/Lubumbashi deleted file mode 100644 index d3fab52a6ce7..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Lubumbashi and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Lusaka b/lib/pytz/zoneinfo/Africa/Lusaka deleted file mode 100644 index 87d7a95fc7f1..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Lusaka and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Malabo b/lib/pytz/zoneinfo/Africa/Malabo deleted file mode 100644 index c70de1f99d9f..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Malabo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Maputo b/lib/pytz/zoneinfo/Africa/Maputo deleted file mode 100644 index 31cfad771a5c..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Maputo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Maseru b/lib/pytz/zoneinfo/Africa/Maseru deleted file mode 100644 index 117006eead2a..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Maseru and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Mbabane b/lib/pytz/zoneinfo/Africa/Mbabane deleted file mode 100644 index be6ed60baaf8..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Mbabane and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Mogadishu b/lib/pytz/zoneinfo/Africa/Mogadishu deleted file mode 100644 index bd08463429de..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Mogadishu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Monrovia b/lib/pytz/zoneinfo/Africa/Monrovia deleted file mode 100644 index bd2fa4e6318a..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Monrovia and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Nairobi b/lib/pytz/zoneinfo/Africa/Nairobi deleted file mode 100644 index 72676bb987d4..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Nairobi and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Ndjamena b/lib/pytz/zoneinfo/Africa/Ndjamena deleted file mode 100644 index 8779590e04a6..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Ndjamena and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Niamey b/lib/pytz/zoneinfo/Africa/Niamey deleted file mode 100644 index 799381c310ca..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Niamey and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Nouakchott b/lib/pytz/zoneinfo/Africa/Nouakchott deleted file mode 100644 index ead817afcdd3..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Nouakchott and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Ouagadougou b/lib/pytz/zoneinfo/Africa/Ouagadougou deleted file mode 100644 index df782a489c29..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Ouagadougou and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Porto-Novo b/lib/pytz/zoneinfo/Africa/Porto-Novo deleted file mode 100644 index 600a30d82ac6..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Porto-Novo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Sao_Tome b/lib/pytz/zoneinfo/Africa/Sao_Tome deleted file mode 100644 index ddf7fb42e4e9..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Sao_Tome and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Timbuktu 
b/lib/pytz/zoneinfo/Africa/Timbuktu deleted file mode 100644 index da18d7137740..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Timbuktu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Tripoli b/lib/pytz/zoneinfo/Africa/Tripoli deleted file mode 100644 index 943f9a36b120..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Tripoli and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Tunis b/lib/pytz/zoneinfo/Africa/Tunis deleted file mode 100644 index dd559ee76340..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Tunis and /dev/null differ diff --git a/lib/pytz/zoneinfo/Africa/Windhoek b/lib/pytz/zoneinfo/Africa/Windhoek deleted file mode 100644 index 6f22b0a7df8c..000000000000 Binary files a/lib/pytz/zoneinfo/Africa/Windhoek and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Adak b/lib/pytz/zoneinfo/America/Adak deleted file mode 100644 index 391ec98ec0f3..000000000000 Binary files a/lib/pytz/zoneinfo/America/Adak and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Anchorage b/lib/pytz/zoneinfo/America/Anchorage deleted file mode 100644 index d14735026a09..000000000000 Binary files a/lib/pytz/zoneinfo/America/Anchorage and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Anguilla b/lib/pytz/zoneinfo/America/Anguilla deleted file mode 100644 index 20bc9464b8dc..000000000000 Binary files a/lib/pytz/zoneinfo/America/Anguilla and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Antigua b/lib/pytz/zoneinfo/America/Antigua deleted file mode 100644 index 608b635977bd..000000000000 Binary files a/lib/pytz/zoneinfo/America/Antigua and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Araguaina b/lib/pytz/zoneinfo/America/Araguaina deleted file mode 100644 index 34d9caf5a79c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Araguaina and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Buenos_Aires b/lib/pytz/zoneinfo/America/Argentina/Buenos_Aires deleted file mode 100644 index 5a52a51fc85a..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Buenos_Aires and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Catamarca b/lib/pytz/zoneinfo/America/Argentina/Catamarca deleted file mode 100644 index b9c987bb5689..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Catamarca and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/ComodRivadavia b/lib/pytz/zoneinfo/America/Argentina/ComodRivadavia deleted file mode 100644 index b9c987bb5689..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/ComodRivadavia and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Cordoba b/lib/pytz/zoneinfo/America/Argentina/Cordoba deleted file mode 100644 index a703e957d5eb..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Cordoba and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Jujuy b/lib/pytz/zoneinfo/America/Argentina/Jujuy deleted file mode 100644 index 86800f0344a0..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Jujuy and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/La_Rioja b/lib/pytz/zoneinfo/America/Argentina/La_Rioja deleted file mode 100644 index 333819a15f79..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/La_Rioja and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Mendoza b/lib/pytz/zoneinfo/America/Argentina/Mendoza deleted file mode 100644 index 76afd5909e0e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Mendoza and 
/dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Rio_Gallegos b/lib/pytz/zoneinfo/America/Argentina/Rio_Gallegos deleted file mode 100644 index 65d0230a2d06..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Rio_Gallegos and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Salta b/lib/pytz/zoneinfo/America/Argentina/Salta deleted file mode 100644 index 963917a01a26..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Salta and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/San_Juan b/lib/pytz/zoneinfo/America/Argentina/San_Juan deleted file mode 100644 index fe7007b85ca4..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/San_Juan and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/San_Luis b/lib/pytz/zoneinfo/America/Argentina/San_Luis deleted file mode 100644 index fa30a68397d2..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/San_Luis and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Tucuman b/lib/pytz/zoneinfo/America/Argentina/Tucuman deleted file mode 100644 index be7bd271639a..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Tucuman and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Argentina/Ushuaia b/lib/pytz/zoneinfo/America/Argentina/Ushuaia deleted file mode 100644 index 18590effb093..000000000000 Binary files a/lib/pytz/zoneinfo/America/Argentina/Ushuaia and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Aruba b/lib/pytz/zoneinfo/America/Aruba deleted file mode 100644 index 73bb7eaa8166..000000000000 Binary files a/lib/pytz/zoneinfo/America/Aruba and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Asuncion b/lib/pytz/zoneinfo/America/Asuncion deleted file mode 100644 index d4014a6b7893..000000000000 Binary files a/lib/pytz/zoneinfo/America/Asuncion and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Atikokan b/lib/pytz/zoneinfo/America/Atikokan deleted file mode 100644 index 1b49e37c943b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Atikokan and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Atka b/lib/pytz/zoneinfo/America/Atka deleted file mode 100644 index 391ec98ec0f3..000000000000 Binary files a/lib/pytz/zoneinfo/America/Atka and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Bahia b/lib/pytz/zoneinfo/America/Bahia deleted file mode 100644 index 9735a14a6eab..000000000000 Binary files a/lib/pytz/zoneinfo/America/Bahia and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Bahia_Banderas b/lib/pytz/zoneinfo/America/Bahia_Banderas deleted file mode 100644 index cd531078d0e8..000000000000 Binary files a/lib/pytz/zoneinfo/America/Bahia_Banderas and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Barbados b/lib/pytz/zoneinfo/America/Barbados deleted file mode 100644 index 63ca138ee469..000000000000 Binary files a/lib/pytz/zoneinfo/America/Barbados and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Belem b/lib/pytz/zoneinfo/America/Belem deleted file mode 100644 index 9c37b6a5c493..000000000000 Binary files a/lib/pytz/zoneinfo/America/Belem and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Belize b/lib/pytz/zoneinfo/America/Belize deleted file mode 100644 index a18cd39058fa..000000000000 Binary files a/lib/pytz/zoneinfo/America/Belize and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Blanc-Sablon b/lib/pytz/zoneinfo/America/Blanc-Sablon deleted file mode 100644 index 8a33789afcbb..000000000000 Binary files 
a/lib/pytz/zoneinfo/America/Blanc-Sablon and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Boa_Vista b/lib/pytz/zoneinfo/America/Boa_Vista deleted file mode 100644 index cb15afbf5c30..000000000000 Binary files a/lib/pytz/zoneinfo/America/Boa_Vista and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Bogota b/lib/pytz/zoneinfo/America/Bogota deleted file mode 100644 index 9df037ee3c44..000000000000 Binary files a/lib/pytz/zoneinfo/America/Bogota and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Boise b/lib/pytz/zoneinfo/America/Boise deleted file mode 100644 index 441afe55c413..000000000000 Binary files a/lib/pytz/zoneinfo/America/Boise and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Buenos_Aires b/lib/pytz/zoneinfo/America/Buenos_Aires deleted file mode 100644 index 5a52a51fc85a..000000000000 Binary files a/lib/pytz/zoneinfo/America/Buenos_Aires and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Cambridge_Bay b/lib/pytz/zoneinfo/America/Cambridge_Bay deleted file mode 100644 index 99c77c5b2083..000000000000 Binary files a/lib/pytz/zoneinfo/America/Cambridge_Bay and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Campo_Grande b/lib/pytz/zoneinfo/America/Campo_Grande deleted file mode 100644 index 46840981043e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Campo_Grande and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Cancun b/lib/pytz/zoneinfo/America/Cancun deleted file mode 100644 index 90993faa7051..000000000000 Binary files a/lib/pytz/zoneinfo/America/Cancun and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Caracas b/lib/pytz/zoneinfo/America/Caracas deleted file mode 100644 index d96a5c00bffe..000000000000 Binary files a/lib/pytz/zoneinfo/America/Caracas and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Catamarca b/lib/pytz/zoneinfo/America/Catamarca deleted file mode 100644 index b9c987bb5689..000000000000 Binary files a/lib/pytz/zoneinfo/America/Catamarca and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Cayenne b/lib/pytz/zoneinfo/America/Cayenne deleted file mode 100644 index 7109a98ec5ce..000000000000 Binary files a/lib/pytz/zoneinfo/America/Cayenne and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Cayman b/lib/pytz/zoneinfo/America/Cayman deleted file mode 100644 index a4095d386334..000000000000 Binary files a/lib/pytz/zoneinfo/America/Cayman and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Chicago b/lib/pytz/zoneinfo/America/Chicago deleted file mode 100644 index 71aae7246a30..000000000000 Binary files a/lib/pytz/zoneinfo/America/Chicago and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Chihuahua b/lib/pytz/zoneinfo/America/Chihuahua deleted file mode 100644 index b2687241cd05..000000000000 Binary files a/lib/pytz/zoneinfo/America/Chihuahua and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Coral_Harbour b/lib/pytz/zoneinfo/America/Coral_Harbour deleted file mode 100644 index 1b49e37c943b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Coral_Harbour and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Cordoba b/lib/pytz/zoneinfo/America/Cordoba deleted file mode 100644 index a703e957d5eb..000000000000 Binary files a/lib/pytz/zoneinfo/America/Cordoba and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Costa_Rica b/lib/pytz/zoneinfo/America/Costa_Rica deleted file mode 100644 index 2029eb6d6ff0..000000000000 Binary files a/lib/pytz/zoneinfo/America/Costa_Rica and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/America/Creston b/lib/pytz/zoneinfo/America/Creston deleted file mode 100644 index 1cf719ae8374..000000000000 Binary files a/lib/pytz/zoneinfo/America/Creston and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Cuiaba b/lib/pytz/zoneinfo/America/Cuiaba deleted file mode 100644 index 232ef670ff53..000000000000 Binary files a/lib/pytz/zoneinfo/America/Cuiaba and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Curacao b/lib/pytz/zoneinfo/America/Curacao deleted file mode 100644 index 6733d2413e3b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Curacao and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Danmarkshavn b/lib/pytz/zoneinfo/America/Danmarkshavn deleted file mode 100644 index 9feacfb1c9ff..000000000000 Binary files a/lib/pytz/zoneinfo/America/Danmarkshavn and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Dawson b/lib/pytz/zoneinfo/America/Dawson deleted file mode 100644 index fab060999850..000000000000 Binary files a/lib/pytz/zoneinfo/America/Dawson and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Dawson_Creek b/lib/pytz/zoneinfo/America/Dawson_Creek deleted file mode 100644 index c3fb166b0887..000000000000 Binary files a/lib/pytz/zoneinfo/America/Dawson_Creek and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Denver b/lib/pytz/zoneinfo/America/Denver deleted file mode 100644 index f8908febf220..000000000000 Binary files a/lib/pytz/zoneinfo/America/Denver and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Detroit b/lib/pytz/zoneinfo/America/Detroit deleted file mode 100644 index da53d46df347..000000000000 Binary files a/lib/pytz/zoneinfo/America/Detroit and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Dominica b/lib/pytz/zoneinfo/America/Dominica deleted file mode 100644 index 7783831b49c4..000000000000 Binary files a/lib/pytz/zoneinfo/America/Dominica and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Edmonton b/lib/pytz/zoneinfo/America/Edmonton deleted file mode 100644 index 3fa0579891a9..000000000000 Binary files a/lib/pytz/zoneinfo/America/Edmonton and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Eirunepe b/lib/pytz/zoneinfo/America/Eirunepe deleted file mode 100644 index 4e586a341f3a..000000000000 Binary files a/lib/pytz/zoneinfo/America/Eirunepe and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/El_Salvador b/lib/pytz/zoneinfo/America/El_Salvador deleted file mode 100644 index ac774e83f46b..000000000000 Binary files a/lib/pytz/zoneinfo/America/El_Salvador and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Ensenada b/lib/pytz/zoneinfo/America/Ensenada deleted file mode 100644 index fffdc24bfc4c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Ensenada and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Fort_Wayne b/lib/pytz/zoneinfo/America/Fort_Wayne deleted file mode 100644 index aa3dfc43730e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Fort_Wayne and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Fortaleza b/lib/pytz/zoneinfo/America/Fortaleza deleted file mode 100644 index 2598c53559d3..000000000000 Binary files a/lib/pytz/zoneinfo/America/Fortaleza and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Glace_Bay b/lib/pytz/zoneinfo/America/Glace_Bay deleted file mode 100644 index 48412a4cbf92..000000000000 Binary files a/lib/pytz/zoneinfo/America/Glace_Bay and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Godthab b/lib/pytz/zoneinfo/America/Godthab deleted file mode 100644 index 
85623ce7a7df..000000000000 Binary files a/lib/pytz/zoneinfo/America/Godthab and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Goose_Bay b/lib/pytz/zoneinfo/America/Goose_Bay deleted file mode 100644 index 83e5a9b398fd..000000000000 Binary files a/lib/pytz/zoneinfo/America/Goose_Bay and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Grand_Turk b/lib/pytz/zoneinfo/America/Grand_Turk deleted file mode 100644 index 733c17984b41..000000000000 Binary files a/lib/pytz/zoneinfo/America/Grand_Turk and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Grenada b/lib/pytz/zoneinfo/America/Grenada deleted file mode 100644 index df1b6895f9c1..000000000000 Binary files a/lib/pytz/zoneinfo/America/Grenada and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Guadeloupe b/lib/pytz/zoneinfo/America/Guadeloupe deleted file mode 100644 index 15c0f1f7450c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Guadeloupe and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Guatemala b/lib/pytz/zoneinfo/America/Guatemala deleted file mode 100644 index 6118b5ce2d95..000000000000 Binary files a/lib/pytz/zoneinfo/America/Guatemala and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Guayaquil b/lib/pytz/zoneinfo/America/Guayaquil deleted file mode 100644 index e6de7f8da297..000000000000 Binary files a/lib/pytz/zoneinfo/America/Guayaquil and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Guyana b/lib/pytz/zoneinfo/America/Guyana deleted file mode 100644 index 5f98c4a0e88b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Guyana and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Halifax b/lib/pytz/zoneinfo/America/Halifax deleted file mode 100644 index 756099abe6ce..000000000000 Binary files a/lib/pytz/zoneinfo/America/Halifax and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Havana b/lib/pytz/zoneinfo/America/Havana deleted file mode 100644 index 96eaf81ee9e4..000000000000 Binary files a/lib/pytz/zoneinfo/America/Havana and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Hermosillo b/lib/pytz/zoneinfo/America/Hermosillo deleted file mode 100644 index 26c269d96748..000000000000 Binary files a/lib/pytz/zoneinfo/America/Hermosillo and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Indianapolis b/lib/pytz/zoneinfo/America/Indiana/Indianapolis deleted file mode 100644 index aa3dfc43730e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Indianapolis and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Knox b/lib/pytz/zoneinfo/America/Indiana/Knox deleted file mode 100644 index 33169f459638..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Knox and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Marengo b/lib/pytz/zoneinfo/America/Indiana/Marengo deleted file mode 100644 index 255b739718d2..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Marengo and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Petersburg b/lib/pytz/zoneinfo/America/Indiana/Petersburg deleted file mode 100644 index c611106d57e4..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Petersburg and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Tell_City b/lib/pytz/zoneinfo/America/Indiana/Tell_City deleted file mode 100644 index 97e319e34350..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Tell_City and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Vevay b/lib/pytz/zoneinfo/America/Indiana/Vevay deleted file mode 
100644 index de6167c08291..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Vevay and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Vincennes b/lib/pytz/zoneinfo/America/Indiana/Vincennes deleted file mode 100644 index b79f6725b62a..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Vincennes and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indiana/Winamac b/lib/pytz/zoneinfo/America/Indiana/Winamac deleted file mode 100644 index b2611e75ee8b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indiana/Winamac and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Indianapolis b/lib/pytz/zoneinfo/America/Indianapolis deleted file mode 100644 index aa3dfc43730e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Indianapolis and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Inuvik b/lib/pytz/zoneinfo/America/Inuvik deleted file mode 100644 index c17af37f5f60..000000000000 Binary files a/lib/pytz/zoneinfo/America/Inuvik and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Iqaluit b/lib/pytz/zoneinfo/America/Iqaluit deleted file mode 100644 index cea5c2e0f30f..000000000000 Binary files a/lib/pytz/zoneinfo/America/Iqaluit and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Jamaica b/lib/pytz/zoneinfo/America/Jamaica deleted file mode 100644 index 09e3eb93996d..000000000000 Binary files a/lib/pytz/zoneinfo/America/Jamaica and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Jujuy b/lib/pytz/zoneinfo/America/Jujuy deleted file mode 100644 index 86800f0344a0..000000000000 Binary files a/lib/pytz/zoneinfo/America/Jujuy and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Juneau b/lib/pytz/zoneinfo/America/Juneau deleted file mode 100644 index 48bd37e88e27..000000000000 Binary files a/lib/pytz/zoneinfo/America/Juneau and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Kentucky/Louisville b/lib/pytz/zoneinfo/America/Kentucky/Louisville deleted file mode 100644 index 65e7e1904951..000000000000 Binary files a/lib/pytz/zoneinfo/America/Kentucky/Louisville and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Kentucky/Monticello b/lib/pytz/zoneinfo/America/Kentucky/Monticello deleted file mode 100644 index fc2f1b0df86e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Kentucky/Monticello and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Knox_IN b/lib/pytz/zoneinfo/America/Knox_IN deleted file mode 100644 index 33169f459638..000000000000 Binary files a/lib/pytz/zoneinfo/America/Knox_IN and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Kralendijk b/lib/pytz/zoneinfo/America/Kralendijk deleted file mode 100644 index 6733d2413e3b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Kralendijk and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/La_Paz b/lib/pytz/zoneinfo/America/La_Paz deleted file mode 100644 index 2a5a15e4c4f0..000000000000 Binary files a/lib/pytz/zoneinfo/America/La_Paz and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Lima b/lib/pytz/zoneinfo/America/Lima deleted file mode 100644 index a37eeff7de59..000000000000 Binary files a/lib/pytz/zoneinfo/America/Lima and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Los_Angeles b/lib/pytz/zoneinfo/America/Los_Angeles deleted file mode 100644 index 3b7ce1dceebf..000000000000 Binary files a/lib/pytz/zoneinfo/America/Los_Angeles and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Louisville b/lib/pytz/zoneinfo/America/Louisville deleted file mode 100644 index 
65e7e1904951..000000000000 Binary files a/lib/pytz/zoneinfo/America/Louisville and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Lower_Princes b/lib/pytz/zoneinfo/America/Lower_Princes deleted file mode 100644 index 6733d2413e3b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Lower_Princes and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Maceio b/lib/pytz/zoneinfo/America/Maceio deleted file mode 100644 index b5201e81d42f..000000000000 Binary files a/lib/pytz/zoneinfo/America/Maceio and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Managua b/lib/pytz/zoneinfo/America/Managua deleted file mode 100644 index f1c35040ce46..000000000000 Binary files a/lib/pytz/zoneinfo/America/Managua and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Manaus b/lib/pytz/zoneinfo/America/Manaus deleted file mode 100644 index 112921184139..000000000000 Binary files a/lib/pytz/zoneinfo/America/Manaus and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Marigot b/lib/pytz/zoneinfo/America/Marigot deleted file mode 100644 index 15c0f1f7450c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Marigot and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Martinique b/lib/pytz/zoneinfo/America/Martinique deleted file mode 100644 index c223ef5c2011..000000000000 Binary files a/lib/pytz/zoneinfo/America/Martinique and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Matamoros b/lib/pytz/zoneinfo/America/Matamoros deleted file mode 100644 index 5c59984def29..000000000000 Binary files a/lib/pytz/zoneinfo/America/Matamoros and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Mazatlan b/lib/pytz/zoneinfo/America/Mazatlan deleted file mode 100644 index 43ee12d84a7c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Mazatlan and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Mendoza b/lib/pytz/zoneinfo/America/Mendoza deleted file mode 100644 index 76afd5909e0e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Mendoza and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Menominee b/lib/pytz/zoneinfo/America/Menominee deleted file mode 100644 index 438f5ff0b846..000000000000 Binary files a/lib/pytz/zoneinfo/America/Menominee and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Merida b/lib/pytz/zoneinfo/America/Merida deleted file mode 100644 index b46298e1f202..000000000000 Binary files a/lib/pytz/zoneinfo/America/Merida and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Metlakatla b/lib/pytz/zoneinfo/America/Metlakatla deleted file mode 100644 index 4145b9a58164..000000000000 Binary files a/lib/pytz/zoneinfo/America/Metlakatla and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Mexico_City b/lib/pytz/zoneinfo/America/Mexico_City deleted file mode 100644 index 1434ab08804d..000000000000 Binary files a/lib/pytz/zoneinfo/America/Mexico_City and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Miquelon b/lib/pytz/zoneinfo/America/Miquelon deleted file mode 100644 index 52cd391ebb47..000000000000 Binary files a/lib/pytz/zoneinfo/America/Miquelon and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Moncton b/lib/pytz/zoneinfo/America/Moncton deleted file mode 100644 index b51125ebf112..000000000000 Binary files a/lib/pytz/zoneinfo/America/Moncton and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Monterrey b/lib/pytz/zoneinfo/America/Monterrey deleted file mode 100644 index 7dc50577749b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Monterrey and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/America/Montevideo b/lib/pytz/zoneinfo/America/Montevideo deleted file mode 100644 index 4745f0dfb2d9..000000000000 Binary files a/lib/pytz/zoneinfo/America/Montevideo and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Montreal b/lib/pytz/zoneinfo/America/Montreal deleted file mode 100644 index 47633bd49f89..000000000000 Binary files a/lib/pytz/zoneinfo/America/Montreal and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Montserrat b/lib/pytz/zoneinfo/America/Montserrat deleted file mode 100644 index ee5043a8e3f9..000000000000 Binary files a/lib/pytz/zoneinfo/America/Montserrat and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Nassau b/lib/pytz/zoneinfo/America/Nassau deleted file mode 100644 index aff956c8ec7e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Nassau and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/New_York b/lib/pytz/zoneinfo/America/New_York deleted file mode 100644 index b2c2377f4e87..000000000000 Binary files a/lib/pytz/zoneinfo/America/New_York and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Nipigon b/lib/pytz/zoneinfo/America/Nipigon deleted file mode 100644 index 619f1f75905e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Nipigon and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Nome b/lib/pytz/zoneinfo/America/Nome deleted file mode 100644 index b682bfd9cd89..000000000000 Binary files a/lib/pytz/zoneinfo/America/Nome and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Noronha b/lib/pytz/zoneinfo/America/Noronha deleted file mode 100644 index c60239009e46..000000000000 Binary files a/lib/pytz/zoneinfo/America/Noronha and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/North_Dakota/Beulah b/lib/pytz/zoneinfo/America/North_Dakota/Beulah deleted file mode 100644 index c1e3b025b916..000000000000 Binary files a/lib/pytz/zoneinfo/America/North_Dakota/Beulah and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/North_Dakota/Center b/lib/pytz/zoneinfo/America/North_Dakota/Center deleted file mode 100644 index 786ba1778acb..000000000000 Binary files a/lib/pytz/zoneinfo/America/North_Dakota/Center and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/North_Dakota/New_Salem b/lib/pytz/zoneinfo/America/North_Dakota/New_Salem deleted file mode 100644 index 3488e466d522..000000000000 Binary files a/lib/pytz/zoneinfo/America/North_Dakota/New_Salem and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Ojinaga b/lib/pytz/zoneinfo/America/Ojinaga deleted file mode 100644 index 37d78301bd10..000000000000 Binary files a/lib/pytz/zoneinfo/America/Ojinaga and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Panama b/lib/pytz/zoneinfo/America/Panama deleted file mode 100644 index 3a4ff2aecf89..000000000000 Binary files a/lib/pytz/zoneinfo/America/Panama and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Pangnirtung b/lib/pytz/zoneinfo/America/Pangnirtung deleted file mode 100644 index 80a6009381ef..000000000000 Binary files a/lib/pytz/zoneinfo/America/Pangnirtung and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Paramaribo b/lib/pytz/zoneinfo/America/Paramaribo deleted file mode 100644 index 6f889ccaf1ff..000000000000 Binary files a/lib/pytz/zoneinfo/America/Paramaribo and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Phoenix b/lib/pytz/zoneinfo/America/Phoenix deleted file mode 100644 index 67589026c21c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Phoenix and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/America/Port-au-Prince b/lib/pytz/zoneinfo/America/Port-au-Prince deleted file mode 100644 index 189fd5252cf4..000000000000 Binary files a/lib/pytz/zoneinfo/America/Port-au-Prince and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Port_of_Spain b/lib/pytz/zoneinfo/America/Port_of_Spain deleted file mode 100644 index bdedd1bd9bc8..000000000000 Binary files a/lib/pytz/zoneinfo/America/Port_of_Spain and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Porto_Acre b/lib/pytz/zoneinfo/America/Porto_Acre deleted file mode 100644 index 7be212b1e651..000000000000 Binary files a/lib/pytz/zoneinfo/America/Porto_Acre and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Porto_Velho b/lib/pytz/zoneinfo/America/Porto_Velho deleted file mode 100644 index 12774792326b..000000000000 Binary files a/lib/pytz/zoneinfo/America/Porto_Velho and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Puerto_Rico b/lib/pytz/zoneinfo/America/Puerto_Rico deleted file mode 100644 index eada37a111c0..000000000000 Binary files a/lib/pytz/zoneinfo/America/Puerto_Rico and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Rainy_River b/lib/pytz/zoneinfo/America/Rainy_River deleted file mode 100644 index e006a30dbf35..000000000000 Binary files a/lib/pytz/zoneinfo/America/Rainy_River and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Rankin_Inlet b/lib/pytz/zoneinfo/America/Rankin_Inlet deleted file mode 100644 index 99195714c4b3..000000000000 Binary files a/lib/pytz/zoneinfo/America/Rankin_Inlet and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Recife b/lib/pytz/zoneinfo/America/Recife deleted file mode 100644 index 0903a7719535..000000000000 Binary files a/lib/pytz/zoneinfo/America/Recife and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Regina b/lib/pytz/zoneinfo/America/Regina deleted file mode 100644 index 20c9c84df491..000000000000 Binary files a/lib/pytz/zoneinfo/America/Regina and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Resolute b/lib/pytz/zoneinfo/America/Resolute deleted file mode 100644 index 7713f5bead43..000000000000 Binary files a/lib/pytz/zoneinfo/America/Resolute and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Rio_Branco b/lib/pytz/zoneinfo/America/Rio_Branco deleted file mode 100644 index 7be212b1e651..000000000000 Binary files a/lib/pytz/zoneinfo/America/Rio_Branco and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Rosario b/lib/pytz/zoneinfo/America/Rosario deleted file mode 100644 index a703e957d5eb..000000000000 Binary files a/lib/pytz/zoneinfo/America/Rosario and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Santa_Isabel b/lib/pytz/zoneinfo/America/Santa_Isabel deleted file mode 100644 index 80a2f2d5b1df..000000000000 Binary files a/lib/pytz/zoneinfo/America/Santa_Isabel and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Santarem b/lib/pytz/zoneinfo/America/Santarem deleted file mode 100644 index 45419528f9df..000000000000 Binary files a/lib/pytz/zoneinfo/America/Santarem and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Santiago b/lib/pytz/zoneinfo/America/Santiago deleted file mode 100644 index de74ddf1a095..000000000000 Binary files a/lib/pytz/zoneinfo/America/Santiago and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Santo_Domingo b/lib/pytz/zoneinfo/America/Santo_Domingo deleted file mode 100644 index 23ace9adc3e2..000000000000 Binary files a/lib/pytz/zoneinfo/America/Santo_Domingo and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/America/Sao_Paulo b/lib/pytz/zoneinfo/America/Sao_Paulo deleted file mode 100644 index 8df63a17bd46..000000000000 Binary files a/lib/pytz/zoneinfo/America/Sao_Paulo and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Scoresbysund b/lib/pytz/zoneinfo/America/Scoresbysund deleted file mode 100644 index fae3757ce9da..000000000000 Binary files a/lib/pytz/zoneinfo/America/Scoresbysund and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Shiprock b/lib/pytz/zoneinfo/America/Shiprock deleted file mode 100644 index f8908febf220..000000000000 Binary files a/lib/pytz/zoneinfo/America/Shiprock and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Sitka b/lib/pytz/zoneinfo/America/Sitka deleted file mode 100644 index f2ae47a323e7..000000000000 Binary files a/lib/pytz/zoneinfo/America/Sitka and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/St_Barthelemy b/lib/pytz/zoneinfo/America/St_Barthelemy deleted file mode 100644 index 15c0f1f7450c..000000000000 Binary files a/lib/pytz/zoneinfo/America/St_Barthelemy and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/St_Johns b/lib/pytz/zoneinfo/America/St_Johns deleted file mode 100644 index e7a18d601d02..000000000000 Binary files a/lib/pytz/zoneinfo/America/St_Johns and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/St_Kitts b/lib/pytz/zoneinfo/America/St_Kitts deleted file mode 100644 index 911d2221b6a2..000000000000 Binary files a/lib/pytz/zoneinfo/America/St_Kitts and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/St_Lucia b/lib/pytz/zoneinfo/America/St_Lucia deleted file mode 100644 index b37a1cf7e807..000000000000 Binary files a/lib/pytz/zoneinfo/America/St_Lucia and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/St_Thomas b/lib/pytz/zoneinfo/America/St_Thomas deleted file mode 100644 index 482f0b549fb8..000000000000 Binary files a/lib/pytz/zoneinfo/America/St_Thomas and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/St_Vincent b/lib/pytz/zoneinfo/America/St_Vincent deleted file mode 100644 index e553af77b534..000000000000 Binary files a/lib/pytz/zoneinfo/America/St_Vincent and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Swift_Current b/lib/pytz/zoneinfo/America/Swift_Current deleted file mode 100644 index 8e9ef255eeb1..000000000000 Binary files a/lib/pytz/zoneinfo/America/Swift_Current and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Tegucigalpa b/lib/pytz/zoneinfo/America/Tegucigalpa deleted file mode 100644 index 477e93950c2f..000000000000 Binary files a/lib/pytz/zoneinfo/America/Tegucigalpa and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Thule b/lib/pytz/zoneinfo/America/Thule deleted file mode 100644 index 2969ebe59bd2..000000000000 Binary files a/lib/pytz/zoneinfo/America/Thule and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Thunder_Bay b/lib/pytz/zoneinfo/America/Thunder_Bay deleted file mode 100644 index 34f750b4df72..000000000000 Binary files a/lib/pytz/zoneinfo/America/Thunder_Bay and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Tijuana b/lib/pytz/zoneinfo/America/Tijuana deleted file mode 100644 index fffdc24bfc4c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Tijuana and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Toronto b/lib/pytz/zoneinfo/America/Toronto deleted file mode 100644 index 1698477a4877..000000000000 Binary files a/lib/pytz/zoneinfo/America/Toronto and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Tortola b/lib/pytz/zoneinfo/America/Tortola deleted 
file mode 100644 index 6f9d9323858c..000000000000 Binary files a/lib/pytz/zoneinfo/America/Tortola and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Vancouver b/lib/pytz/zoneinfo/America/Vancouver deleted file mode 100644 index 0c1fa5269049..000000000000 Binary files a/lib/pytz/zoneinfo/America/Vancouver and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Virgin b/lib/pytz/zoneinfo/America/Virgin deleted file mode 100644 index 482f0b549fb8..000000000000 Binary files a/lib/pytz/zoneinfo/America/Virgin and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Whitehorse b/lib/pytz/zoneinfo/America/Whitehorse deleted file mode 100644 index 15216d55ff54..000000000000 Binary files a/lib/pytz/zoneinfo/America/Whitehorse and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Winnipeg b/lib/pytz/zoneinfo/America/Winnipeg deleted file mode 100644 index 2d22791686e8..000000000000 Binary files a/lib/pytz/zoneinfo/America/Winnipeg and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Yakutat b/lib/pytz/zoneinfo/America/Yakutat deleted file mode 100644 index 80716027731e..000000000000 Binary files a/lib/pytz/zoneinfo/America/Yakutat and /dev/null differ diff --git a/lib/pytz/zoneinfo/America/Yellowknife b/lib/pytz/zoneinfo/America/Yellowknife deleted file mode 100644 index 947bec914be3..000000000000 Binary files a/lib/pytz/zoneinfo/America/Yellowknife and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Casey b/lib/pytz/zoneinfo/Antarctica/Casey deleted file mode 100644 index 8ebf0598ff90..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Casey and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Davis b/lib/pytz/zoneinfo/Antarctica/Davis deleted file mode 100644 index cd7acad690b6..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Davis and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/DumontDUrville b/lib/pytz/zoneinfo/Antarctica/DumontDUrville deleted file mode 100644 index 5ea18e6e776a..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/DumontDUrville and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Macquarie b/lib/pytz/zoneinfo/Antarctica/Macquarie deleted file mode 100644 index 43e01c0989f7..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Macquarie and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Mawson b/lib/pytz/zoneinfo/Antarctica/Mawson deleted file mode 100644 index 48e24e1e5ef8..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Mawson and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/McMurdo b/lib/pytz/zoneinfo/Antarctica/McMurdo deleted file mode 100644 index 62ac42f078d5..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/McMurdo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Palmer b/lib/pytz/zoneinfo/Antarctica/Palmer deleted file mode 100644 index c51d91e64ccf..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Palmer and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Rothera b/lib/pytz/zoneinfo/Antarctica/Rothera deleted file mode 100644 index b5dc735639df..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Rothera and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/South_Pole b/lib/pytz/zoneinfo/Antarctica/South_Pole deleted file mode 100644 index 62ac42f078d5..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/South_Pole and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Syowa b/lib/pytz/zoneinfo/Antarctica/Syowa deleted file mode 100644 index ba6e5f388779..000000000000 
Binary files a/lib/pytz/zoneinfo/Antarctica/Syowa and /dev/null differ diff --git a/lib/pytz/zoneinfo/Antarctica/Vostok b/lib/pytz/zoneinfo/Antarctica/Vostok deleted file mode 100644 index e19e2b7cdb55..000000000000 Binary files a/lib/pytz/zoneinfo/Antarctica/Vostok and /dev/null differ diff --git a/lib/pytz/zoneinfo/Arctic/Longyearbyen b/lib/pytz/zoneinfo/Arctic/Longyearbyen deleted file mode 100644 index 6326961453f4..000000000000 Binary files a/lib/pytz/zoneinfo/Arctic/Longyearbyen and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Aden b/lib/pytz/zoneinfo/Asia/Aden deleted file mode 100644 index 5aa5a324a4ca..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Aden and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Almaty b/lib/pytz/zoneinfo/Asia/Almaty deleted file mode 100644 index 52f941e266a9..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Almaty and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Amman b/lib/pytz/zoneinfo/Asia/Amman deleted file mode 100644 index b62664e6755a..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Amman and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Anadyr b/lib/pytz/zoneinfo/Asia/Anadyr deleted file mode 100644 index 2841a6376d9f..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Anadyr and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Aqtau b/lib/pytz/zoneinfo/Asia/Aqtau deleted file mode 100644 index 27a3d50d3739..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Aqtau and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Aqtobe b/lib/pytz/zoneinfo/Asia/Aqtobe deleted file mode 100644 index 3683be2db87b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Aqtobe and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Ashgabat b/lib/pytz/zoneinfo/Asia/Ashgabat deleted file mode 100644 index 589dbc18ea2d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Ashgabat and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Ashkhabad b/lib/pytz/zoneinfo/Asia/Ashkhabad deleted file mode 100644 index 589dbc18ea2d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Ashkhabad and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Baghdad b/lib/pytz/zoneinfo/Asia/Baghdad deleted file mode 100644 index 3ad361495ce5..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Baghdad and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Bahrain b/lib/pytz/zoneinfo/Asia/Bahrain deleted file mode 100644 index d87b7ce73915..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Bahrain and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Baku b/lib/pytz/zoneinfo/Asia/Baku deleted file mode 100644 index 72ae96e9cc95..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Baku and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Bangkok b/lib/pytz/zoneinfo/Asia/Bangkok deleted file mode 100644 index 44a1018ef638..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Bangkok and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Beirut b/lib/pytz/zoneinfo/Asia/Beirut deleted file mode 100644 index c1270bc1adce..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Beirut and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Bishkek b/lib/pytz/zoneinfo/Asia/Bishkek deleted file mode 100644 index fc827d887134..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Bishkek and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Brunei b/lib/pytz/zoneinfo/Asia/Brunei deleted file mode 100644 index d6e713d436fb..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Brunei and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Calcutta 
b/lib/pytz/zoneinfo/Asia/Calcutta deleted file mode 100644 index bc909c92c14d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Calcutta and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Choibalsan b/lib/pytz/zoneinfo/Asia/Choibalsan deleted file mode 100644 index 043b7ed5c2d7..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Choibalsan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Chongqing b/lib/pytz/zoneinfo/Asia/Chongqing deleted file mode 100644 index 8a7a28a480a4..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Chongqing and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Chungking b/lib/pytz/zoneinfo/Asia/Chungking deleted file mode 100644 index 8a7a28a480a4..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Chungking and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Colombo b/lib/pytz/zoneinfo/Asia/Colombo deleted file mode 100644 index c71c0503d999..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Colombo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Dacca b/lib/pytz/zoneinfo/Asia/Dacca deleted file mode 100644 index 52e98ffc2c14..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Dacca and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Damascus b/lib/pytz/zoneinfo/Asia/Damascus deleted file mode 100644 index 4b610b5a0836..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Damascus and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Dhaka b/lib/pytz/zoneinfo/Asia/Dhaka deleted file mode 100644 index 52e98ffc2c14..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Dhaka and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Dili b/lib/pytz/zoneinfo/Asia/Dili deleted file mode 100644 index 37bfc4b2786b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Dili and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Dubai b/lib/pytz/zoneinfo/Asia/Dubai deleted file mode 100644 index 53f70d57a15a..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Dubai and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Dushanbe b/lib/pytz/zoneinfo/Asia/Dushanbe deleted file mode 100644 index c65ff2a7b3e5..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Dushanbe and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Gaza b/lib/pytz/zoneinfo/Asia/Gaza deleted file mode 100644 index 6f1cabc29150..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Gaza and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Harbin b/lib/pytz/zoneinfo/Asia/Harbin deleted file mode 100644 index 11e352a51106..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Harbin and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Hebron b/lib/pytz/zoneinfo/Asia/Hebron deleted file mode 100644 index a073e06ef72e..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Hebron and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Ho_Chi_Minh b/lib/pytz/zoneinfo/Asia/Ho_Chi_Minh deleted file mode 100644 index 6401a10256bb..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Ho_Chi_Minh and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Hong_Kong b/lib/pytz/zoneinfo/Asia/Hong_Kong deleted file mode 100644 index 45db6e226144..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Hong_Kong and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Hovd b/lib/pytz/zoneinfo/Asia/Hovd deleted file mode 100644 index 27fab05c1912..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Hovd and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Irkutsk b/lib/pytz/zoneinfo/Asia/Irkutsk deleted file mode 100644 index 7c38e7fd6b28..000000000000 Binary files 
a/lib/pytz/zoneinfo/Asia/Irkutsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Istanbul b/lib/pytz/zoneinfo/Asia/Istanbul deleted file mode 100644 index 864099556bc9..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Istanbul and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Jakarta b/lib/pytz/zoneinfo/Asia/Jakarta deleted file mode 100644 index a4cbe0c576a1..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Jakarta and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Jayapura b/lib/pytz/zoneinfo/Asia/Jayapura deleted file mode 100644 index 0e79d3178813..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Jayapura and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Jerusalem b/lib/pytz/zoneinfo/Asia/Jerusalem deleted file mode 100644 index 4e6410f2b2d3..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Jerusalem and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kabul b/lib/pytz/zoneinfo/Asia/Kabul deleted file mode 100644 index 7392c0497a75..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kabul and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kamchatka b/lib/pytz/zoneinfo/Asia/Kamchatka deleted file mode 100644 index 090bf488957b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kamchatka and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Karachi b/lib/pytz/zoneinfo/Asia/Karachi deleted file mode 100644 index a8ff8cb769d1..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Karachi and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kashgar b/lib/pytz/zoneinfo/Asia/Kashgar deleted file mode 100644 index 25e64945cffa..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kashgar and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kathmandu b/lib/pytz/zoneinfo/Asia/Kathmandu deleted file mode 100644 index 65c7b6325833..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kathmandu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Katmandu b/lib/pytz/zoneinfo/Asia/Katmandu deleted file mode 100644 index 65c7b6325833..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Katmandu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kolkata b/lib/pytz/zoneinfo/Asia/Kolkata deleted file mode 100644 index bc909c92c14d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kolkata and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Krasnoyarsk b/lib/pytz/zoneinfo/Asia/Krasnoyarsk deleted file mode 100644 index 580e8dd2f43e..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Krasnoyarsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kuala_Lumpur b/lib/pytz/zoneinfo/Asia/Kuala_Lumpur deleted file mode 100644 index 41bba37b0c5f..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kuala_Lumpur and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kuching b/lib/pytz/zoneinfo/Asia/Kuching deleted file mode 100644 index 272f46546e9d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kuching and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Kuwait b/lib/pytz/zoneinfo/Asia/Kuwait deleted file mode 100644 index 1dab31cba01d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Kuwait and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Macao b/lib/pytz/zoneinfo/Asia/Macao deleted file mode 100644 index 7c9377957872..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Macao and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Macau b/lib/pytz/zoneinfo/Asia/Macau deleted file mode 100644 index 7c9377957872..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Macau and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/Asia/Magadan b/lib/pytz/zoneinfo/Asia/Magadan deleted file mode 100644 index e3c76b57f59f..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Magadan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Makassar b/lib/pytz/zoneinfo/Asia/Makassar deleted file mode 100644 index f35823303bcd..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Makassar and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Manila b/lib/pytz/zoneinfo/Asia/Manila deleted file mode 100644 index 0e90ba6326a2..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Manila and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Muscat b/lib/pytz/zoneinfo/Asia/Muscat deleted file mode 100644 index 729d95e1c083..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Muscat and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Nicosia b/lib/pytz/zoneinfo/Asia/Nicosia deleted file mode 100644 index f7f10ab7665e..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Nicosia and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Novokuznetsk b/lib/pytz/zoneinfo/Asia/Novokuznetsk deleted file mode 100644 index f78c1f88bf65..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Novokuznetsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Novosibirsk b/lib/pytz/zoneinfo/Asia/Novosibirsk deleted file mode 100644 index c401a9817dab..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Novosibirsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Omsk b/lib/pytz/zoneinfo/Asia/Omsk deleted file mode 100644 index a3dbb4bb1c5a..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Omsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Oral b/lib/pytz/zoneinfo/Asia/Oral deleted file mode 100644 index 8da2a1dee74d..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Oral and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Phnom_Penh b/lib/pytz/zoneinfo/Asia/Phnom_Penh deleted file mode 100644 index 5a52722a1612..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Phnom_Penh and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Pontianak b/lib/pytz/zoneinfo/Asia/Pontianak deleted file mode 100644 index 3e882dc35705..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Pontianak and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Pyongyang b/lib/pytz/zoneinfo/Asia/Pyongyang deleted file mode 100644 index 9dbd3c1ae006..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Pyongyang and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Qatar b/lib/pytz/zoneinfo/Asia/Qatar deleted file mode 100644 index 49668c2583c8..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Qatar and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Qyzylorda b/lib/pytz/zoneinfo/Asia/Qyzylorda deleted file mode 100644 index fc3bf46bccc5..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Qyzylorda and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Rangoon b/lib/pytz/zoneinfo/Asia/Rangoon deleted file mode 100644 index efae2612a87a..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Rangoon and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Riyadh b/lib/pytz/zoneinfo/Asia/Riyadh deleted file mode 100644 index 6ebe393d0b86..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Riyadh and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Riyadh87 b/lib/pytz/zoneinfo/Asia/Riyadh87 deleted file mode 100644 index ebe16c49957c..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Riyadh87 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Riyadh88 b/lib/pytz/zoneinfo/Asia/Riyadh88 deleted file mode 100644 
index 7f2224bbb29b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Riyadh88 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Riyadh89 b/lib/pytz/zoneinfo/Asia/Riyadh89 deleted file mode 100644 index a50ca48a91b9..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Riyadh89 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Saigon b/lib/pytz/zoneinfo/Asia/Saigon deleted file mode 100644 index 6401a10256bb..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Saigon and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Sakhalin b/lib/pytz/zoneinfo/Asia/Sakhalin deleted file mode 100644 index f5105a363cfd..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Sakhalin and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Samarkand b/lib/pytz/zoneinfo/Asia/Samarkand deleted file mode 100644 index 191c07c12755..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Samarkand and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Seoul b/lib/pytz/zoneinfo/Asia/Seoul deleted file mode 100644 index 96bb0c36d7e3..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Seoul and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Shanghai b/lib/pytz/zoneinfo/Asia/Shanghai deleted file mode 100644 index 240c4c6f76da..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Shanghai and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Singapore b/lib/pytz/zoneinfo/Asia/Singapore deleted file mode 100644 index a6f2db8f3a88..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Singapore and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Taipei b/lib/pytz/zoneinfo/Asia/Taipei deleted file mode 100644 index 70cfb27ca91f..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Taipei and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Tashkent b/lib/pytz/zoneinfo/Asia/Tashkent deleted file mode 100644 index 5bc806238a9e..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Tashkent and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Tbilisi b/lib/pytz/zoneinfo/Asia/Tbilisi deleted file mode 100644 index d7e40548acec..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Tbilisi and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Tehran b/lib/pytz/zoneinfo/Asia/Tehran deleted file mode 100644 index 16149ed6bf5b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Tehran and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Tel_Aviv b/lib/pytz/zoneinfo/Asia/Tel_Aviv deleted file mode 100644 index 4e6410f2b2d3..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Tel_Aviv and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Thimbu b/lib/pytz/zoneinfo/Asia/Thimbu deleted file mode 100644 index 90294aea2026..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Thimbu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Thimphu b/lib/pytz/zoneinfo/Asia/Thimphu deleted file mode 100644 index 90294aea2026..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Thimphu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Tokyo b/lib/pytz/zoneinfo/Asia/Tokyo deleted file mode 100644 index 058c1e99ba26..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Tokyo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Ujung_Pandang b/lib/pytz/zoneinfo/Asia/Ujung_Pandang deleted file mode 100644 index f35823303bcd..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Ujung_Pandang and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Ulaanbaatar b/lib/pytz/zoneinfo/Asia/Ulaanbaatar deleted file mode 100644 index 39bdd89416a5..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Ulaanbaatar and 
/dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Ulan_Bator b/lib/pytz/zoneinfo/Asia/Ulan_Bator deleted file mode 100644 index 39bdd89416a5..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Ulan_Bator and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Urumqi b/lib/pytz/zoneinfo/Asia/Urumqi deleted file mode 100644 index f46ff380a055..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Urumqi and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Vientiane b/lib/pytz/zoneinfo/Asia/Vientiane deleted file mode 100644 index 7d39589f19e8..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Vientiane and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Vladivostok b/lib/pytz/zoneinfo/Asia/Vladivostok deleted file mode 100644 index 1cae6d0fd988..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Vladivostok and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Yakutsk b/lib/pytz/zoneinfo/Asia/Yakutsk deleted file mode 100644 index 461901f6f83b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Yakutsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Yekaterinburg b/lib/pytz/zoneinfo/Asia/Yekaterinburg deleted file mode 100644 index aca50c6e1d8b..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Yekaterinburg and /dev/null differ diff --git a/lib/pytz/zoneinfo/Asia/Yerevan b/lib/pytz/zoneinfo/Asia/Yerevan deleted file mode 100644 index c4ab2197f803..000000000000 Binary files a/lib/pytz/zoneinfo/Asia/Yerevan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Azores b/lib/pytz/zoneinfo/Atlantic/Azores deleted file mode 100644 index 19e40040672a..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Azores and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Bermuda b/lib/pytz/zoneinfo/Atlantic/Bermuda deleted file mode 100644 index 54dd33dbcbb3..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Bermuda and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Canary b/lib/pytz/zoneinfo/Atlantic/Canary deleted file mode 100644 index 972388be7176..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Canary and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Cape_Verde b/lib/pytz/zoneinfo/Atlantic/Cape_Verde deleted file mode 100644 index 5238ac8a6704..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Cape_Verde and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Faeroe b/lib/pytz/zoneinfo/Atlantic/Faeroe deleted file mode 100644 index 4dab7ef0859c..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Faeroe and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Faroe b/lib/pytz/zoneinfo/Atlantic/Faroe deleted file mode 100644 index 4dab7ef0859c..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Faroe and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Jan_Mayen b/lib/pytz/zoneinfo/Atlantic/Jan_Mayen deleted file mode 100644 index 6326961453f4..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Jan_Mayen and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Madeira b/lib/pytz/zoneinfo/Atlantic/Madeira deleted file mode 100644 index 2175096cf122..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Madeira and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Reykjavik b/lib/pytz/zoneinfo/Atlantic/Reykjavik deleted file mode 100644 index e97f13a65201..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Reykjavik and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/South_Georgia b/lib/pytz/zoneinfo/Atlantic/South_Georgia deleted file mode 100644 index 
ab2c8236b00b..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/South_Georgia and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/St_Helena b/lib/pytz/zoneinfo/Atlantic/St_Helena deleted file mode 100644 index d365e3ddf349..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/St_Helena and /dev/null differ diff --git a/lib/pytz/zoneinfo/Atlantic/Stanley b/lib/pytz/zoneinfo/Atlantic/Stanley deleted file mode 100644 index 34f9d022cd5a..000000000000 Binary files a/lib/pytz/zoneinfo/Atlantic/Stanley and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/ACT b/lib/pytz/zoneinfo/Australia/ACT deleted file mode 100644 index d95c245e5ee1..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/ACT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Adelaide b/lib/pytz/zoneinfo/Australia/Adelaide deleted file mode 100644 index b350cb6660a6..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Adelaide and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Brisbane b/lib/pytz/zoneinfo/Australia/Brisbane deleted file mode 100644 index 3e899a163f51..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Brisbane and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Broken_Hill b/lib/pytz/zoneinfo/Australia/Broken_Hill deleted file mode 100644 index d8f3155c84df..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Broken_Hill and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Canberra b/lib/pytz/zoneinfo/Australia/Canberra deleted file mode 100644 index d95c245e5ee1..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Canberra and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Currie b/lib/pytz/zoneinfo/Australia/Currie deleted file mode 100644 index 43ca1e455803..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Currie and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Darwin b/lib/pytz/zoneinfo/Australia/Darwin deleted file mode 100644 index c44512fbb73b..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Darwin and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Eucla b/lib/pytz/zoneinfo/Australia/Eucla deleted file mode 100644 index e78c2d42514e..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Eucla and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Hobart b/lib/pytz/zoneinfo/Australia/Hobart deleted file mode 100644 index c4604e5386ef..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Hobart and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/LHI b/lib/pytz/zoneinfo/Australia/LHI deleted file mode 100644 index 1f542d3700d5..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/LHI and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Lindeman b/lib/pytz/zoneinfo/Australia/Lindeman deleted file mode 100644 index 05c3c1c324cc..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Lindeman and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Lord_Howe b/lib/pytz/zoneinfo/Australia/Lord_Howe deleted file mode 100644 index 1f542d3700d5..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Lord_Howe and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Melbourne b/lib/pytz/zoneinfo/Australia/Melbourne deleted file mode 100644 index af3152f6c4cb..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Melbourne and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/NSW b/lib/pytz/zoneinfo/Australia/NSW deleted file mode 100644 index d95c245e5ee1..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/NSW and /dev/null 
differ diff --git a/lib/pytz/zoneinfo/Australia/North b/lib/pytz/zoneinfo/Australia/North deleted file mode 100644 index c44512fbb73b..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/North and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Perth b/lib/pytz/zoneinfo/Australia/Perth deleted file mode 100644 index 1c7ebb795cb7..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Perth and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Queensland b/lib/pytz/zoneinfo/Australia/Queensland deleted file mode 100644 index 3e899a163f51..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Queensland and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/South b/lib/pytz/zoneinfo/Australia/South deleted file mode 100644 index b350cb6660a6..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/South and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Sydney b/lib/pytz/zoneinfo/Australia/Sydney deleted file mode 100644 index d95c245e5ee1..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Sydney and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Tasmania b/lib/pytz/zoneinfo/Australia/Tasmania deleted file mode 100644 index c4604e5386ef..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Tasmania and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Victoria b/lib/pytz/zoneinfo/Australia/Victoria deleted file mode 100644 index af3152f6c4cb..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Victoria and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/West b/lib/pytz/zoneinfo/Australia/West deleted file mode 100644 index 1c7ebb795cb7..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/West and /dev/null differ diff --git a/lib/pytz/zoneinfo/Australia/Yancowinna b/lib/pytz/zoneinfo/Australia/Yancowinna deleted file mode 100644 index d8f3155c84df..000000000000 Binary files a/lib/pytz/zoneinfo/Australia/Yancowinna and /dev/null differ diff --git a/lib/pytz/zoneinfo/Brazil/Acre b/lib/pytz/zoneinfo/Brazil/Acre deleted file mode 100644 index 7be212b1e651..000000000000 Binary files a/lib/pytz/zoneinfo/Brazil/Acre and /dev/null differ diff --git a/lib/pytz/zoneinfo/Brazil/DeNoronha b/lib/pytz/zoneinfo/Brazil/DeNoronha deleted file mode 100644 index c60239009e46..000000000000 Binary files a/lib/pytz/zoneinfo/Brazil/DeNoronha and /dev/null differ diff --git a/lib/pytz/zoneinfo/Brazil/East b/lib/pytz/zoneinfo/Brazil/East deleted file mode 100644 index 8df63a17bd46..000000000000 Binary files a/lib/pytz/zoneinfo/Brazil/East and /dev/null differ diff --git a/lib/pytz/zoneinfo/Brazil/West b/lib/pytz/zoneinfo/Brazil/West deleted file mode 100644 index 112921184139..000000000000 Binary files a/lib/pytz/zoneinfo/Brazil/West and /dev/null differ diff --git a/lib/pytz/zoneinfo/CET b/lib/pytz/zoneinfo/CET deleted file mode 100644 index 4c4f8ef9aed8..000000000000 Binary files a/lib/pytz/zoneinfo/CET and /dev/null differ diff --git a/lib/pytz/zoneinfo/CST6CDT b/lib/pytz/zoneinfo/CST6CDT deleted file mode 100644 index 5c8a1d9a3ea4..000000000000 Binary files a/lib/pytz/zoneinfo/CST6CDT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Atlantic b/lib/pytz/zoneinfo/Canada/Atlantic deleted file mode 100644 index 756099abe6ce..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Atlantic and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Central b/lib/pytz/zoneinfo/Canada/Central deleted file mode 100644 index 2d22791686e8..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Central and /dev/null differ diff 
--git a/lib/pytz/zoneinfo/Canada/East-Saskatchewan b/lib/pytz/zoneinfo/Canada/East-Saskatchewan deleted file mode 100644 index 20c9c84df491..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/East-Saskatchewan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Eastern b/lib/pytz/zoneinfo/Canada/Eastern deleted file mode 100644 index 1698477a4877..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Eastern and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Mountain b/lib/pytz/zoneinfo/Canada/Mountain deleted file mode 100644 index 3fa0579891a9..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Mountain and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Newfoundland b/lib/pytz/zoneinfo/Canada/Newfoundland deleted file mode 100644 index e7a18d601d02..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Newfoundland and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Pacific b/lib/pytz/zoneinfo/Canada/Pacific deleted file mode 100644 index 0c1fa5269049..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Pacific and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Saskatchewan b/lib/pytz/zoneinfo/Canada/Saskatchewan deleted file mode 100644 index 20c9c84df491..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Saskatchewan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Canada/Yukon b/lib/pytz/zoneinfo/Canada/Yukon deleted file mode 100644 index 15216d55ff54..000000000000 Binary files a/lib/pytz/zoneinfo/Canada/Yukon and /dev/null differ diff --git a/lib/pytz/zoneinfo/Chile/Continental b/lib/pytz/zoneinfo/Chile/Continental deleted file mode 100644 index de74ddf1a095..000000000000 Binary files a/lib/pytz/zoneinfo/Chile/Continental and /dev/null differ diff --git a/lib/pytz/zoneinfo/Chile/EasterIsland b/lib/pytz/zoneinfo/Chile/EasterIsland deleted file mode 100644 index de6b8ff5839f..000000000000 Binary files a/lib/pytz/zoneinfo/Chile/EasterIsland and /dev/null differ diff --git a/lib/pytz/zoneinfo/Cuba b/lib/pytz/zoneinfo/Cuba deleted file mode 100644 index 96eaf81ee9e4..000000000000 Binary files a/lib/pytz/zoneinfo/Cuba and /dev/null differ diff --git a/lib/pytz/zoneinfo/EET b/lib/pytz/zoneinfo/EET deleted file mode 100644 index beb273a24838..000000000000 Binary files a/lib/pytz/zoneinfo/EET and /dev/null differ diff --git a/lib/pytz/zoneinfo/EST b/lib/pytz/zoneinfo/EST deleted file mode 100644 index 074a4fc76ad8..000000000000 Binary files a/lib/pytz/zoneinfo/EST and /dev/null differ diff --git a/lib/pytz/zoneinfo/EST5EDT b/lib/pytz/zoneinfo/EST5EDT deleted file mode 100644 index 54541fc27164..000000000000 Binary files a/lib/pytz/zoneinfo/EST5EDT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Egypt b/lib/pytz/zoneinfo/Egypt deleted file mode 100644 index d3730dfbc1d2..000000000000 Binary files a/lib/pytz/zoneinfo/Egypt and /dev/null differ diff --git a/lib/pytz/zoneinfo/Eire b/lib/pytz/zoneinfo/Eire deleted file mode 100644 index 3dec02693d78..000000000000 Binary files a/lib/pytz/zoneinfo/Eire and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT b/lib/pytz/zoneinfo/Etc/GMT deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+0 b/lib/pytz/zoneinfo/Etc/GMT+0 deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+0 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+1 b/lib/pytz/zoneinfo/Etc/GMT+1 deleted file mode 100644 index 67b88c96237f..000000000000 Binary files 
a/lib/pytz/zoneinfo/Etc/GMT+1 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+10 b/lib/pytz/zoneinfo/Etc/GMT+10 deleted file mode 100644 index d564b28a6f8c..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+10 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+11 b/lib/pytz/zoneinfo/Etc/GMT+11 deleted file mode 100644 index 52eb573057b7..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+11 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+12 b/lib/pytz/zoneinfo/Etc/GMT+12 deleted file mode 100644 index c54cead625d2..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+12 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+2 b/lib/pytz/zoneinfo/Etc/GMT+2 deleted file mode 100644 index e43b63f66b6c..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+2 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+3 b/lib/pytz/zoneinfo/Etc/GMT+3 deleted file mode 100644 index f029bac683c7..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+3 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+4 b/lib/pytz/zoneinfo/Etc/GMT+4 deleted file mode 100644 index 0ad0ee3229bd..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+4 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+5 b/lib/pytz/zoneinfo/Etc/GMT+5 deleted file mode 100644 index e53f3febecf6..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+5 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+6 b/lib/pytz/zoneinfo/Etc/GMT+6 deleted file mode 100644 index b41149616a31..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+6 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+7 b/lib/pytz/zoneinfo/Etc/GMT+7 deleted file mode 100644 index 32fa6dcb42cc..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+7 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+8 b/lib/pytz/zoneinfo/Etc/GMT+8 deleted file mode 100644 index 512578ca6d7c..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+8 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT+9 b/lib/pytz/zoneinfo/Etc/GMT+9 deleted file mode 100644 index d3e47e7b24e5..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT+9 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-0 b/lib/pytz/zoneinfo/Etc/GMT-0 deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-0 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-1 b/lib/pytz/zoneinfo/Etc/GMT-1 deleted file mode 100644 index 9a6adebc882e..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-1 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-10 b/lib/pytz/zoneinfo/Etc/GMT-10 deleted file mode 100644 index 37b93fb9d15d..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-10 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-11 b/lib/pytz/zoneinfo/Etc/GMT-11 deleted file mode 100644 index f1af0e290c98..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-11 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-12 b/lib/pytz/zoneinfo/Etc/GMT-12 deleted file mode 100644 index 0fa4a8dc03dd..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-12 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-13 b/lib/pytz/zoneinfo/Etc/GMT-13 deleted file mode 100644 index 0a5dbe16cde2..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-13 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-14 b/lib/pytz/zoneinfo/Etc/GMT-14 deleted file mode 100644 index 41c6a1d1ca0a..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-14 and /dev/null differ diff 
--git a/lib/pytz/zoneinfo/Etc/GMT-2 b/lib/pytz/zoneinfo/Etc/GMT-2 deleted file mode 100644 index 9f63268d09e0..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-2 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-3 b/lib/pytz/zoneinfo/Etc/GMT-3 deleted file mode 100644 index 38ccd8a6108b..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-3 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-4 b/lib/pytz/zoneinfo/Etc/GMT-4 deleted file mode 100644 index 43badfb220c4..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-4 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-5 b/lib/pytz/zoneinfo/Etc/GMT-5 deleted file mode 100644 index c88cf210c3ba..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-5 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-6 b/lib/pytz/zoneinfo/Etc/GMT-6 deleted file mode 100644 index c1a0634cf5ca..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-6 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-7 b/lib/pytz/zoneinfo/Etc/GMT-7 deleted file mode 100644 index bc152efdaf3c..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-7 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-8 b/lib/pytz/zoneinfo/Etc/GMT-8 deleted file mode 100644 index 2c0de20faa3e..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-8 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT-9 b/lib/pytz/zoneinfo/Etc/GMT-9 deleted file mode 100644 index 8a3bd45af31a..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT-9 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/GMT0 b/lib/pytz/zoneinfo/Etc/GMT0 deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/GMT0 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/Greenwich b/lib/pytz/zoneinfo/Etc/Greenwich deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/Greenwich and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/UCT b/lib/pytz/zoneinfo/Etc/UCT deleted file mode 100644 index a88c4b665b3e..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/UCT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/UTC b/lib/pytz/zoneinfo/Etc/UTC deleted file mode 100644 index 5583f5b0c6e6..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/UTC and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/Universal b/lib/pytz/zoneinfo/Etc/Universal deleted file mode 100644 index 5583f5b0c6e6..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/Universal and /dev/null differ diff --git a/lib/pytz/zoneinfo/Etc/Zulu b/lib/pytz/zoneinfo/Etc/Zulu deleted file mode 100644 index 5583f5b0c6e6..000000000000 Binary files a/lib/pytz/zoneinfo/Etc/Zulu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Amsterdam b/lib/pytz/zoneinfo/Europe/Amsterdam deleted file mode 100644 index 30ca3243a5ab..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Amsterdam and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Andorra b/lib/pytz/zoneinfo/Europe/Andorra deleted file mode 100644 index cf9533a3e4d5..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Andorra and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Athens b/lib/pytz/zoneinfo/Europe/Athens deleted file mode 100644 index 726e56c3fe18..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Athens and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Belfast b/lib/pytz/zoneinfo/Europe/Belfast deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Belfast and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/Europe/Belgrade b/lib/pytz/zoneinfo/Europe/Belgrade deleted file mode 100644 index 5f0389f03919..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Belgrade and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Berlin b/lib/pytz/zoneinfo/Europe/Berlin deleted file mode 100644 index 96059c7854e1..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Berlin and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Bratislava b/lib/pytz/zoneinfo/Europe/Bratislava deleted file mode 100644 index 9ab78e9156f2..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Bratislava and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Brussels b/lib/pytz/zoneinfo/Europe/Brussels deleted file mode 100644 index 2791edeba35c..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Brussels and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Bucharest b/lib/pytz/zoneinfo/Europe/Bucharest deleted file mode 100644 index de2a5f0af1a4..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Bucharest and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Budapest b/lib/pytz/zoneinfo/Europe/Budapest deleted file mode 100644 index 1b787b16cea3..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Budapest and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Chisinau b/lib/pytz/zoneinfo/Europe/Chisinau deleted file mode 100644 index 983cc7071676..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Chisinau and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Copenhagen b/lib/pytz/zoneinfo/Europe/Copenhagen deleted file mode 100644 index af7e9269e5e4..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Copenhagen and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Dublin b/lib/pytz/zoneinfo/Europe/Dublin deleted file mode 100644 index 3dec02693d78..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Dublin and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Gibraltar b/lib/pytz/zoneinfo/Europe/Gibraltar deleted file mode 100644 index f3dbeb65ecce..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Gibraltar and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Guernsey b/lib/pytz/zoneinfo/Europe/Guernsey deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Guernsey and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Helsinki b/lib/pytz/zoneinfo/Europe/Helsinki deleted file mode 100644 index 19d7babd531f..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Helsinki and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Isle_of_Man b/lib/pytz/zoneinfo/Europe/Isle_of_Man deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Isle_of_Man and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Istanbul b/lib/pytz/zoneinfo/Europe/Istanbul deleted file mode 100644 index 864099556bc9..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Istanbul and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Jersey b/lib/pytz/zoneinfo/Europe/Jersey deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Jersey and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Kaliningrad b/lib/pytz/zoneinfo/Europe/Kaliningrad deleted file mode 100644 index fa6bab862037..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Kaliningrad and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Kiev b/lib/pytz/zoneinfo/Europe/Kiev deleted file mode 100644 index 075cc02fd893..000000000000 Binary files 
a/lib/pytz/zoneinfo/Europe/Kiev and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Lisbon b/lib/pytz/zoneinfo/Europe/Lisbon deleted file mode 100644 index 168accf060c3..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Lisbon and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Ljubljana b/lib/pytz/zoneinfo/Europe/Ljubljana deleted file mode 100644 index 5f0389f03919..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Ljubljana and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/London b/lib/pytz/zoneinfo/Europe/London deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/London and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Luxembourg b/lib/pytz/zoneinfo/Europe/Luxembourg deleted file mode 100644 index 6c194a5cdcb2..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Luxembourg and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Madrid b/lib/pytz/zoneinfo/Europe/Madrid deleted file mode 100644 index 931195955a1b..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Madrid and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Malta b/lib/pytz/zoneinfo/Europe/Malta deleted file mode 100644 index 5f518a1f1773..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Malta and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Mariehamn b/lib/pytz/zoneinfo/Europe/Mariehamn deleted file mode 100644 index 19d7babd531f..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Mariehamn and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Minsk b/lib/pytz/zoneinfo/Europe/Minsk deleted file mode 100644 index ba9971c63138..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Minsk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Monaco b/lib/pytz/zoneinfo/Europe/Monaco deleted file mode 100644 index 664f6161ab6c..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Monaco and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Moscow b/lib/pytz/zoneinfo/Europe/Moscow deleted file mode 100644 index 6068f8b9e584..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Moscow and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Nicosia b/lib/pytz/zoneinfo/Europe/Nicosia deleted file mode 100644 index f7f10ab7665e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Nicosia and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Oslo b/lib/pytz/zoneinfo/Europe/Oslo deleted file mode 100644 index 6326961453f4..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Oslo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Paris b/lib/pytz/zoneinfo/Europe/Paris deleted file mode 100644 index fd8ea7dbae9e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Paris and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Podgorica b/lib/pytz/zoneinfo/Europe/Podgorica deleted file mode 100644 index 5f0389f03919..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Podgorica and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Prague b/lib/pytz/zoneinfo/Europe/Prague deleted file mode 100644 index 9ab78e9156f2..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Prague and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Riga b/lib/pytz/zoneinfo/Europe/Riga deleted file mode 100644 index abea45d309b7..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Riga and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Rome b/lib/pytz/zoneinfo/Europe/Rome deleted file mode 100644 index 28ddffe0d93e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Rome and /dev/null differ 
diff --git a/lib/pytz/zoneinfo/Europe/Samara b/lib/pytz/zoneinfo/Europe/Samara deleted file mode 100644 index fe5060094b50..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Samara and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/San_Marino b/lib/pytz/zoneinfo/Europe/San_Marino deleted file mode 100644 index 28ddffe0d93e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/San_Marino and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Sarajevo b/lib/pytz/zoneinfo/Europe/Sarajevo deleted file mode 100644 index 5f0389f03919..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Sarajevo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Simferopol b/lib/pytz/zoneinfo/Europe/Simferopol deleted file mode 100644 index ebb63b4450b0..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Simferopol and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Skopje b/lib/pytz/zoneinfo/Europe/Skopje deleted file mode 100644 index 5f0389f03919..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Skopje and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Sofia b/lib/pytz/zoneinfo/Europe/Sofia deleted file mode 100644 index d8032335b20a..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Sofia and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Stockholm b/lib/pytz/zoneinfo/Europe/Stockholm deleted file mode 100644 index 3bc6dbd9d12b..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Stockholm and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Tallinn b/lib/pytz/zoneinfo/Europe/Tallinn deleted file mode 100644 index 4ba4424121ae..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Tallinn and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Tirane b/lib/pytz/zoneinfo/Europe/Tirane deleted file mode 100644 index 0b86017d243f..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Tirane and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Tiraspol b/lib/pytz/zoneinfo/Europe/Tiraspol deleted file mode 100644 index 983cc7071676..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Tiraspol and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Uzhgorod b/lib/pytz/zoneinfo/Europe/Uzhgorod deleted file mode 100644 index 7032ab9b34f9..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Uzhgorod and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Vaduz b/lib/pytz/zoneinfo/Europe/Vaduz deleted file mode 100644 index c4e20dbe0e8c..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Vaduz and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Vatican b/lib/pytz/zoneinfo/Europe/Vatican deleted file mode 100644 index 28ddffe0d93e..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Vatican and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Vienna b/lib/pytz/zoneinfo/Europe/Vienna deleted file mode 100644 index 8025ba520a85..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Vienna and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Vilnius b/lib/pytz/zoneinfo/Europe/Vilnius deleted file mode 100644 index b6545b24949c..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Vilnius and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Volgograd b/lib/pytz/zoneinfo/Europe/Volgograd deleted file mode 100644 index b91e4fbff6b4..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Volgograd and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Warsaw b/lib/pytz/zoneinfo/Europe/Warsaw deleted file mode 100644 index 3797b1cb6532..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Warsaw and /dev/null differ diff 
--git a/lib/pytz/zoneinfo/Europe/Zagreb b/lib/pytz/zoneinfo/Europe/Zagreb deleted file mode 100644 index 5f0389f03919..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Zagreb and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Zaporozhye b/lib/pytz/zoneinfo/Europe/Zaporozhye deleted file mode 100644 index 2ccf8998b24f..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Zaporozhye and /dev/null differ diff --git a/lib/pytz/zoneinfo/Europe/Zurich b/lib/pytz/zoneinfo/Europe/Zurich deleted file mode 100644 index 0cf15c17eeae..000000000000 Binary files a/lib/pytz/zoneinfo/Europe/Zurich and /dev/null differ diff --git a/lib/pytz/zoneinfo/Factory b/lib/pytz/zoneinfo/Factory deleted file mode 100644 index a65f97edd26d..000000000000 Binary files a/lib/pytz/zoneinfo/Factory and /dev/null differ diff --git a/lib/pytz/zoneinfo/GB b/lib/pytz/zoneinfo/GB deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/GB and /dev/null differ diff --git a/lib/pytz/zoneinfo/GB-Eire b/lib/pytz/zoneinfo/GB-Eire deleted file mode 100644 index fe63ff7e7f1e..000000000000 Binary files a/lib/pytz/zoneinfo/GB-Eire and /dev/null differ diff --git a/lib/pytz/zoneinfo/GMT b/lib/pytz/zoneinfo/GMT deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/GMT and /dev/null differ diff --git a/lib/pytz/zoneinfo/GMT+0 b/lib/pytz/zoneinfo/GMT+0 deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/GMT+0 and /dev/null differ diff --git a/lib/pytz/zoneinfo/GMT-0 b/lib/pytz/zoneinfo/GMT-0 deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/GMT-0 and /dev/null differ diff --git a/lib/pytz/zoneinfo/GMT0 b/lib/pytz/zoneinfo/GMT0 deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/GMT0 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Greenwich b/lib/pytz/zoneinfo/Greenwich deleted file mode 100644 index 2ee14295f108..000000000000 Binary files a/lib/pytz/zoneinfo/Greenwich and /dev/null differ diff --git a/lib/pytz/zoneinfo/HST b/lib/pytz/zoneinfo/HST deleted file mode 100644 index 616c31bc5ea6..000000000000 Binary files a/lib/pytz/zoneinfo/HST and /dev/null differ diff --git a/lib/pytz/zoneinfo/Hongkong b/lib/pytz/zoneinfo/Hongkong deleted file mode 100644 index 45db6e226144..000000000000 Binary files a/lib/pytz/zoneinfo/Hongkong and /dev/null differ diff --git a/lib/pytz/zoneinfo/Iceland b/lib/pytz/zoneinfo/Iceland deleted file mode 100644 index e97f13a65201..000000000000 Binary files a/lib/pytz/zoneinfo/Iceland and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Antananarivo b/lib/pytz/zoneinfo/Indian/Antananarivo deleted file mode 100644 index ef6e745c4517..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Antananarivo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Chagos b/lib/pytz/zoneinfo/Indian/Chagos deleted file mode 100644 index 864d3e29a2b0..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Chagos and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Christmas b/lib/pytz/zoneinfo/Indian/Christmas deleted file mode 100644 index 686d5b3c6542..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Christmas and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Cocos b/lib/pytz/zoneinfo/Indian/Cocos deleted file mode 100644 index 6f7d869f0fb3..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Cocos and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Comoro b/lib/pytz/zoneinfo/Indian/Comoro 
deleted file mode 100644 index 297c6db63c06..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Comoro and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Kerguelen b/lib/pytz/zoneinfo/Indian/Kerguelen deleted file mode 100644 index 1f42bbc1ffcb..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Kerguelen and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Mahe b/lib/pytz/zoneinfo/Indian/Mahe deleted file mode 100644 index d048242cac78..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Mahe and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Maldives b/lib/pytz/zoneinfo/Indian/Maldives deleted file mode 100644 index 65e7eeee8db7..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Maldives and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Mauritius b/lib/pytz/zoneinfo/Indian/Mauritius deleted file mode 100644 index 54f222010893..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Mauritius and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Mayotte b/lib/pytz/zoneinfo/Indian/Mayotte deleted file mode 100644 index 8401a37aa0ba..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Mayotte and /dev/null differ diff --git a/lib/pytz/zoneinfo/Indian/Reunion b/lib/pytz/zoneinfo/Indian/Reunion deleted file mode 100644 index 9b3830ec31a1..000000000000 Binary files a/lib/pytz/zoneinfo/Indian/Reunion and /dev/null differ diff --git a/lib/pytz/zoneinfo/Iran b/lib/pytz/zoneinfo/Iran deleted file mode 100644 index 16149ed6bf5b..000000000000 Binary files a/lib/pytz/zoneinfo/Iran and /dev/null differ diff --git a/lib/pytz/zoneinfo/Israel b/lib/pytz/zoneinfo/Israel deleted file mode 100644 index 4e6410f2b2d3..000000000000 Binary files a/lib/pytz/zoneinfo/Israel and /dev/null differ diff --git a/lib/pytz/zoneinfo/Jamaica b/lib/pytz/zoneinfo/Jamaica deleted file mode 100644 index 09e3eb93996d..000000000000 Binary files a/lib/pytz/zoneinfo/Jamaica and /dev/null differ diff --git a/lib/pytz/zoneinfo/Japan b/lib/pytz/zoneinfo/Japan deleted file mode 100644 index 058c1e99ba26..000000000000 Binary files a/lib/pytz/zoneinfo/Japan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Kwajalein b/lib/pytz/zoneinfo/Kwajalein deleted file mode 100644 index b57237272d54..000000000000 Binary files a/lib/pytz/zoneinfo/Kwajalein and /dev/null differ diff --git a/lib/pytz/zoneinfo/Libya b/lib/pytz/zoneinfo/Libya deleted file mode 100644 index 943f9a36b120..000000000000 Binary files a/lib/pytz/zoneinfo/Libya and /dev/null differ diff --git a/lib/pytz/zoneinfo/MET b/lib/pytz/zoneinfo/MET deleted file mode 100644 index 71963d533e44..000000000000 Binary files a/lib/pytz/zoneinfo/MET and /dev/null differ diff --git a/lib/pytz/zoneinfo/MST b/lib/pytz/zoneinfo/MST deleted file mode 100644 index da3e926d23e7..000000000000 Binary files a/lib/pytz/zoneinfo/MST and /dev/null differ diff --git a/lib/pytz/zoneinfo/MST7MDT b/lib/pytz/zoneinfo/MST7MDT deleted file mode 100644 index 726a7e571765..000000000000 Binary files a/lib/pytz/zoneinfo/MST7MDT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Mexico/BajaNorte b/lib/pytz/zoneinfo/Mexico/BajaNorte deleted file mode 100644 index fffdc24bfc4c..000000000000 Binary files a/lib/pytz/zoneinfo/Mexico/BajaNorte and /dev/null differ diff --git a/lib/pytz/zoneinfo/Mexico/BajaSur b/lib/pytz/zoneinfo/Mexico/BajaSur deleted file mode 100644 index 43ee12d84a7c..000000000000 Binary files a/lib/pytz/zoneinfo/Mexico/BajaSur and /dev/null differ diff --git a/lib/pytz/zoneinfo/Mexico/General b/lib/pytz/zoneinfo/Mexico/General deleted file mode 100644 index 
1434ab08804d..000000000000 Binary files a/lib/pytz/zoneinfo/Mexico/General and /dev/null differ diff --git a/lib/pytz/zoneinfo/Mideast/Riyadh87 b/lib/pytz/zoneinfo/Mideast/Riyadh87 deleted file mode 100644 index ebe16c49957c..000000000000 Binary files a/lib/pytz/zoneinfo/Mideast/Riyadh87 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Mideast/Riyadh88 b/lib/pytz/zoneinfo/Mideast/Riyadh88 deleted file mode 100644 index 7f2224bbb29b..000000000000 Binary files a/lib/pytz/zoneinfo/Mideast/Riyadh88 and /dev/null differ diff --git a/lib/pytz/zoneinfo/Mideast/Riyadh89 b/lib/pytz/zoneinfo/Mideast/Riyadh89 deleted file mode 100644 index a50ca48a91b9..000000000000 Binary files a/lib/pytz/zoneinfo/Mideast/Riyadh89 and /dev/null differ diff --git a/lib/pytz/zoneinfo/NZ b/lib/pytz/zoneinfo/NZ deleted file mode 100644 index a40767df93d3..000000000000 Binary files a/lib/pytz/zoneinfo/NZ and /dev/null differ diff --git a/lib/pytz/zoneinfo/NZ-CHAT b/lib/pytz/zoneinfo/NZ-CHAT deleted file mode 100644 index 6329e4fce0e5..000000000000 Binary files a/lib/pytz/zoneinfo/NZ-CHAT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Navajo b/lib/pytz/zoneinfo/Navajo deleted file mode 100644 index f8908febf220..000000000000 Binary files a/lib/pytz/zoneinfo/Navajo and /dev/null differ diff --git a/lib/pytz/zoneinfo/PRC b/lib/pytz/zoneinfo/PRC deleted file mode 100644 index 240c4c6f76da..000000000000 Binary files a/lib/pytz/zoneinfo/PRC and /dev/null differ diff --git a/lib/pytz/zoneinfo/PST8PDT b/lib/pytz/zoneinfo/PST8PDT deleted file mode 100644 index 6242ac04c09f..000000000000 Binary files a/lib/pytz/zoneinfo/PST8PDT and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Apia b/lib/pytz/zoneinfo/Pacific/Apia deleted file mode 100644 index e6c3c59f04e7..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Apia and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Auckland b/lib/pytz/zoneinfo/Pacific/Auckland deleted file mode 100644 index a40767df93d3..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Auckland and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Chatham b/lib/pytz/zoneinfo/Pacific/Chatham deleted file mode 100644 index 6329e4fce0e5..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Chatham and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Chuuk b/lib/pytz/zoneinfo/Pacific/Chuuk deleted file mode 100644 index 0ef473871d55..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Chuuk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Easter b/lib/pytz/zoneinfo/Pacific/Easter deleted file mode 100644 index de6b8ff5839f..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Easter and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Efate b/lib/pytz/zoneinfo/Pacific/Efate deleted file mode 100644 index c46154a8056c..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Efate and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Enderbury b/lib/pytz/zoneinfo/Pacific/Enderbury deleted file mode 100644 index 69e75d754e2c..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Enderbury and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Fakaofo b/lib/pytz/zoneinfo/Pacific/Fakaofo deleted file mode 100644 index 2a4e7afa5ea5..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Fakaofo and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Fiji b/lib/pytz/zoneinfo/Pacific/Fiji deleted file mode 100644 index 797842aa3e1e..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Fiji and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/Pacific/Funafuti b/lib/pytz/zoneinfo/Pacific/Funafuti deleted file mode 100644 index 66cf5e1df3fc..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Funafuti and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Galapagos b/lib/pytz/zoneinfo/Pacific/Galapagos deleted file mode 100644 index 7504cc66f500..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Galapagos and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Gambier b/lib/pytz/zoneinfo/Pacific/Gambier deleted file mode 100644 index fc49c03f6e04..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Gambier and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Guadalcanal b/lib/pytz/zoneinfo/Pacific/Guadalcanal deleted file mode 100644 index 3a4ec12e56d7..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Guadalcanal and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Guam b/lib/pytz/zoneinfo/Pacific/Guam deleted file mode 100644 index a05292f4bacf..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Guam and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Honolulu b/lib/pytz/zoneinfo/Pacific/Honolulu deleted file mode 100644 index 1b4684b9b47f..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Honolulu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Johnston b/lib/pytz/zoneinfo/Pacific/Johnston deleted file mode 100644 index 616c31bc5ea6..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Johnston and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Kiritimati b/lib/pytz/zoneinfo/Pacific/Kiritimati deleted file mode 100644 index 7131453c5540..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Kiritimati and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Kosrae b/lib/pytz/zoneinfo/Pacific/Kosrae deleted file mode 100644 index 61b7561589cb..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Kosrae and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Kwajalein b/lib/pytz/zoneinfo/Pacific/Kwajalein deleted file mode 100644 index b57237272d54..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Kwajalein and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Majuro b/lib/pytz/zoneinfo/Pacific/Majuro deleted file mode 100644 index eab93a2af990..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Majuro and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Marquesas b/lib/pytz/zoneinfo/Pacific/Marquesas deleted file mode 100644 index cd2d5b073aff..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Marquesas and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Midway b/lib/pytz/zoneinfo/Pacific/Midway deleted file mode 100644 index 8889a26fa7c8..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Midway and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Nauru b/lib/pytz/zoneinfo/Pacific/Nauru deleted file mode 100644 index 1d8179bcb50d..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Nauru and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Niue b/lib/pytz/zoneinfo/Pacific/Niue deleted file mode 100644 index b9f18a544ab4..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Niue and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Norfolk b/lib/pytz/zoneinfo/Pacific/Norfolk deleted file mode 100644 index 2e989c255617..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Norfolk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Noumea b/lib/pytz/zoneinfo/Pacific/Noumea deleted file mode 100644 index ae9e138fa566..000000000000 Binary files 
a/lib/pytz/zoneinfo/Pacific/Noumea and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Pago_Pago b/lib/pytz/zoneinfo/Pacific/Pago_Pago deleted file mode 100644 index fa084ba584c6..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Pago_Pago and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Palau b/lib/pytz/zoneinfo/Pacific/Palau deleted file mode 100644 index efc556b14024..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Palau and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Pitcairn b/lib/pytz/zoneinfo/Pacific/Pitcairn deleted file mode 100644 index 51f01c6410da..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Pitcairn and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Pohnpei b/lib/pytz/zoneinfo/Pacific/Pohnpei deleted file mode 100644 index f175ea587502..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Pohnpei and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Ponape b/lib/pytz/zoneinfo/Pacific/Ponape deleted file mode 100644 index f175ea587502..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Ponape and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Port_Moresby b/lib/pytz/zoneinfo/Pacific/Port_Moresby deleted file mode 100644 index 8d4d12ccb094..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Port_Moresby and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Rarotonga b/lib/pytz/zoneinfo/Pacific/Rarotonga deleted file mode 100644 index 581299788a61..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Rarotonga and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Saipan b/lib/pytz/zoneinfo/Pacific/Saipan deleted file mode 100644 index 519c86e96683..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Saipan and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Samoa b/lib/pytz/zoneinfo/Pacific/Samoa deleted file mode 100644 index fa084ba584c6..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Samoa and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Tahiti b/lib/pytz/zoneinfo/Pacific/Tahiti deleted file mode 100644 index 22f86974672e..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Tahiti and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Tarawa b/lib/pytz/zoneinfo/Pacific/Tarawa deleted file mode 100644 index 065dcd819473..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Tarawa and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Tongatapu b/lib/pytz/zoneinfo/Pacific/Tongatapu deleted file mode 100644 index 01ab6b87ef92..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Tongatapu and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Truk b/lib/pytz/zoneinfo/Pacific/Truk deleted file mode 100644 index 0ef473871d55..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Truk and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Wake b/lib/pytz/zoneinfo/Pacific/Wake deleted file mode 100644 index f89c52829a00..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Wake and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Wallis b/lib/pytz/zoneinfo/Pacific/Wallis deleted file mode 100644 index 9aaf558f1da2..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Wallis and /dev/null differ diff --git a/lib/pytz/zoneinfo/Pacific/Yap b/lib/pytz/zoneinfo/Pacific/Yap deleted file mode 100644 index 0ef473871d55..000000000000 Binary files a/lib/pytz/zoneinfo/Pacific/Yap and /dev/null differ diff --git a/lib/pytz/zoneinfo/Poland b/lib/pytz/zoneinfo/Poland deleted file mode 100644 index 3797b1cb6532..000000000000 Binary files 
a/lib/pytz/zoneinfo/Poland and /dev/null differ diff --git a/lib/pytz/zoneinfo/Portugal b/lib/pytz/zoneinfo/Portugal deleted file mode 100644 index 168accf060c3..000000000000 Binary files a/lib/pytz/zoneinfo/Portugal and /dev/null differ diff --git a/lib/pytz/zoneinfo/ROC b/lib/pytz/zoneinfo/ROC deleted file mode 100644 index 70cfb27ca91f..000000000000 Binary files a/lib/pytz/zoneinfo/ROC and /dev/null differ diff --git a/lib/pytz/zoneinfo/ROK b/lib/pytz/zoneinfo/ROK deleted file mode 100644 index 96bb0c36d7e3..000000000000 Binary files a/lib/pytz/zoneinfo/ROK and /dev/null differ diff --git a/lib/pytz/zoneinfo/Singapore b/lib/pytz/zoneinfo/Singapore deleted file mode 100644 index a6f2db8f3a88..000000000000 Binary files a/lib/pytz/zoneinfo/Singapore and /dev/null differ diff --git a/lib/pytz/zoneinfo/Turkey b/lib/pytz/zoneinfo/Turkey deleted file mode 100644 index 864099556bc9..000000000000 Binary files a/lib/pytz/zoneinfo/Turkey and /dev/null differ diff --git a/lib/pytz/zoneinfo/UCT b/lib/pytz/zoneinfo/UCT deleted file mode 100644 index a88c4b665b3e..000000000000 Binary files a/lib/pytz/zoneinfo/UCT and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Alaska b/lib/pytz/zoneinfo/US/Alaska deleted file mode 100644 index d14735026a09..000000000000 Binary files a/lib/pytz/zoneinfo/US/Alaska and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Aleutian b/lib/pytz/zoneinfo/US/Aleutian deleted file mode 100644 index 391ec98ec0f3..000000000000 Binary files a/lib/pytz/zoneinfo/US/Aleutian and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Arizona b/lib/pytz/zoneinfo/US/Arizona deleted file mode 100644 index 67589026c21c..000000000000 Binary files a/lib/pytz/zoneinfo/US/Arizona and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Central b/lib/pytz/zoneinfo/US/Central deleted file mode 100644 index 71aae7246a30..000000000000 Binary files a/lib/pytz/zoneinfo/US/Central and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/East-Indiana b/lib/pytz/zoneinfo/US/East-Indiana deleted file mode 100644 index aa3dfc43730e..000000000000 Binary files a/lib/pytz/zoneinfo/US/East-Indiana and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Eastern b/lib/pytz/zoneinfo/US/Eastern deleted file mode 100644 index b2c2377f4e87..000000000000 Binary files a/lib/pytz/zoneinfo/US/Eastern and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Hawaii b/lib/pytz/zoneinfo/US/Hawaii deleted file mode 100644 index 1b4684b9b47f..000000000000 Binary files a/lib/pytz/zoneinfo/US/Hawaii and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Indiana-Starke b/lib/pytz/zoneinfo/US/Indiana-Starke deleted file mode 100644 index 33169f459638..000000000000 Binary files a/lib/pytz/zoneinfo/US/Indiana-Starke and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Michigan b/lib/pytz/zoneinfo/US/Michigan deleted file mode 100644 index da53d46df347..000000000000 Binary files a/lib/pytz/zoneinfo/US/Michigan and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Mountain b/lib/pytz/zoneinfo/US/Mountain deleted file mode 100644 index f8908febf220..000000000000 Binary files a/lib/pytz/zoneinfo/US/Mountain and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Pacific b/lib/pytz/zoneinfo/US/Pacific deleted file mode 100644 index 3b7ce1dceebf..000000000000 Binary files a/lib/pytz/zoneinfo/US/Pacific and /dev/null differ diff --git a/lib/pytz/zoneinfo/US/Pacific-New b/lib/pytz/zoneinfo/US/Pacific-New deleted file mode 100644 index 3b7ce1dceebf..000000000000 Binary files a/lib/pytz/zoneinfo/US/Pacific-New and /dev/null differ diff --git 
a/lib/pytz/zoneinfo/US/Samoa b/lib/pytz/zoneinfo/US/Samoa
deleted file mode 100644
index fa084ba584c6..000000000000
Binary files a/lib/pytz/zoneinfo/US/Samoa and /dev/null differ
diff --git a/lib/pytz/zoneinfo/UTC b/lib/pytz/zoneinfo/UTC
deleted file mode 100644
index 5583f5b0c6e6..000000000000
Binary files a/lib/pytz/zoneinfo/UTC and /dev/null differ
diff --git a/lib/pytz/zoneinfo/Universal b/lib/pytz/zoneinfo/Universal
deleted file mode 100644
index 5583f5b0c6e6..000000000000
Binary files a/lib/pytz/zoneinfo/Universal and /dev/null differ
diff --git a/lib/pytz/zoneinfo/W-SU b/lib/pytz/zoneinfo/W-SU
deleted file mode 100644
index 6068f8b9e584..000000000000
Binary files a/lib/pytz/zoneinfo/W-SU and /dev/null differ
diff --git a/lib/pytz/zoneinfo/WET b/lib/pytz/zoneinfo/WET
deleted file mode 100644
index 444a1933d725..000000000000
Binary files a/lib/pytz/zoneinfo/WET and /dev/null differ
diff --git a/lib/pytz/zoneinfo/Zulu b/lib/pytz/zoneinfo/Zulu
deleted file mode 100644
index 5583f5b0c6e6..000000000000
Binary files a/lib/pytz/zoneinfo/Zulu and /dev/null differ
diff --git a/lib/pytz/zoneinfo/iso3166.tab b/lib/pytz/zoneinfo/iso3166.tab
deleted file mode 100644
index b952ca1c5900..000000000000
--- a/lib/pytz/zoneinfo/iso3166.tab
+++ /dev/null
@@ -1,276 +0,0 @@
-#
-# This file is in the public domain, so clarified as of
-# 2009-05-17 by Arthur David Olson.
-# ISO 3166 alpha-2 country codes
-#
-# From Paul Eggert (2006-09-27):
-#
-# This file contains a table with the following columns:
-# 1.  ISO 3166-1 alpha-2 country code, current as of
-#     ISO 3166-1 Newsletter VI-1 (2007-09-21).  See:
-#     
-#     ISO 3166 Maintenance agency (ISO 3166/MA)
-#     .
-# 2.  The usual English name for the country,
-#     chosen so that alphabetic sorting of subsets produces helpful lists.
-#     This is not the same as the English name in the ISO 3166 tables.
-#
-# Columns are separated by a single tab.
-# The table is sorted by country code.
-#
-# Lines beginning with `#' are comments.
-#
-# From Arthur David Olson (2011-08-17):
-# Resynchronized today with the ISO 3166 site (adding SS for South Sudan).
-#
-#country-
-#code	country name
-AD	Andorra
-AE	United Arab Emirates
-AF	Afghanistan
-AG	Antigua & Barbuda
-AI	Anguilla
-AL	Albania
-AM	Armenia
-AO	Angola
-AQ	Antarctica
-AR	Argentina
-AS	Samoa (American)
-AT	Austria
-AU	Australia
-AW	Aruba
-AX	Aaland Islands
-AZ	Azerbaijan
-BA	Bosnia & Herzegovina
-BB	Barbados
-BD	Bangladesh
-BE	Belgium
-BF	Burkina Faso
-BG	Bulgaria
-BH	Bahrain
-BI	Burundi
-BJ	Benin
-BL	St Barthelemy
-BM	Bermuda
-BN	Brunei
-BO	Bolivia
-BQ	Bonaire Sint Eustatius & Saba
-BR	Brazil
-BS	Bahamas
-BT	Bhutan
-BV	Bouvet Island
-BW	Botswana
-BY	Belarus
-BZ	Belize
-CA	Canada
-CC	Cocos (Keeling) Islands
-CD	Congo (Dem. Rep.)
-CF	Central African Rep.
-CG	Congo (Rep.)
-CH	Switzerland
-CI	Cote d'Ivoire
-CK	Cook Islands
-CL	Chile
-CM	Cameroon
-CN	China
-CO	Colombia
-CR	Costa Rica
-CU	Cuba
-CV	Cape Verde
-CW	Curacao
-CX	Christmas Island
-CY	Cyprus
-CZ	Czech Republic
-DE	Germany
-DJ	Djibouti
-DK	Denmark
-DM	Dominica
-DO	Dominican Republic
-DZ	Algeria
-EC	Ecuador
-EE	Estonia
-EG	Egypt
-EH	Western Sahara
-ER	Eritrea
-ES	Spain
-ET	Ethiopia
-FI	Finland
-FJ	Fiji
-FK	Falkland Islands
-FM	Micronesia
-FO	Faroe Islands
-FR	France
-GA	Gabon
-GB	Britain (UK)
-GD	Grenada
-GE	Georgia
-GF	French Guiana
-GG	Guernsey
-GH	Ghana
-GI	Gibraltar
-GL	Greenland
-GM	Gambia
-GN	Guinea
-GP	Guadeloupe
-GQ	Equatorial Guinea
-GR	Greece
-GS	South Georgia & the South Sandwich Islands
-GT	Guatemala
-GU	Guam
-GW	Guinea-Bissau
-GY	Guyana
-HK	Hong Kong
-HM	Heard Island & McDonald Islands
-HN	Honduras
-HR	Croatia
-HT	Haiti
-HU	Hungary
-ID	Indonesia
-IE	Ireland
-IL	Israel
-IM	Isle of Man
-IN	India
-IO	British Indian Ocean Territory
-IQ	Iraq
-IR	Iran
-IS	Iceland
-IT	Italy
-JE	Jersey
-JM	Jamaica
-JO	Jordan
-JP	Japan
-KE	Kenya
-KG	Kyrgyzstan
-KH	Cambodia
-KI	Kiribati
-KM	Comoros
-KN	St Kitts & Nevis
-KP	Korea (North)
-KR	Korea (South)
-KW	Kuwait
-KY	Cayman Islands
-KZ	Kazakhstan
-LA	Laos
-LB	Lebanon
-LC	St Lucia
-LI	Liechtenstein
-LK	Sri Lanka
-LR	Liberia
-LS	Lesotho
-LT	Lithuania
-LU	Luxembourg
-LV	Latvia
-LY	Libya
-MA	Morocco
-MC	Monaco
-MD	Moldova
-ME	Montenegro
-MF	St Martin (French part)
-MG	Madagascar
-MH	Marshall Islands
-MK	Macedonia
-ML	Mali
-MM	Myanmar (Burma)
-MN	Mongolia
-MO	Macau
-MP	Northern Mariana Islands
-MQ	Martinique
-MR	Mauritania
-MS	Montserrat
-MT	Malta
-MU	Mauritius
-MV	Maldives
-MW	Malawi
-MX	Mexico
-MY	Malaysia
-MZ	Mozambique
-NA	Namibia
-NC	New Caledonia
-NE	Niger
-NF	Norfolk Island
-NG	Nigeria
-NI	Nicaragua
-NL	Netherlands
-NO	Norway
-NP	Nepal
-NR	Nauru
-NU	Niue
-NZ	New Zealand
-OM	Oman
-PA	Panama
-PE	Peru
-PF	French Polynesia
-PG	Papua New Guinea
-PH	Philippines
-PK	Pakistan
-PL	Poland
-PM	St Pierre & Miquelon
-PN	Pitcairn
-PR	Puerto Rico
-PS	Palestine
-PT	Portugal
-PW	Palau
-PY	Paraguay
-QA	Qatar
-RE	Reunion
-RO	Romania
-RS	Serbia
-RU	Russia
-RW	Rwanda
-SA	Saudi Arabia
-SB	Solomon Islands
-SC	Seychelles
-SD	Sudan
-SE	Sweden
-SG	Singapore
-SH	St Helena
-SI	Slovenia
-SJ	Svalbard & Jan Mayen
-SK	Slovakia
-SL	Sierra Leone
-SM	San Marino
-SN	Senegal
-SO	Somalia
-SR	Suriname
-SS	South Sudan
-ST	Sao Tome & Principe
-SV	El Salvador
-SX	Sint Maarten
-SY	Syria
-SZ	Swaziland
-TC	Turks & Caicos Is
-TD	Chad
-TF	French Southern & Antarctic Lands
-TG	Togo
-TH	Thailand
-TJ	Tajikistan
-TK	Tokelau
-TL	East Timor
-TM	Turkmenistan
-TN	Tunisia
-TO	Tonga
-TR	Turkey
-TT	Trinidad & Tobago
-TV	Tuvalu
-TW	Taiwan
-TZ	Tanzania
-UA	Ukraine
-UG	Uganda
-UM	US minor outlying islands
-US	United States
-UY	Uruguay
-UZ	Uzbekistan
-VA	Vatican City
-VC	St Vincent
-VE	Venezuela
-VG	Virgin Islands (UK)
-VI	Virgin Islands (US)
-VN	Vietnam
-VU	Vanuatu
-WF	Wallis & Futuna
-WS	Samoa (western)
-YE	Yemen
-YT	Mayotte
-ZA	South Africa
-ZM	Zambia
-ZW	Zimbabwe
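
The iso3166.tab header above describes a plain two-column layout: lines beginning with '#' are comments, and each remaining row is an ISO 3166 alpha-2 code and an English country name separated by a single tab. As a minimal sketch only (the function name and file path below are illustrative and not part of this change), such a table could be read like this:

    def read_iso3166(path="iso3166.tab"):
        """Return {alpha-2 code: country name} from an iso3166.tab-style file."""
        names = {}
        with open(path, encoding="utf-8") as fh:
            for line in fh:
                line = line.rstrip("\n")
                # Skip blank lines and '#' comment lines, as the header specifies.
                if not line or line.startswith("#"):
                    continue
                code, name = line.split("\t", 1)
                names[code] = name
        return names

    # Example: read_iso3166()["NZ"] == "New Zealand"
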
diff --git a/lib/pytz/zoneinfo/localtime b/lib/pytz/zoneinfo/localtime
deleted file mode 100644
index 2ee14295f108..000000000000
Binary files a/lib/pytz/zoneinfo/localtime and /dev/null differ
diff --git a/lib/pytz/zoneinfo/posixrules b/lib/pytz/zoneinfo/posixrules
deleted file mode 100644
index b2c2377f4e87..000000000000
Binary files a/lib/pytz/zoneinfo/posixrules and /dev/null differ
diff --git a/lib/pytz/zoneinfo/zone.tab b/lib/pytz/zoneinfo/zone.tab
deleted file mode 100644
index 6bda8266ba97..000000000000
--- a/lib/pytz/zoneinfo/zone.tab
+++ /dev/null
@@ -1,441 +0,0 @@
-# 
-# This file is in the public domain, so clarified as of
-# 2009-05-17 by Arthur David Olson.
-#
-# TZ zone descriptions
-#
-# From Paul Eggert (1996-08-05):
-#
-# This file contains a table with the following columns:
-# 1.  ISO 3166 2-character country code.  See the file `iso3166.tab'.
-# 2.  Latitude and longitude of the zone's principal location
-#     in ISO 6709 sign-degrees-minutes-seconds format,
-#     either +-DDMM+-DDDMM or +-DDMMSS+-DDDMMSS,
-#     first latitude (+ is north), then longitude (+ is east).
-# 3.  Zone name used in value of TZ environment variable.
-# 4.  Comments; present if and only if the country has multiple rows.
-#
-# Columns are separated by a single tab.
-# The table is sorted first by country, then an order within the country that
-# (1) makes some geographical sense, and
-# (2) puts the most populous zones first, where that does not contradict (1).
-#
-# Lines beginning with `#' are comments.
-#
-#country-
-#code	coordinates	TZ			comments
-AD	+4230+00131	Europe/Andorra
-AE	+2518+05518	Asia/Dubai
-AF	+3431+06912	Asia/Kabul
-AG	+1703-06148	America/Antigua
-AI	+1812-06304	America/Anguilla
-AL	+4120+01950	Europe/Tirane
-AM	+4011+04430	Asia/Yerevan
-AO	-0848+01314	Africa/Luanda
-AQ	-7750+16636	Antarctica/McMurdo	McMurdo Station, Ross Island
-AQ	-9000+00000	Antarctica/South_Pole	Amundsen-Scott Station, South Pole
-AQ	-6734-06808	Antarctica/Rothera	Rothera Station, Adelaide Island
-AQ	-6448-06406	Antarctica/Palmer	Palmer Station, Anvers Island
-AQ	-6736+06253	Antarctica/Mawson	Mawson Station, Holme Bay
-AQ	-6835+07758	Antarctica/Davis	Davis Station, Vestfold Hills
-AQ	-6617+11031	Antarctica/Casey	Casey Station, Bailey Peninsula
-AQ	-7824+10654	Antarctica/Vostok	Vostok Station, Lake Vostok
-AQ	-6640+14001	Antarctica/DumontDUrville	Dumont-d'Urville Station, Terre Adelie
-AQ	-690022+0393524	Antarctica/Syowa	Syowa Station, E Ongul I
-AQ	-5430+15857	Antarctica/Macquarie	Macquarie Island Station, Macquarie Island
-AR	-3436-05827	America/Argentina/Buenos_Aires	Buenos Aires (BA, CF)
-AR	-3124-06411	America/Argentina/Cordoba	most locations (CB, CC, CN, ER, FM, MN, SE, SF)
-AR	-2447-06525	America/Argentina/Salta	(SA, LP, NQ, RN)
-AR	-2411-06518	America/Argentina/Jujuy	Jujuy (JY)
-AR	-2649-06513	America/Argentina/Tucuman	Tucuman (TM)
-AR	-2828-06547	America/Argentina/Catamarca	Catamarca (CT), Chubut (CH)
-AR	-2926-06651	America/Argentina/La_Rioja	La Rioja (LR)
-AR	-3132-06831	America/Argentina/San_Juan	San Juan (SJ)
-AR	-3253-06849	America/Argentina/Mendoza	Mendoza (MZ)
-AR	-3319-06621	America/Argentina/San_Luis	San Luis (SL)
-AR	-5138-06913	America/Argentina/Rio_Gallegos	Santa Cruz (SC)
-AR	-5448-06818	America/Argentina/Ushuaia	Tierra del Fuego (TF)
-AS	-1416-17042	Pacific/Pago_Pago
-AT	+4813+01620	Europe/Vienna
-AU	-3133+15905	Australia/Lord_Howe	Lord Howe Island
-AU	-4253+14719	Australia/Hobart	Tasmania - most locations
-AU	-3956+14352	Australia/Currie	Tasmania - King Island
-AU	-3749+14458	Australia/Melbourne	Victoria
-AU	-3352+15113	Australia/Sydney	New South Wales - most locations
-AU	-3157+14127	Australia/Broken_Hill	New South Wales - Yancowinna
-AU	-2728+15302	Australia/Brisbane	Queensland - most locations
-AU	-2016+14900	Australia/Lindeman	Queensland - Holiday Islands
-AU	-3455+13835	Australia/Adelaide	South Australia
-AU	-1228+13050	Australia/Darwin	Northern Territory
-AU	-3157+11551	Australia/Perth	Western Australia - most locations
-AU	-3143+12852	Australia/Eucla	Western Australia - Eucla area
-AW	+1230-06958	America/Aruba
-AX	+6006+01957	Europe/Mariehamn
-AZ	+4023+04951	Asia/Baku
-BA	+4352+01825	Europe/Sarajevo
-BB	+1306-05937	America/Barbados
-BD	+2343+09025	Asia/Dhaka
-BE	+5050+00420	Europe/Brussels
-BF	+1222-00131	Africa/Ouagadougou
-BG	+4241+02319	Europe/Sofia
-BH	+2623+05035	Asia/Bahrain
-BI	-0323+02922	Africa/Bujumbura
-BJ	+0629+00237	Africa/Porto-Novo
-BL	+1753-06251	America/St_Barthelemy
-BM	+3217-06446	Atlantic/Bermuda
-BN	+0456+11455	Asia/Brunei
-BO	-1630-06809	America/La_Paz
-BQ	+120903-0681636	America/Kralendijk
-BR	-0351-03225	America/Noronha	Atlantic islands
-BR	-0127-04829	America/Belem	Amapa, E Para
-BR	-0343-03830	America/Fortaleza	NE Brazil (MA, PI, CE, RN, PB)
-BR	-0803-03454	America/Recife	Pernambuco
-BR	-0712-04812	America/Araguaina	Tocantins
-BR	-0940-03543	America/Maceio	Alagoas, Sergipe
-BR	-1259-03831	America/Bahia	Bahia
-BR	-2332-04637	America/Sao_Paulo	S & SE Brazil (GO, DF, MG, ES, RJ, SP, PR, SC, RS)
-BR	-2027-05437	America/Campo_Grande	Mato Grosso do Sul
-BR	-1535-05605	America/Cuiaba	Mato Grosso
-BR	-0226-05452	America/Santarem	W Para
-BR	-0846-06354	America/Porto_Velho	Rondonia
-BR	+0249-06040	America/Boa_Vista	Roraima
-BR	-0308-06001	America/Manaus	E Amazonas
-BR	-0640-06952	America/Eirunepe	W Amazonas
-BR	-0958-06748	America/Rio_Branco	Acre
-BS	+2505-07721	America/Nassau
-BT	+2728+08939	Asia/Thimphu
-BW	-2439+02555	Africa/Gaborone
-BY	+5354+02734	Europe/Minsk
-BZ	+1730-08812	America/Belize
-CA	+4734-05243	America/St_Johns	Newfoundland Time, including SE Labrador
-CA	+4439-06336	America/Halifax	Atlantic Time - Nova Scotia (most places), PEI
-CA	+4612-05957	America/Glace_Bay	Atlantic Time - Nova Scotia - places that did not observe DST 1966-1971
-CA	+4606-06447	America/Moncton	Atlantic Time - New Brunswick
-CA	+5320-06025	America/Goose_Bay	Atlantic Time - Labrador - most locations
-CA	+5125-05707	America/Blanc-Sablon	Atlantic Standard Time - Quebec - Lower North Shore
-CA	+4531-07334	America/Montreal	Eastern Time - Quebec - most locations
-CA	+4339-07923	America/Toronto	Eastern Time - Ontario - most locations
-CA	+4901-08816	America/Nipigon	Eastern Time - Ontario & Quebec - places that did not observe DST 1967-1973
-CA	+4823-08915	America/Thunder_Bay	Eastern Time - Thunder Bay, Ontario
-CA	+6344-06828	America/Iqaluit	Eastern Time - east Nunavut - most locations
-CA	+6608-06544	America/Pangnirtung	Eastern Time - Pangnirtung, Nunavut
-CA	+744144-0944945	America/Resolute	Central Standard Time - Resolute, Nunavut
-CA	+484531-0913718	America/Atikokan	Eastern Standard Time - Atikokan, Ontario and Southampton I, Nunavut
-CA	+624900-0920459	America/Rankin_Inlet	Central Time - central Nunavut
-CA	+4953-09709	America/Winnipeg	Central Time - Manitoba & west Ontario
-CA	+4843-09434	America/Rainy_River	Central Time - Rainy River & Fort Frances, Ontario
-CA	+5024-10439	America/Regina	Central Standard Time - Saskatchewan - most locations
-CA	+5017-10750	America/Swift_Current	Central Standard Time - Saskatchewan - midwest
-CA	+5333-11328	America/Edmonton	Mountain Time - Alberta, east British Columbia & west Saskatchewan
-CA	+690650-1050310	America/Cambridge_Bay	Mountain Time - west Nunavut
-CA	+6227-11421	America/Yellowknife	Mountain Time - central Northwest Territories
-CA	+682059-1334300	America/Inuvik	Mountain Time - west Northwest Territories
-CA	+4906-11631	America/Creston	Mountain Standard Time - Creston, British Columbia
-CA	+5946-12014	America/Dawson_Creek	Mountain Standard Time - Dawson Creek & Fort Saint John, British Columbia
-CA	+4916-12307	America/Vancouver	Pacific Time - west British Columbia
-CA	+6043-13503	America/Whitehorse	Pacific Time - south Yukon
-CA	+6404-13925	America/Dawson	Pacific Time - north Yukon
-CC	-1210+09655	Indian/Cocos
-CD	-0418+01518	Africa/Kinshasa	west Dem. Rep. of Congo
-CD	-1140+02728	Africa/Lubumbashi	east Dem. Rep. of Congo
-CF	+0422+01835	Africa/Bangui
-CG	-0416+01517	Africa/Brazzaville
-CH	+4723+00832	Europe/Zurich
-CI	+0519-00402	Africa/Abidjan
-CK	-2114-15946	Pacific/Rarotonga
-CL	-3327-07040	America/Santiago	most locations
-CL	-2709-10926	Pacific/Easter	Easter Island & Sala y Gomez
-CM	+0403+00942	Africa/Douala
-CN	+3114+12128	Asia/Shanghai	east China - Beijing, Guangdong, Shanghai, etc.
-CN	+4545+12641	Asia/Harbin	Heilongjiang (except Mohe), Jilin
-CN	+2934+10635	Asia/Chongqing	central China - Sichuan, Yunnan, Guangxi, Shaanxi, Guizhou, etc.
-CN	+4348+08735	Asia/Urumqi	most of Tibet & Xinjiang
-CN	+3929+07559	Asia/Kashgar	west Tibet & Xinjiang
-CO	+0436-07405	America/Bogota
-CR	+0956-08405	America/Costa_Rica
-CU	+2308-08222	America/Havana
-CV	+1455-02331	Atlantic/Cape_Verde
-CW	+1211-06900	America/Curacao
-CX	-1025+10543	Indian/Christmas
-CY	+3510+03322	Asia/Nicosia
-CZ	+5005+01426	Europe/Prague
-DE	+5230+01322	Europe/Berlin
-DJ	+1136+04309	Africa/Djibouti
-DK	+5540+01235	Europe/Copenhagen
-DM	+1518-06124	America/Dominica
-DO	+1828-06954	America/Santo_Domingo
-DZ	+3647+00303	Africa/Algiers
-EC	-0210-07950	America/Guayaquil	mainland
-EC	-0054-08936	Pacific/Galapagos	Galapagos Islands
-EE	+5925+02445	Europe/Tallinn
-EG	+3003+03115	Africa/Cairo
-EH	+2709-01312	Africa/El_Aaiun
-ER	+1520+03853	Africa/Asmara
-ES	+4024-00341	Europe/Madrid	mainland
-ES	+3553-00519	Africa/Ceuta	Ceuta & Melilla
-ES	+2806-01524	Atlantic/Canary	Canary Islands
-ET	+0902+03842	Africa/Addis_Ababa
-FI	+6010+02458	Europe/Helsinki
-FJ	-1808+17825	Pacific/Fiji
-FK	-5142-05751	Atlantic/Stanley
-FM	+0725+15147	Pacific/Chuuk	Chuuk (Truk) and Yap
-FM	+0658+15813	Pacific/Pohnpei	Pohnpei (Ponape)
-FM	+0519+16259	Pacific/Kosrae	Kosrae
-FO	+6201-00646	Atlantic/Faroe
-FR	+4852+00220	Europe/Paris
-GA	+0023+00927	Africa/Libreville
-GB	+513030-0000731	Europe/London
-GD	+1203-06145	America/Grenada
-GE	+4143+04449	Asia/Tbilisi
-GF	+0456-05220	America/Cayenne
-GG	+4927-00232	Europe/Guernsey
-GH	+0533-00013	Africa/Accra
-GI	+3608-00521	Europe/Gibraltar
-GL	+6411-05144	America/Godthab	most locations
-GL	+7646-01840	America/Danmarkshavn	east coast, north of Scoresbysund
-GL	+7029-02158	America/Scoresbysund	Scoresbysund / Ittoqqortoormiit
-GL	+7634-06847	America/Thule	Thule / Pituffik
-GM	+1328-01639	Africa/Banjul
-GN	+0931-01343	Africa/Conakry
-GP	+1614-06132	America/Guadeloupe
-GQ	+0345+00847	Africa/Malabo
-GR	+3758+02343	Europe/Athens
-GS	-5416-03632	Atlantic/South_Georgia
-GT	+1438-09031	America/Guatemala
-GU	+1328+14445	Pacific/Guam
-GW	+1151-01535	Africa/Bissau
-GY	+0648-05810	America/Guyana
-HK	+2217+11409	Asia/Hong_Kong
-HN	+1406-08713	America/Tegucigalpa
-HR	+4548+01558	Europe/Zagreb
-HT	+1832-07220	America/Port-au-Prince
-HU	+4730+01905	Europe/Budapest
-ID	-0610+10648	Asia/Jakarta	Java & Sumatra
-ID	-0002+10920	Asia/Pontianak	west & central Borneo
-ID	-0507+11924	Asia/Makassar	east & south Borneo, Sulawesi (Celebes), Bali, Nusa Tengarra, west Timor
-ID	-0232+14042	Asia/Jayapura	west New Guinea (Irian Jaya) & Malukus (Moluccas)
-IE	+5320-00615	Europe/Dublin
-IL	+3146+03514	Asia/Jerusalem
-IM	+5409-00428	Europe/Isle_of_Man
-IN	+2232+08822	Asia/Kolkata
-IO	-0720+07225	Indian/Chagos
-IQ	+3321+04425	Asia/Baghdad
-IR	+3540+05126	Asia/Tehran
-IS	+6409-02151	Atlantic/Reykjavik
-IT	+4154+01229	Europe/Rome
-JE	+4912-00207	Europe/Jersey
-JM	+1800-07648	America/Jamaica
-JO	+3157+03556	Asia/Amman
-JP	+353916+1394441	Asia/Tokyo
-KE	-0117+03649	Africa/Nairobi
-KG	+4254+07436	Asia/Bishkek
-KH	+1133+10455	Asia/Phnom_Penh
-KI	+0125+17300	Pacific/Tarawa	Gilbert Islands
-KI	-0308-17105	Pacific/Enderbury	Phoenix Islands
-KI	+0152-15720	Pacific/Kiritimati	Line Islands
-KM	-1141+04316	Indian/Comoro
-KN	+1718-06243	America/St_Kitts
-KP	+3901+12545	Asia/Pyongyang
-KR	+3733+12658	Asia/Seoul
-KW	+2920+04759	Asia/Kuwait
-KY	+1918-08123	America/Cayman
-KZ	+4315+07657	Asia/Almaty	most locations
-KZ	+4448+06528	Asia/Qyzylorda	Qyzylorda (Kyzylorda, Kzyl-Orda)
-KZ	+5017+05710	Asia/Aqtobe	Aqtobe (Aktobe)
-KZ	+4431+05016	Asia/Aqtau	Atyrau (Atirau, Gur'yev), Mangghystau (Mankistau)
-KZ	+5113+05121	Asia/Oral	West Kazakhstan
-LA	+1758+10236	Asia/Vientiane
-LB	+3353+03530	Asia/Beirut
-LC	+1401-06100	America/St_Lucia
-LI	+4709+00931	Europe/Vaduz
-LK	+0656+07951	Asia/Colombo
-LR	+0618-01047	Africa/Monrovia
-LS	-2928+02730	Africa/Maseru
-LT	+5441+02519	Europe/Vilnius
-LU	+4936+00609	Europe/Luxembourg
-LV	+5657+02406	Europe/Riga
-LY	+3254+01311	Africa/Tripoli
-MA	+3339-00735	Africa/Casablanca
-MC	+4342+00723	Europe/Monaco
-MD	+4700+02850	Europe/Chisinau
-ME	+4226+01916	Europe/Podgorica
-MF	+1804-06305	America/Marigot
-MG	-1855+04731	Indian/Antananarivo
-MH	+0709+17112	Pacific/Majuro	most locations
-MH	+0905+16720	Pacific/Kwajalein	Kwajalein
-MK	+4159+02126	Europe/Skopje
-ML	+1239-00800	Africa/Bamako
-MM	+1647+09610	Asia/Rangoon
-MN	+4755+10653	Asia/Ulaanbaatar	most locations
-MN	+4801+09139	Asia/Hovd	Bayan-Olgiy, Govi-Altai, Hovd, Uvs, Zavkhan
-MN	+4804+11430	Asia/Choibalsan	Dornod, Sukhbaatar
-MO	+2214+11335	Asia/Macau
-MP	+1512+14545	Pacific/Saipan
-MQ	+1436-06105	America/Martinique
-MR	+1806-01557	Africa/Nouakchott
-MS	+1643-06213	America/Montserrat
-MT	+3554+01431	Europe/Malta
-MU	-2010+05730	Indian/Mauritius
-MV	+0410+07330	Indian/Maldives
-MW	-1547+03500	Africa/Blantyre
-MX	+1924-09909	America/Mexico_City	Central Time - most locations
-MX	+2105-08646	America/Cancun	Central Time - Quintana Roo
-MX	+2058-08937	America/Merida	Central Time - Campeche, Yucatan
-MX	+2540-10019	America/Monterrey	Mexican Central Time - Coahuila, Durango, Nuevo Leon, Tamaulipas away from US border
-MX	+2550-09730	America/Matamoros	US Central Time - Coahuila, Durango, Nuevo Leon, Tamaulipas near US border
-MX	+2313-10625	America/Mazatlan	Mountain Time - S Baja, Nayarit, Sinaloa
-MX	+2838-10605	America/Chihuahua	Mexican Mountain Time - Chihuahua away from US border
-MX	+2934-10425	America/Ojinaga	US Mountain Time - Chihuahua near US border
-MX	+2904-11058	America/Hermosillo	Mountain Standard Time - Sonora
-MX	+3232-11701	America/Tijuana	US Pacific Time - Baja California near US border
-MX	+3018-11452	America/Santa_Isabel	Mexican Pacific Time - Baja California away from US border
-MX	+2048-10515	America/Bahia_Banderas	Mexican Central Time - Bahia de Banderas
-MY	+0310+10142	Asia/Kuala_Lumpur	peninsular Malaysia
-MY	+0133+11020	Asia/Kuching	Sabah & Sarawak
-MZ	-2558+03235	Africa/Maputo
-NA	-2234+01706	Africa/Windhoek
-NC	-2216+16627	Pacific/Noumea
-NE	+1331+00207	Africa/Niamey
-NF	-2903+16758	Pacific/Norfolk
-NG	+0627+00324	Africa/Lagos
-NI	+1209-08617	America/Managua
-NL	+5222+00454	Europe/Amsterdam
-NO	+5955+01045	Europe/Oslo
-NP	+2743+08519	Asia/Kathmandu
-NR	-0031+16655	Pacific/Nauru
-NU	-1901-16955	Pacific/Niue
-NZ	-3652+17446	Pacific/Auckland	most locations
-NZ	-4357-17633	Pacific/Chatham	Chatham Islands
-OM	+2336+05835	Asia/Muscat
-PA	+0858-07932	America/Panama
-PE	-1203-07703	America/Lima
-PF	-1732-14934	Pacific/Tahiti	Society Islands
-PF	-0900-13930	Pacific/Marquesas	Marquesas Islands
-PF	-2308-13457	Pacific/Gambier	Gambier Islands
-PG	-0930+14710	Pacific/Port_Moresby
-PH	+1435+12100	Asia/Manila
-PK	+2452+06703	Asia/Karachi
-PL	+5215+02100	Europe/Warsaw
-PM	+4703-05620	America/Miquelon
-PN	-2504-13005	Pacific/Pitcairn
-PR	+182806-0660622	America/Puerto_Rico
-PS	+3130+03428	Asia/Gaza	Gaza Strip
-PS	+313200+0350542	Asia/Hebron	West Bank
-PT	+3843-00908	Europe/Lisbon	mainland
-PT	+3238-01654	Atlantic/Madeira	Madeira Islands
-PT	+3744-02540	Atlantic/Azores	Azores
-PW	+0720+13429	Pacific/Palau
-PY	-2516-05740	America/Asuncion
-QA	+2517+05132	Asia/Qatar
-RE	-2052+05528	Indian/Reunion
-RO	+4426+02606	Europe/Bucharest
-RS	+4450+02030	Europe/Belgrade
-RU	+5443+02030	Europe/Kaliningrad	Moscow-01 - Kaliningrad
-RU	+5545+03735	Europe/Moscow	Moscow+00 - west Russia
-RU	+4844+04425	Europe/Volgograd	Moscow+00 - Caspian Sea
-RU	+5312+05009	Europe/Samara	Moscow+00 - Samara, Udmurtia
-RU	+5651+06036	Asia/Yekaterinburg	Moscow+02 - Urals
-RU	+5500+07324	Asia/Omsk	Moscow+03 - west Siberia
-RU	+5502+08255	Asia/Novosibirsk	Moscow+03 - Novosibirsk
-RU	+5345+08707	Asia/Novokuznetsk	Moscow+03 - Novokuznetsk
-RU	+5601+09250	Asia/Krasnoyarsk	Moscow+04 - Yenisei River
-RU	+5216+10420	Asia/Irkutsk	Moscow+05 - Lake Baikal
-RU	+6200+12940	Asia/Yakutsk	Moscow+06 - Lena River
-RU	+4310+13156	Asia/Vladivostok	Moscow+07 - Amur River
-RU	+4658+14242	Asia/Sakhalin	Moscow+07 - Sakhalin Island
-RU	+5934+15048	Asia/Magadan	Moscow+08 - Magadan
-RU	+5301+15839	Asia/Kamchatka	Moscow+08 - Kamchatka
-RU	+6445+17729	Asia/Anadyr	Moscow+08 - Bering Sea
-RW	-0157+03004	Africa/Kigali
-SA	+2438+04643	Asia/Riyadh
-SB	-0932+16012	Pacific/Guadalcanal
-SC	-0440+05528	Indian/Mahe
-SD	+1536+03232	Africa/Khartoum
-SE	+5920+01803	Europe/Stockholm
-SG	+0117+10351	Asia/Singapore
-SH	-1555-00542	Atlantic/St_Helena
-SI	+4603+01431	Europe/Ljubljana
-SJ	+7800+01600	Arctic/Longyearbyen
-SK	+4809+01707	Europe/Bratislava
-SL	+0830-01315	Africa/Freetown
-SM	+4355+01228	Europe/San_Marino
-SN	+1440-01726	Africa/Dakar
-SO	+0204+04522	Africa/Mogadishu
-SR	+0550-05510	America/Paramaribo
-SS	+0451+03136	Africa/Juba
-ST	+0020+00644	Africa/Sao_Tome
-SV	+1342-08912	America/El_Salvador
-SX	+180305-0630250	America/Lower_Princes
-SY	+3330+03618	Asia/Damascus
-SZ	-2618+03106	Africa/Mbabane
-TC	+2128-07108	America/Grand_Turk
-TD	+1207+01503	Africa/Ndjamena
-TF	-492110+0701303	Indian/Kerguelen
-TG	+0608+00113	Africa/Lome
-TH	+1345+10031	Asia/Bangkok
-TJ	+3835+06848	Asia/Dushanbe
-TK	-0922-17114	Pacific/Fakaofo
-TL	-0833+12535	Asia/Dili
-TM	+3757+05823	Asia/Ashgabat
-TN	+3648+01011	Africa/Tunis
-TO	-2110-17510	Pacific/Tongatapu
-TR	+4101+02858	Europe/Istanbul
-TT	+1039-06131	America/Port_of_Spain
-TV	-0831+17913	Pacific/Funafuti
-TW	+2503+12130	Asia/Taipei
-TZ	-0648+03917	Africa/Dar_es_Salaam
-UA	+5026+03031	Europe/Kiev	most locations
-UA	+4837+02218	Europe/Uzhgorod	Ruthenia
-UA	+4750+03510	Europe/Zaporozhye	Zaporozh'ye, E Lugansk / Zaporizhia, E Luhansk
-UA	+4457+03406	Europe/Simferopol	central Crimea
-UG	+0019+03225	Africa/Kampala
-UM	+1645-16931	Pacific/Johnston	Johnston Atoll
-UM	+2813-17722	Pacific/Midway	Midway Islands
-UM	+1917+16637	Pacific/Wake	Wake Island
-US	+404251-0740023	America/New_York	Eastern Time
-US	+421953-0830245	America/Detroit	Eastern Time - Michigan - most locations
-US	+381515-0854534	America/Kentucky/Louisville	Eastern Time - Kentucky - Louisville area
-US	+364947-0845057	America/Kentucky/Monticello	Eastern Time - Kentucky - Wayne County
-US	+394606-0860929	America/Indiana/Indianapolis	Eastern Time - Indiana - most locations
-US	+384038-0873143	America/Indiana/Vincennes	Eastern Time - Indiana - Daviess, Dubois, Knox & Martin Counties
-US	+410305-0863611	America/Indiana/Winamac	Eastern Time - Indiana - Pulaski County
-US	+382232-0862041	America/Indiana/Marengo	Eastern Time - Indiana - Crawford County
-US	+382931-0871643	America/Indiana/Petersburg	Eastern Time - Indiana - Pike County
-US	+384452-0850402	America/Indiana/Vevay	Eastern Time - Indiana - Switzerland County
-US	+415100-0873900	America/Chicago	Central Time
-US	+375711-0864541	America/Indiana/Tell_City	Central Time - Indiana - Perry County
-US	+411745-0863730	America/Indiana/Knox	Central Time - Indiana - Starke County
-US	+450628-0873651	America/Menominee	Central Time - Michigan - Dickinson, Gogebic, Iron & Menominee Counties
-US	+470659-1011757	America/North_Dakota/Center	Central Time - North Dakota - Oliver County
-US	+465042-1012439	America/North_Dakota/New_Salem	Central Time - North Dakota - Morton County (except Mandan area)
-US	+471551-1014640	America/North_Dakota/Beulah	Central Time - North Dakota - Mercer County
-US	+394421-1045903	America/Denver	Mountain Time
-US	+433649-1161209	America/Boise	Mountain Time - south Idaho & east Oregon
-US	+364708-1084111	America/Shiprock	Mountain Time - Navajo
-US	+332654-1120424	America/Phoenix	Mountain Standard Time - Arizona
-US	+340308-1181434	America/Los_Angeles	Pacific Time
-US	+611305-1495401	America/Anchorage	Alaska Time
-US	+581807-1342511	America/Juneau	Alaska Time - Alaska panhandle
-US	+571035-1351807	America/Sitka	Alaska Time - southeast Alaska panhandle
-US	+593249-1394338	America/Yakutat	Alaska Time - Alaska panhandle neck
-US	+643004-1652423	America/Nome	Alaska Time - west Alaska
-US	+515248-1763929	America/Adak	Aleutian Islands
-US	+550737-1313435	America/Metlakatla	Metlakatla Time - Annette Island
-US	+211825-1575130	Pacific/Honolulu	Hawaii
-UY	-3453-05611	America/Montevideo
-UZ	+3940+06648	Asia/Samarkand	west Uzbekistan
-UZ	+4120+06918	Asia/Tashkent	east Uzbekistan
-VA	+415408+0122711	Europe/Vatican
-VC	+1309-06114	America/St_Vincent
-VE	+1030-06656	America/Caracas
-VG	+1827-06437	America/Tortola
-VI	+1821-06456	America/St_Thomas
-VN	+1045+10640	Asia/Ho_Chi_Minh
-VU	-1740+16825	Pacific/Efate
-WF	-1318-17610	Pacific/Wallis
-WS	-1350-17144	Pacific/Apia
-YE	+1245+04512	Asia/Aden
-YT	-1247+04514	Indian/Mayotte
-ZA	-2615+02800	Africa/Johannesburg
-ZM	-1525+02817	Africa/Lusaka
-ZW	-1750+03103	Africa/Harare
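
The zone.tab header above gives the coordinate column in ISO 6709 sign-degrees-minutes(-seconds) form, either +-DDMM+-DDDMM or +-DDMMSS+-DDDMMSS, latitude first. A small sketch (illustrative names, assuming only the two paired formats named in that header) converting such a pair to signed decimal degrees:

    import re

    # Latitude is 4 or 6 digits after the sign, longitude 5 or 7.
    _COORD = re.compile(r"([+-]\d{4,6})([+-]\d{5,7})")

    def _to_degrees(field, deg_digits):
        # field is e.g. "+4230" (DDMM) or "-0000731" (DDDMMSS for longitude).
        sign = -1.0 if field[0] == "-" else 1.0
        digits = field[1:]
        deg = int(digits[:deg_digits])
        minutes = int(digits[deg_digits:deg_digits + 2])
        seconds = int(digits[deg_digits + 2:] or 0)
        return sign * (deg + minutes / 60.0 + seconds / 3600.0)

    def parse_coordinates(pair):
        # Latitude uses 2 degree digits, longitude 3, per the format above.
        lat, lon = _COORD.fullmatch(pair).groups()
        return _to_degrees(lat, 2), _to_degrees(lon, 3)

    # Example: parse_coordinates("+4230+00131") -> (42.5, 1.516...)
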
diff --git a/lib/six.py b/lib/six.py
deleted file mode 100644
index 34f737fb3db0..000000000000
--- a/lib/six.py
+++ /dev/null
@@ -1,366 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.2.0-mpl"
-
-
-# True if we are running on Python 3.
-PY3 = sys.version_info[0] == 3
-
-if PY3:
-    string_types = str,
-    integer_types = int,
-    class_types = type,
-    text_type = str
-    binary_type = bytes
-
-    MAXSIZE = sys.maxsize
-else:
-    string_types = basestring,
-    integer_types = (int, long)
-    class_types = (type, types.ClassType)
-    text_type = unicode
-    binary_type = str
-
-    if sys.platform == "java":
-        # Jython always uses 32 bits.
-        MAXSIZE = int((1 << 31) - 1)
-    else:
-        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
-        class X(object):
-            def __len__(self):
-                return 1 << 31
-        try:
-            len(X())
-        except OverflowError:
-            # 32-bit
-            MAXSIZE = int((1 << 31) - 1)
-        else:
-            # 64-bit
-            MAXSIZE = int((1 << 63) - 1)
-            del X
-
-
-def _add_doc(func, doc):
-    """Add documentation to a function."""
-    func.__doc__ = doc
-
-
-def _import_module(name):
-    """Import module, returning the module after the last dot."""
-    __import__(name)
-    return sys.modules[name]
-
-
-class _LazyDescr(object):
-
-    def __init__(self, name):
-        self.name = name
-
-    def __get__(self, obj, tp):
-        result = self._resolve()
-        setattr(obj, self.name, result)
-        # This is a bit ugly, but it avoids running this again.
-        delattr(tp, self.name)
-        return result
-
-
-class MovedModule(_LazyDescr):
-
-    def __init__(self, name, old, new=None):
-        super(MovedModule, self).__init__(name)
-        if PY3:
-            if new is None:
-                new = name
-            self.mod = new
-        else:
-            self.mod = old
-
-    def _resolve(self):
-        return _import_module(self.mod)
-
-
-class MovedAttribute(_LazyDescr):
-
-    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
-        super(MovedAttribute, self).__init__(name)
-        if PY3:
-            if new_mod is None:
-                new_mod = name
-            self.mod = new_mod
-            if new_attr is None:
-                if old_attr is None:
-                    new_attr = name
-                else:
-                    new_attr = old_attr
-            self.attr = new_attr
-        else:
-            self.mod = old_mod
-            if old_attr is None:
-                old_attr = name
-            self.attr = old_attr
-
-    def _resolve(self):
-        module = _import_module(self.mod)
-        return getattr(module, self.attr)
-
-
-
-class _MovedItems(types.ModuleType):
-    """Lazy loading of moved objects"""
-
-
-_moved_attributes = [
-    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
-    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
-    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
-    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
-    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
-    MovedAttribute("reduce", "__builtin__", "functools"),
-    MovedAttribute("StringIO", "StringIO", "io"),
-    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
-    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
-
-    MovedModule("builtins", "__builtin__"),
-    MovedModule("configparser", "ConfigParser"),
-    MovedModule("copyreg", "copy_reg"),
-    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
-    MovedModule("http_cookies", "Cookie", "http.cookies"),
-    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
-    MovedModule("html_parser", "HTMLParser", "html.parser"),
-    MovedModule("http_client", "httplib", "http.client"),
-    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
-    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
-    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
-    MovedModule("cPickle", "cPickle", "pickle"),
-    MovedModule("queue", "Queue"),
-    MovedModule("reprlib", "repr"),
-    MovedModule("socketserver", "SocketServer"),
-    MovedModule("tkinter", "Tkinter"),
-    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
-    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
-    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
-    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
-    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
-    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
-    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
-    MovedModule("tkinter_colorchooser", "tkColorChooser",
-                "tkinter.colorchooser"),
-    MovedModule("tkinter_commondialog", "tkCommonDialog",
-                "tkinter.commondialog"),
-    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
-    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
-    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
-    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
-                "tkinter.simpledialog"),
-    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
-    MovedModule("winreg", "_winreg"),
-]
-for attr in _moved_attributes:
-    setattr(_MovedItems, attr.name, attr)
-del attr
-
-moves = sys.modules["six.moves"] = _MovedItems("moves")
-
-
-def add_move(move):
-    """Add an item to six.moves."""
-    setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
-    """Remove item from six.moves."""
-    try:
-        delattr(_MovedItems, name)
-    except AttributeError:
-        try:
-            del moves.__dict__[name]
-        except KeyError:
-            raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
-    _meth_func = "__func__"
-    _meth_self = "__self__"
-
-    _func_code = "__code__"
-    _func_defaults = "__defaults__"
-
-    _iterkeys = "keys"
-    _itervalues = "values"
-    _iteritems = "items"
-else:
-    _meth_func = "im_func"
-    _meth_self = "im_self"
-
-    _func_code = "func_code"
-    _func_defaults = "func_defaults"
-
-    _iterkeys = "iterkeys"
-    _itervalues = "itervalues"
-    _iteritems = "iteritems"
-
-
-try:
-    advance_iterator = next
-except NameError:
-    def advance_iterator(it):
-        return it.next()
-next = advance_iterator
-
-
-if PY3:
-    def get_unbound_function(unbound):
-        return unbound
-
-    Iterator = object
-
-    def callable(obj):
-        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-else:
-    def get_unbound_function(unbound):
-        return unbound.im_func
-
-    class Iterator(object):
-
-        def next(self):
-            return type(self).__next__(self)
-
-    callable = callable
-_add_doc(get_unbound_function,
-         """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-
-
-def iterkeys(d):
-    """Return an iterator over the keys of a dictionary."""
-    return iter(getattr(d, _iterkeys)())
-
-def itervalues(d):
-    """Return an iterator over the values of a dictionary."""
-    return iter(getattr(d, _itervalues)())
-
-def iteritems(d):
-    """Return an iterator over the (key, value) pairs of a dictionary."""
-    return iter(getattr(d, _iteritems)())
-
-
-if PY3:
-    def b(s):
-        return s.encode("latin-1")
-    def u(s):
-        return s
-    if sys.version_info[1] <= 1:
-        def int2byte(i):
-            return bytes((i,))
-    else:
-        # This is about 2x faster than the implementation above on 3.2+
-        int2byte = operator.methodcaller("to_bytes", 1, "big")
-    import io
-    StringIO = io.StringIO
-    BytesIO = io.BytesIO
-else:
-    def b(s):
-        return s
-    def u(s):
-        return unicode(s, "unicode_escape")
-    int2byte = chr
-    import StringIO
-    StringIO = BytesIO = StringIO.StringIO
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
-
-
-if PY3:
-    import builtins
-    exec_ = getattr(builtins, "exec")
-
-
-    def reraise(tp, value, tb=None):
-        if value.__traceback__ is not tb:
-            raise value.with_traceback(tb)
-        raise value
-
-
-    print_ = getattr(builtins, "print")
-    del builtins
-
-else:
-    def exec_(code, globs=None, locs=None):
-        """Execute code in a namespace."""
-        if globs is None:
-            frame = sys._getframe(1)
-            globs = frame.f_globals
-            if locs is None:
-                locs = frame.f_locals
-            del frame
-        elif locs is None:
-            locs = globs
-        exec("""exec code in globs, locs""")
-
-
-    exec_("""def reraise(tp, value, tb=None):
-    raise tp, value, tb
-""")
-
-
-    def print_(*args, **kwargs):
-        """The new-style print function."""
-        fp = kwargs.pop("file", sys.stdout)
-        if fp is None:
-            return
-        def write(data):
-            if not isinstance(data, basestring):
-                data = str(data)
-            fp.write(data)
-        want_unicode = False
-        sep = kwargs.pop("sep", None)
-        if sep is not None:
-            if isinstance(sep, unicode):
-                want_unicode = True
-            elif not isinstance(sep, str):
-                raise TypeError("sep must be None or a string")
-        end = kwargs.pop("end", None)
-        if end is not None:
-            if isinstance(end, unicode):
-                want_unicode = True
-            elif not isinstance(end, str):
-                raise TypeError("end must be None or a string")
-        if kwargs:
-            raise TypeError("invalid keyword arguments to print()")
-        if not want_unicode:
-            for arg in args:
-                if isinstance(arg, unicode):
-                    want_unicode = True
-                    break
-        if want_unicode:
-            newline = unicode("\n")
-            space = unicode(" ")
-        else:
-            newline = "\n"
-            space = " "
-        if sep is None:
-            sep = space
-        if end is None:
-            end = newline
-        for i, arg in enumerate(args):
-            if i:
-                write(sep)
-            write(arg)
-        write(end)
-
-_add_doc(reraise, """Reraise an exception.""")
-
-
-def with_metaclass(meta, base=object):
-    """Create a base class with a metaclass."""
-    return meta("NewBase", (base,), {})
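For context, `with_metaclass` (removed above with the rest of the vendored six module) was the usual six idiom for declaring a metaclass that works on both Python 2 and 3. A minimal usage sketch, with hypothetical names:

    class Meta(type):
        pass

    class Base(with_metaclass(Meta, object)):
        # roughly equivalent to `class Base(object, metaclass=Meta)` on
        # Python 3, or setting `__metaclass__ = Meta` on Python 2
        pass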
diff --git a/setup.cfg.template b/setup.cfg.template
index 2cf7e53c6ad9..4b56914b29dc 100644
--- a/setup.cfg.template
+++ b/setup.cfg.template
@@ -11,24 +11,16 @@
 [status]
 # To suppress display of the dependencies and their versions
 # at the top of the build log, uncomment the following line:
-#suppress = True
-#
-# Uncomment to insert lots of diagnostic prints in extension code
-#verbose = True
+#suppress = False
 
-[provide_packages]
-# By default, matplotlib checks for a few dependencies and
-# installs them if missing. This feature can be turned off
-# by uncommenting the following lines. Acceptible values are:
-#     True: install, overwrite an existing installation
-#     False: do not install
-#     auto: install only if the package is unavailable. This
-#           is the default behavior
+[packages]
+# There are a number of subpackages of matplotlib that are considered
+# optional.  They are all installed by default, but they may be turned
+# off here.
 #
-## Date/timezone support:
-#pytz = False
-#dateutil = False
-#six = False
+#tests = True
+#sample_data = True
+#toolkits = True
 
 [gui_support]
 # Matplotlib supports multiple GUI toolkits, including Cocoa,
@@ -37,17 +29,24 @@
 # which is provided by matplotlib and built by default.
 #
 # Some backends are written in pure Python, and others require
-# extension code to be compiled. By default, matplotlib checks
-# for these GUI toolkits during installation and, if present,
-# compiles the required extensions to support the toolkit. GTK
-# support requires the GTK runtime environment and PyGTK. Wx
-# support requires wxWidgets and wxPython. Tk support requires
-# Tk and Tkinter. The other GUI toolkits do not require any
-# extension code, and can be used as long as the libraries are
-# installed on your system.
+# extension code to be compiled. By default, matplotlib checks for
+# these GUI toolkits during installation and, if present, compiles the
+# required extensions to support the toolkit.
+#
+# - GTK 2.x support of any kind requires the GTK runtime environment
+#   headers and PyGTK.
+# - Tk support requires Tk development headers and Tkinter.
+# - The Mac OS X backend requires the Cocoa headers included with Xcode.
+# - Windowing is MS-Windows specific, and requires the "windows.h"
+#   header.
+#
+# The other GUI toolkits do not require any extension code, and can be
+# used as long as the libraries are installed on your system --
+# therefore they are installed unconditionally.
+#
+# You can uncomment any of the following lines to change this
+# behavior. Acceptable values are:
 #
-# You can uncomment any the following lines if you know you do
-# not want to use the GUI toolkit. Acceptible values are:
 #     True: build the extension. Exits with a warning if the
 #           required dependencies are not available
 #     False: do not build the extension
@@ -55,10 +54,11 @@
 #           otherwise skip silently. This is the default
 #           behavior
 #
-#gtk = False
-#gtkagg = False
-#tkagg = False
-#macosx = False
+#gtk = auto
+#gtkagg = auto
+#tkagg = auto
+#macosx = auto
+#windowing = auto
 
 [rc_options]
 # User-configurable options
@@ -67,7 +67,7 @@
 # FltkAgg, MacOSX, Pdf, Ps, QtAgg, Qt4Agg, SVG, TkAgg, WX, WXAgg.
 #
 # The Agg, Ps, Pdf and SVG backends do not require external
-# dependencies. Do not choose GTK, GTKAgg, GTKCairo, MacOSX, TkAgg or WXAgg
+# dependencies. Do not choose GTK, GTKAgg, GTKCairo, MacOSX, or TkAgg
 # if you have disabled the relevant extension modules.  Agg will be used
 # by default.
 #
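Putting the new template sections together, a filled-in setup.cfg might look like the sketch below (the values are purely illustrative, not recommendations):

    [status]
    suppress = False

    [packages]
    tests = False
    sample_data = True
    toolkits = True

    [gui_support]
    tkagg = auto
    macosx = False

    [rc_options]
    backend = Agg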
diff --git a/setup.py b/setup.py
index e37eb485bbc3..c0b37c7c0421 100644
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,15 @@
 """
-You will need to have freetype, libpng and zlib installed to compile
-matplotlib, inlcuding the *-devel versions of these libraries if you
-are using a package manager like RPM or debian.
-
 The matplotlib build options can be modified with a setup.cfg file. See
 setup.cfg.template for more information.
 """
-from __future__ import print_function
+
+from __future__ import print_function, absolute_import
+
+# This needs to be the very first thing to use distribute
+from distribute_setup import use_setuptools
+use_setuptools()
+
+import sys
 
 # distutils is breaking our sdists for files in symlinked dirs.
 # distutils will copy if os.link is not available, so this is a hack
@@ -17,62 +20,75 @@
 except AttributeError:
     pass
 
-# This dict will be updated as we try to select the best option during
-# the build process. However, values in setup.cfg will be used, if
-# defined.
-rc = {'backend':'Agg'}
-
-# BEFORE importing disutils, remove MANIFEST. distutils doesn't properly
-# update it when the contents of directories change.
-import os
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
-
-import sys
-major, minor1, minor2, s, tmp = sys.version_info
+# This 'if' statement is needed to prevent spawning infinite processes
+# on Windows
+if __name__ == '__main__':
+    # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
+    # update it when the contents of directories change.
+    if os.path.exists('MANIFEST'):
+        os.remove('MANIFEST')
 
-if major==2 and minor1<6 or major<2:
-    raise SystemExit("""matplotlib requires Python 2.6 or later.""")
-
-import glob
-from distutils.core import setup
 try:
-    from distutils.command.build_py import build_py_2to3 as build_py
+    from setuptools.core import setup
 except ImportError:
-    from distutils.command.build_py import build_py
-from setupext import build_agg, build_gtkagg, build_tkagg,\
-     build_macosx, build_ft2font, build_image, build_windowing, build_path, \
-     build_contour, build_delaunay, build_gdk, \
-     build_ttconv, print_line, print_status, print_message, \
-     print_raw, check_for_freetype, check_for_libpng, check_for_gtk, \
-     check_for_tk, check_for_macosx, check_for_numpy, \
-     check_for_qt, check_for_qt4, check_for_pyside, check_for_cairo, \
-     check_provide_pytz, check_provide_dateutil,\
-     check_for_dvipng, check_for_ghostscript, check_for_latex, \
-     check_for_pdftops, options, build_png, build_tri, check_provide_six, \
-     check_for_tornado
-
-
-packages = [
-    'matplotlib',
-    'matplotlib.backends',
-    'matplotlib.backends.qt4_editor',
-    'matplotlib.projections',
-    'matplotlib.testing',
-    'matplotlib.testing.jpl_units',
-    'matplotlib.tests',
-    'mpl_toolkits',
-    'mpl_toolkits.mplot3d',
-    'mpl_toolkits.axes_grid',
-    'mpl_toolkits.axes_grid1',
-    'mpl_toolkits.axisartist',
-    'matplotlib.sphinxext',
-    'matplotlib.tri',
-
+    from distutils.core import setup
+
+import setupext
+from setupext import print_line, print_raw, print_message, print_status
+
+# Get the version from the source code
+__version__ = setupext.Matplotlib().check()
+
+
+# These are the packages in the order we want to display them.  This
+# list may contain strings to create section headers for the display.
+mpl_packages = [
+    'Building Matplotlib',
+    setupext.Matplotlib(),
+    setupext.Python(),
+    setupext.Platform(),
+    'Required dependencies and extensions',
+    setupext.Numpy(),
+    setupext.Dateutil(),
+    setupext.Tornado(),
+    setupext.Pyparsing(),
+    setupext.CXX(),
+    setupext.LibAgg(),
+    setupext.FreeType(),
+    setupext.FT2Font(),
+    setupext.Png(),
+    setupext.Image(),
+    setupext.TTConv(),
+    setupext.Path(),
+    setupext.Contour(),
+    setupext.Delaunay(),
+    setupext.Tri(),
+    'Optional subpackages',
+    setupext.SampleData(),
+    setupext.Toolkits(),
+    setupext.Tests(),
+    'Optional backend extensions',
+    # These backends are listed in order of preference, the first
+    # being the most preferred.  The first one that looks like it will
+    # work will be selected as the default backend.
+    setupext.BackendMacOSX(),
+    setupext.BackendQt4(),
+    setupext.BackendGtk3Agg(),
+    setupext.BackendGtk3Cairo(),
+    setupext.BackendGtkAgg(),
+    setupext.BackendTkAgg(),
+    setupext.BackendWxAgg(),
+    setupext.BackendGtk(),
+    setupext.BackendQt(),
+    setupext.BackendAgg(),
+    setupext.BackendCairo(),
+    'Optional LaTeX dependencies',
+    setupext.DviPng(),
+    setupext.Ghostscript(),
+    setupext.LaTeX(),
+    setupext.PdfToPs()
     ]
 
-py_modules = ['pylab']
-
-ext_modules = []
 
 classifiers = [
     'Development Status :: 5 - Production/Stable',
@@ -84,260 +100,132 @@
     'Topic :: Scientific/Engineering :: Visualization',
     ]
 
-for line in open('lib/matplotlib/__init__.py').readlines():
-    if (line.startswith('__version__')):
-        exec(line.strip())
-
-print_line()
-print_raw("BUILDING MATPLOTLIB")
-print_status('matplotlib', __version__)
-print_status('python', sys.version)
-print_status('platform', sys.platform)
-if sys.platform == 'win32':
-    print_status('Windows version', sys.getwindowsversion())
-print_raw("")
-print_raw("REQUIRED DEPENDENCIES")
-
-# Specify all the required mpl data
-package_data = {'matplotlib':['mpl-data/fonts/afm/*.afm',
-                              'mpl-data/fonts/pdfcorefonts/*.afm',
-                              'mpl-data/fonts/pdfcorefonts/*.txt',
-                              'mpl-data/fonts/ttf/*.ttf',
-                              'mpl-data/fonts/ttf/LICENSE_STIX',
-                              'mpl-data/fonts/ttf/COPYRIGHT.TXT',
-                              'mpl-data/fonts/ttf/README.TXT',
-                              'mpl-data/fonts/ttf/RELEASENOTES.TXT',
-                              'mpl-data/images/*.xpm',
-                              'mpl-data/images/*.svg',
-                              'mpl-data/images/*.gif',
-                              'mpl-data/images/*.png',
-                              'mpl-data/images/*.ppm',
-                              'mpl-data/example/*.npy',
-                              'mpl-data/matplotlibrc',
-                              'mpl-data/*.glade',
-                              'mpl-data/sample_data/*.*',
-                              'mpl-data/sample_data/axes_grid/*.*',
-                              'backends/Matplotlib.nib/*',
-                              'backends/web_backend/*.*',
-                              'backends/web_backend/jquery/js/*',
-                              'backends/web_backend/jquery/css/themes/base/*.*',
-                              'backends/web_backend/jquery/css/themes/base/images/*',
-                              'backends/web_backend/css/*.*',
-
-                              ]}
-
-package_dir = {'': 'lib'}
-
-if 1:
-    # TODO: exclude these when making release?
-    baseline_images = glob.glob(os.path.join('lib','matplotlib','tests',
-                                             'baseline_images','*','*'))
-    def chop_package(fname):
-        badstr = os.path.join('lib','matplotlib','')
-        assert fname.startswith(badstr)
-        result = fname[ len(badstr): ]
-        return result
-    baseline_images = [chop_package(f) for f in baseline_images]
-    package_data['matplotlib'].extend(baseline_images)
-    package_data['matplotlib'].append('tests/mpltest.ttf')
-    package_data['matplotlib'].append('tests/test_rcparams.rc')
-
-if not check_for_numpy(__version__numpy__):
-    sys.exit(1)
-
-if not check_for_freetype():
-    sys.exit(1)
-
-build_ft2font(ext_modules, packages)
-build_ttconv(ext_modules, packages)
-build_contour(ext_modules, packages)
-build_delaunay(ext_modules, packages)
-build_path(ext_modules, packages)
-build_tri(ext_modules, packages)
-
-print_raw("")
-print_raw("OPTIONAL BACKEND DEPENDENCIES")
-has_libpng = check_for_libpng()
-
-if has_libpng and options['build_agg']:
-    build_agg(ext_modules, packages)
-    rc['backend'] = 'Agg'
-else:
-    rc['backend'] = 'SVG'
-
-if has_libpng and options['build_image']:
-    build_image(ext_modules, packages)
-
-if has_libpng and options['build_agg'] or options['build_image']:
-    build_png(ext_modules, packages)
-
-if options['build_windowing'] and sys.platform=='win32':
-   build_windowing(ext_modules, packages)
-
-# the options can be True, False, or 'auto'. If True, try to build
-# regardless of the lack of dependencies. If auto, silently skip
-# when dependencies are missing.
-if options['build_tkagg']:
-    if check_for_tk() or (options['build_tkagg'] is True):
-        options['build_agg'] = 1
-        build_tkagg(ext_modules, packages)
-        rc['backend'] = 'TkAgg'
-
-hasgtk = check_for_gtk()
-if options['build_gtk']:
-    if hasgtk or (options['build_gtk'] is True):
-        build_gdk(ext_modules, packages)
-if options['build_gtkagg']:
-    if hasgtk or (options['build_gtkagg'] is True):
-        options['build_agg'] = 1
-        build_gtkagg(ext_modules, packages)
-        rc['backend'] = 'GTKAgg'
-
-if options['build_macosx']:
-    if check_for_macosx() or (options['build_macosx'] is True):
-        build_macosx(ext_modules, packages)
-        rc['backend'] = 'MacOSX'
-
-# These are informational only.  We don't build any extensions for them.
-check_for_qt()
-check_for_qt4()
-check_for_pyside()
-check_for_cairo()
-check_for_tornado()
-
-print_raw("")
-print_raw("OPTIONAL DATE/TIMEZONE DEPENDENCIES")
-
-provide_dateutil = check_provide_dateutil()
-provide_pytz = check_provide_pytz()
-provide_six = check_provide_six()
-
-def add_pytz():
-    packages.append('pytz')
-
-    resources = ['zone.tab', 'locales/pytz.pot']
-    for dirpath, dirnames, filenames in os.walk(
-        os.path.join('lib', 'pytz', 'zoneinfo')
-        ):
-
-        # remove the 'pytz' part of the path
-        basepath = os.path.join(*dirpath.split(os.path.sep)[2:])
-        #print dirpath, basepath
-        resources.extend([os.path.join(basepath, filename)
-                          for filename in filenames])
-    package_data['pytz'] = resources
-    #print resources
-    assert len(resources) > 10, 'zoneinfo files not found!'
-
-def add_dateutil():
-    packages.append('dateutil')
-    packages.append('dateutil.zoneinfo')
-    package_data['dateutil'] = ['zoneinfo/*.tar.gz']
-    if sys.version_info[0] >= 3:
-        package_dir['dateutil'] = 'lib/dateutil_py3'
-    else:
-        package_dir['dateutil'] = 'lib/dateutil_py2'
-
-def add_six():
-    py_modules.append('six')
-
-if sys.platform=='win32':
-    # always add these to the win32 installer
-    add_pytz()
-    add_dateutil()
-    add_six()
-else:
-    # only add them if we need them
-    if provide_pytz:
-        add_pytz()
-    if provide_dateutil:
-        add_dateutil()
-    if provide_six:
-        add_six()
-
-print_raw("")
-print_raw("OPTIONAL USETEX DEPENDENCIES")
-check_for_dvipng()
-check_for_ghostscript()
-check_for_latex()
-check_for_pdftops()
-
-print_raw("")
-print_raw("[Edit setup.cfg to suppress the above messages]")
-print_line()
-
-# Write the default matplotlibrc file
-if options['backend']: rc['backend'] = options['backend']
-template = open('matplotlibrc.template').read()
-open('lib/matplotlib/mpl-data/matplotlibrc', 'w').write(template%rc)
-
-try: additional_params # has setupegg.py provided
-except NameError: additional_params = {}
-
-for mod in ext_modules:
-    if options['verbose']:
-        mod.extra_compile_args.append('-DVERBOSE')
-
-if sys.version_info[0] >= 3:
-    def should_2to3(file, root):
-        file = os.path.abspath(file)[len(os.path.abspath(root))+1:]
-        if ('py3' in file or
-            file.startswith('pytz') or
-            file.startswith('dateutil') or
-            file.startswith('six')):
-            return False
-        return True
-
-    import multiprocessing
-    def refactor(x):
-        from lib2to3.refactor import RefactoringTool, get_fixers_from_package
-        class DistutilsRefactoringTool(RefactoringTool):
-            def ignore(self, msg, *args, **kw):
-                pass
-            log_error = log_message = log_debug = ignore
-        fixer_names = get_fixers_from_package('lib2to3.fixes')
-        r = DistutilsRefactoringTool(fixer_names, options=None)
-        r.refactor([x], write=True)
-
-    original_build_py = build_py
-    class build_py(original_build_py):
-        def run_2to3(self, files):
-            # We need to skip certain files that have already been
-            # converted to Python 3.x
-            filtered = [x for x in files if should_2to3(x, self.build_lib)]
-            if sys.platform.startswith('win') or 'TRAVIS' in os.environ:
-                # doing this in parallel on windows may crash your computer
-                [refactor(f) for f in filtered]
+# One doesn't normally see `if __name__ == '__main__'` blocks in a setup.py;
+# however, this is needed on Windows to avoid creating infinite subprocesses
+# when using multiprocessing.
+if __name__ == '__main__':
+    # These are distutils.setup parameters that the various packages add
+    # things to.
+    packages = []
+    py_modules = []
+    ext_modules = []
+    package_data = {}
+    package_dir = {'': 'lib'}
+    install_requires = []
+    default_backend = None
+
+
+    # Go through all of the packages and figure out which ones we are
+    # going to build/install.
+    print_line()
+    print_raw("Edit setup.cfg to change the build options")
+
+    required_failed = []
+    good_packages = []
+    for package in mpl_packages:
+        if isinstance(package, str):
+            print_raw('')
+            print_raw(package.upper())
+        else:
+            try:
+                result = package.check()
+                if result is not None:
+                    message = 'yes [%s]' % result
+                    print_status(package.name, message)
+            except setupext.CheckFailed as e:
+                print_status(package.name, 'no  [%s]' % str(e))
+                if not package.optional:
+                    required_failed.append(package)
             else:
-                p = multiprocessing.Pool()
-                for i, x in enumerate(p.imap_unordered(refactor, filtered)):
-                    print("Running 2to3... %.02f%%" %
-                          (float(i) / len(filtered) * 100.0), end='\r')
-            print()
-
-print_raw("pymods %s" % py_modules)
-print_raw("packages %s" % packages)
-distrib = setup(name="matplotlib",
-      version= __version__,
-      description = "Python plotting package",
-      author = "John D. Hunter, Michael Droettboom",
-      author_email="mdroe@stsci.edu",
-      url = "http://matplotlib.org",
-      long_description = """
-      matplotlib strives to produce publication quality 2D graphics
-      for interactive graphing, scientific publishing, user interface
-      development and web application servers targeting multiple user
-      interfaces and hardcopy output formats.  There is a 'pylab' mode
-      which emulates matlab graphics
-      """,
-      download_url="https://downloads.sourceforge.net/project/matplotlib/matplotlib/matplotlib-{0}/matplotlib-{0}.tar.gz".format(__version__),
-      packages = packages,
-      platforms='any',
-      py_modules = py_modules,
-      ext_modules = ext_modules,
-      package_dir = package_dir,
-      package_data = package_data,
-      classifiers = classifiers,
-      cmdclass = {'build_py': build_py},
-      **additional_params
-      )
+                good_packages.append(package)
+                if isinstance(package, setupext.OptionalBackendPackage):
+                    if default_backend is None:
+                        default_backend = package.name
+    print_raw('')
+
+
+    # Abort if any of the required packages can not be built.
+    if required_failed:
+        print_line()
+        print_message(
+            "The following required packages can not "
+            "be built: %s" %
+            ', '.join(x.name for x in required_failed))
+        sys.exit(1)
+
+
+    # Now collect all of the information we need to build all of the
+    # packages.
+    for package in good_packages:
+        if isinstance(package, str):
+            continue
+        packages.extend(package.get_packages())
+        py_modules.extend(package.get_py_modules())
+        ext = package.get_extension()
+        if ext is not None:
+            ext_modules.append(ext)
+        data = package.get_package_data()
+        for key, val in data.items():
+            package_data.setdefault(key, [])
+            package_data[key] = list(set(val + package_data[key]))
+        install_requires.extend(package.get_install_requires())
+
+    # Write the default matplotlibrc file
+    if default_backend is None:
+        default_backend = 'svg'
+    if setupext.options['backend']:
+        default_backend = setupext.options['backend']
+    with open('matplotlibrc.template') as fd:
+        template = fd.read()
+    with open('lib/matplotlib/mpl-data/matplotlibrc', 'w') as fd:
+        fd.write(template % {'backend': default_backend})
+
+
+    # Build in verbose mode if requested
+    if setupext.options['verbose']:
+        for mod in ext_modules:
+            mod.extra_compile_args.append('-DVERBOSE')
+
+
+    # Finally, pass this all along to distutils to do the heavy lifting.
+    distrib = setup(name="matplotlib",
+          version=__version__,
+          description="Python plotting package",
+          author="John D. Hunter, Michael Droettboom",
+          author_email="mdroe@stsci.edu",
+          url="http://matplotlib.org",
+          long_description="""
+          matplotlib strives to produce publication quality 2D graphics
+          for interactive graphing, scientific publishing, user interface
+          development and web application servers targeting multiple user
+          interfaces and hardcopy output formats.  There is a 'pylab' mode
+          which emulates matlab graphics.
+          """,
+          license="BSD",
+          packages=packages,
+          platforms='any',
+          py_modules=py_modules,
+          ext_modules=ext_modules,
+          package_dir=package_dir,
+          package_data=package_data,
+          classifiers=classifiers,
+          download_url="https://downloads.sourceforge.net/project/matplotlib/matplotlib/matplotlib-{0}/matplotlib-{0}.tar.gz".format(__version__),
+
+          # List third-party Python packages that we require
+          install_requires=install_requires,
+
+          # Automatically 2to3 source on Python 3.x
+          use_2to3=True,
+
+          # matplotlib has C/C++ extensions, so it's not zip safe.
+          # Telling setuptools this prevents it from doing an automatic
+          # check for zip safety.
+          zip_safe=False,
+
+          # Install our nose plugin so it will always be found
+          entry_points={
+              'nose.plugins.0.10': [
+                  'KnownFailure = matplotlib.testing.noseclasses:KnownFailure'
+                ]
+            },
+         )
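The build loop above relies only on the small interface provided by setupext.SetupPackage (defined later in this patch): check() plus the get_packages/get_py_modules/get_extension/get_package_data/get_install_requires accessors, along with the name and optional attributes. As a sketch of how a hypothetical optional C dependency could plug into it (MyLib, mylib, mylib.h and the source path are made up for illustration), one would add to setupext.py:

    class MyLib(OptionalPackage):
        # 'mylib' would also be the option name in the [packages]
        # section of setup.cfg
        name = "mylib"

        def check(self):
            # respect an explicit 'mylib = False' in setup.cfg
            super(MyLib, self).check()
            # then look for the headers/version via pkg-config
            return self._check_for_pkg_config('mylib', 'mylib.h',
                                              min_version='1.0')

        def get_extension(self):
            ext = make_extension('matplotlib._mylib', ['src/_mylib.cpp'])
            pkg_config.setup_extension(ext, 'mylib')
            return ext

and list setupext.MyLib() in mpl_packages above.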
diff --git a/setupegg.py b/setupegg.py
deleted file mode 100644
index 7552ed3787b6..000000000000
--- a/setupegg.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-Poor man's setuptools script...
-"""
-
-import os
-import sys
-from setuptools import setup
-
-# Setupegg assumes the install tree and source tree are exactly the same. Since
-# this is not the case, symlink the correct dateutil dir depending on which
-# version of python is used
-if not os.path.isdir('lib/dateutil'):
-    if sys.version_info[0] >= 3:
-        os.symlink('dateutil_py3', 'lib/dateutil')
-    else:
-        os.symlink('dateutil_py2', 'lib/dateutil')
-
-execfile('setup.py',
-         {'additional_params' :
-          {'namespace_packages' : ['mpl_toolkits'],
-           #'entry_points': {'nose.plugins':  ['KnownFailure =  matplotlib.testing.noseclasses:KnownFailure', ] }
-           }})
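With setup.py now bootstrapping setuptools itself and declaring install_requires and entry_points directly, the separate egg entry point above becomes redundant; presumably an in-place/egg install simply goes through the standard setuptools commands from now on, e.g.

    > python setup.py develop

(an inference from this patch rather than something it states).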
diff --git a/setupext.py b/setupext.py
index 4a3ec79e9c0d..89bf0cdd998c 100644
--- a/setupext.py
+++ b/setupext.py
@@ -1,184 +1,178 @@
-"""
-Some helper functions for building the C extensions
-
-You may need to use the "basedirlist" option in setup.cfg to point
-to the location of your required libs, eg, png, z, freetype,
-overriding the settings hard-coded in the "basedir" directory
-below.
-
-DARWIN
-
-  I have installed all of the backends on OSX.
-
-  Tk: If you want to install TkAgg, I recommend the "batteries included"
-  binary build of Tcl/Tk at
-  http://www.apple.com/downloads/macosx/unix_open_source/tcltkaqua.html
-
-  GTK: I installed GTK from src as described at
-  http://www.macgimp.org/index.php?topic=gtk.  There are several
-  packages, but all configure/make/make install w/o problem.  In
-  addition to the packages listed there, You will also need libpng,
-  libjpeg, and libtiff if you want output to these formats from GTK.
-
-WIN32 - MINGW
-
-  If you are sufficiently masochistic that you want to build this
-  yourself, download the win32_static dir from
-  http://matplotlib.sourceforge.net/win32_static.tar.gz and
-  see the README file in that dir
-
-  > python setup.py build --compiler=mingw32 bdist_wininst  > build23.out
-
-  NOTE, if you are building on python24 on win32, see
-  http://mail.python.org/pipermail/python-list/2004-December/254826.html
-
-WIN32 - VISUAL STUDIO 7.1 (2003)
-
-  This build is similar to the mingw.  Download the visual studio static
-  dependencies from
-  http://matplotlib.sourceforge.net/win32_static_vs.tar.gz and
-  see the README in that dir
-
-  > python setup.py build --compiler=msvc bdist_wininst
-
-"""
+from __future__ import print_function, absolute_import
 
+from distutils import sysconfig
+from distutils import version
+from distutils.core import Extension
+import glob
+import io
+import multiprocessing
 import os
 import re
 import subprocess
-from distutils import sysconfig, version
-from collections import defaultdict
-
-# basedir is a dictionary keyed by sys.platform, and on most platforms it is
-# set to ['/usr/local', '/usr']. Giving this defaultdict a factory that
-# returns this default removes the need to update this code every time a new
-# version of freebsd comes out, for example, provided that the default basedir
-# remains sufficient on that platform
-basedir = defaultdict(lambda: ['/usr/local', '/usr'], {
-    # execptions to the ['/usr/local', '/usr'] defaults
-    'win32'  : ['win32_static',],
-    'darwin' : ['/usr/local/', '/usr', '/usr/X11', '/opt/local'],
-    'sunos5' : [os.getenv('MPLIB_BASE') or '/usr/local',],
-    'gnu0' : ['/usr'],
-    'aix5' : ['/usr/local'],
-    })
-
 import sys
-
 from textwrap import fill
-from distutils.core import Extension
-import glob
+
+
+try:
+    from subprocess import check_output
+except ImportError:
+    # check_output is not available in Python 2.6
+    def check_output(*popenargs, **kwargs):
+        """
+        Run command with arguments and return its output as a byte
+        string.
+
+        Backported from Python 2.7, where it is implemented in pure
+        Python in the stdlib.
+        """
+        process = subprocess.Popen(
+            stdout=subprocess.PIPE, *popenargs, **kwargs)
+        output, unused_err = process.communicate()
+        retcode = process.poll()
+        if retcode:
+            cmd = kwargs.get("args")
+            if cmd is None:
+                cmd = popenargs[0]
+            error = subprocess.CalledProcessError(retcode, cmd)
+            error.output = output
+            raise error
+        return output
+
+
+if sys.platform != 'win32':
+    if sys.version_info[0] < 3:
+        from commands import getstatusoutput
+    else:
+        from subprocess import getstatusoutput
+
 
 if sys.version_info[0] < 3:
     import ConfigParser as configparser
-    from cStringIO import StringIO
-    if sys.platform != 'win32':
-        from commands import getstatusoutput
 else:
     import configparser
-    from io import StringIO
-    if sys.platform != 'win32':
-        from subprocess import getstatusoutput
 
-BUILT_PNG       = False
-BUILT_AGG       = False
-BUILT_FT2FONT   = False
-BUILT_TTCONV    = False
-BUILT_GTKAGG    = False
-BUILT_IMAGE     = False
-BUILT_MACOSX    = False
-BUILT_TKAGG     = False
-BUILT_WINDOWING = False
-BUILT_CONTOUR   = False
-BUILT_DELAUNAY  = False
-BUILT_CONTOUR   = False
-BUILT_GDK       = False
-BUILT_PATH      = False
-BUILT_TRI       = False
-
-AGG_VERSION = 'agg24'
-TCL_TK_CACHE = None
-
-# for nonstandard installation/build with --prefix variable
-numpy_inc_dirs = []
 
 # matplotlib build options, which can be altered using setup.cfg
-options = {'display_status': True,
-           'verbose': False,
-           'provide_pytz': 'auto',
-           'provide_dateutil': 'auto',
-           'provide_six': 'auto',
-           'build_agg': True,
-           'build_gtk': 'auto',
-           'build_gtkagg': 'auto',
-           'build_tkagg': 'auto',
-           'build_macosx': 'auto',
-           'build_image': True,
-           'build_windowing': True,
-           'backend': None,
-           'basedirlist': None}
-
-defines = [
-        ('PY_ARRAY_UNIQUE_SYMBOL', 'MPL_ARRAY_API'),
-        ('PYCXX_ISO_CPP_LIB', '1')]
-
-if sys.version_info[0] >= 3:
-    defines.append(('PYCXX_PYTHON_2TO3', '1'))
+options = {
+    'display_status': True,
+    'verbose': False,
+    'backend': None,
+    'basedirlist': None
+    }
+
 
 setup_cfg = os.environ.get('MPLSETUPCFG', 'setup.cfg')
-# Based on the contents of setup.cfg, determine the build options
 if os.path.exists(setup_cfg):
     config = configparser.SafeConfigParser()
     config.read(setup_cfg)
 
-    try: options['display_status'] = not config.getboolean("status", "suppress")
-    except: pass
+    try:
+        options['display_status'] = not config.getboolean("status", "suppress")
+    except:
+        pass
 
-    try: options['verbose'] = not config.getboolean("status", "verbose")
-    except: pass
+    try:
+        options['backend'] = config.get("rc_options", "backend")
+    except:
+        pass
 
-    try: options['provide_pytz'] = config.getboolean("provide_packages", "pytz")
-    except: options['provide_pytz'] = 'auto'
+    try:
+        options['basedirlist'] = config.get("directories", "basedirlist")
+    except:
+        pass
+else:
+    config = None
 
-    try: options['provide_dateutil'] = config.getboolean("provide_packages",
-                                                         "dateutil")
-    except: options['provide_dateutil'] = 'auto'
 
-    try: options['provide_six'] = config.getboolean("provide_packages",
-                                                    "six")
-    except: options['provide_six'] = 'auto'
+def get_win32_compiler():
+    """
+    Determine the compiler being used on win32.
+    """
+    # Used to determine mingw32 or msvc
+    # This is pretty bad logic; does someone know a better way?
+    for v in sys.argv:
+        if 'mingw32' in v:
+            return 'mingw32'
+    return 'msvc'
+win32_compiler = get_win32_compiler()
 
-    try: options['build_gtk'] = config.getboolean("gui_support", "gtk")
-    except: options['build_gtk'] = 'auto'
 
-    try: options['build_gtkagg'] = config.getboolean("gui_support", "gtkagg")
-    except: options['build_gtkagg'] = 'auto'
+def extract_versions():
+    """
+    Extracts version values from the main matplotlib __init__.py and
+    returns them as a dictionary.
+    """
+    with open('lib/matplotlib/__init__.py') as fd:
+        for line in fd.readlines():
+            if (line.startswith('__version__')):
+                exec(line.strip())
+    return locals()
 
-    try: options['build_tkagg'] = config.getboolean("gui_support", "tkagg")
-    except: options['build_tkagg'] = 'auto'
 
-    try: options['build_macosx'] = config.getboolean("gui_support", "macosx")
-    except: options['build_macosx'] = 'auto'
+def has_include_file(include_dirs, filename):
+    """
+    Returns `True` if `filename` can be found in one of the
+    directories in `include_dirs`.
+    """
+    for dir in include_dirs:
+        if os.path.exists(os.path.join(dir, filename)):
+            return True
+    return False
 
-    try: options['backend'] = config.get("rc_options", "backend")
-    except: pass
 
-    try: options['basedirlist'] = config.get("directories", "basedirlist")
-    except: pass
+def check_include_file(include_dirs, filename, package):
+    """
+    Raises an exception if the given include file can not be found.
+    """
+    if sys.platform == 'win32':
+        include_dirs.extend(os.getenv('INCLUDE', '.').split(';'))
+    if not has_include_file(include_dirs, filename):
+        raise CheckFailed(
+            "The C/C++ header for %s (%s) could not be found.  You "
+            "may need to install the development package." %
+            (package, filename))
 
-# For get_base_flags:
-if options['basedirlist']:
-    basedirlist = options['basedirlist'].split()
-else:
-    basedirlist = basedir[sys.platform]
-print("basedirlist is: %s" % basedirlist)
 
-def make_extension(*args, **kwargs):
-    ext = Extension(*args, **kwargs)
-    for dir in basedirlist:
-        ext.include_dirs.append(os.path.join(dir, 'include'))
-    return ext
+def get_base_dirs():
+    """
+    Returns a list of standard base directories on this platform.
+    """
+    if options['basedirlist']:
+        return options['basedirlist']
 
+    basedir_map = {
+        'win32': ['win32_static',],
+        'darwin': ['/usr/local/', '/usr', '/usr/X11', '/opt/local'],
+        'sunos5': [os.getenv('MPLIB_BASE') or '/usr/local',],
+        'gnu0': ['/usr'],
+        'aix5': ['/usr/local'],
+        }
+    return basedir_map.get(sys.platform, ['/usr/local', '/usr'])
+
+
+def run_child_process(cmd):
+    """
+    Run a subprocess as a sanity check.
+    """
+    p = subprocess.Popen(cmd, shell=True,
+                         stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT,
+                         close_fds=(sys.platform != 'win32'))
+    return p.stdin, p.stdout
+
+
+def is_min_version(found, minversion):
+    """
+    Returns `True` if `found` is at least as high a version as
+    `minversion`.
+    """
+    expected_version = version.LooseVersion(minversion)
+    found_version = version.LooseVersion(found)
+    return found_version >= expected_version
+
+
+# Define the display functions only if display_status is True.
 if options['display_status']:
     def print_line(char='='):
         print(char * 76)
@@ -203,6 +197,7 @@ def print_line(*args, **kwargs):
         pass
     print_status = print_message = print_raw = print_line
 
+
 # Remove the -Wstrict-prototypes option, as it's not valid for C++
 customize_compiler = sysconfig.customize_compiler
 def my_customize_compiler(compiler):
@@ -216,1181 +211,1491 @@ def my_customize_compiler(compiler):
 sysconfig.customize_compiler = my_customize_compiler
 
 
+def make_extension(name, files, *args, **kwargs):
+    """
+    Make a new extension.  Automatically sets include_dirs and
+    library_dirs to the base directories appropriate for this
+    platform.
 
-def run_child_process(cmd):
-    p = subprocess.Popen(cmd, shell=True,
-                         stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT,
-                         close_fds=(sys.platform != 'win32'))
-    return p.stdin, p.stdout
+    `name` is the name of the extension.
 
-class CleanUpFile:
-    """CleanUpFile deletes the specified filename when self is destroyed."""
-    def __init__(self, name):
-        self.name = name
-    def __del__(self):
-        os.remove(self.name)
+    `files` is a list of source files.
 
-def temp_copy(_from, _to):
-    """temp_copy copies a named file into a named temporary file.
-    The temporary will be deleted when the setupext module is destructed.
+    Any additional arguments are passed to the
+    `distutils.core.Extension` constructor.
     """
-    # Copy the file data from _from to _to
-    s = open(_from).read()
-    open(_to,"w+").write(s)
-    # Suppress object rebuild by preserving time stamps.
-    stats = os.stat(_from)
-    os.utime(_to, (stats.st_atime, stats.st_mtime))
-    # Make an object to eliminate the temporary file at exit time.
-    globals()["_cleanup_"+_to] = CleanUpFile(_to)
+    ext = Extension(name, files, *args, **kwargs)
+    for dir in get_base_dirs():
+        include_dir = os.path.join(dir, 'include')
+        if os.path.exists(include_dir):
+            ext.include_dirs.append(include_dir)
+        for lib in ('lib', 'lib64'):
+            lib_dir = os.path.join(dir, lib)
+            if os.path.exists(lib_dir):
+                ext.library_dirs.append(lib_dir)
+    ext.include_dirs.append('.')
 
-def get_win32_compiler():
-    # Used to determine mingw32 or msvc
-    # This is pretty bad logic, someone know a better way?
-    for v in sys.argv:
-        if 'mingw32' in v:
-            return 'mingw32'
-    return 'msvc'
-win32_compiler = get_win32_compiler()
-if sys.platform == 'win32' and win32_compiler == 'msvc':
-    std_libs = []
-else:
-    std_libs = ['stdc++', 'm']
+    return ext
 
-def set_pkgconfig_path():
-    pkgconfig_path = sysconfig.get_config_var('LIBDIR')
-    if pkgconfig_path is None:
-        return
 
-    pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
-    if not os.path.isdir(pkgconfig_path):
-        return
+class PkgConfig(object):
+    """
+    This is a class for communicating with pkg-config.
+    """
+    def __init__(self):
+        """
+        Determines whether pkg-config exists on this machine.
+        """
+        if sys.platform == 'win32':
+            self.has_pkgconfig = False
+        else:
+            self.set_pkgconfig_path()
+            status, output = getstatusoutput("pkg-config --help")
+            self.has_pkgconfig = (status == 0)
 
-    try:
-        os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
-    except KeyError:
-        os.environ['PKG_CONFIG_PATH'] = pkgconfig_path
+    def set_pkgconfig_path(self):
+        pkgconfig_path = sysconfig.get_config_var('LIBDIR')
+        if pkgconfig_path is None:
+            return
 
-def has_pkgconfig():
-    if has_pkgconfig.cache is not None:
-        return has_pkgconfig.cache
-    if sys.platform == 'win32':
-        has_pkgconfig.cache = False
-    else:
-        #print 'environ',  os.environ['PKG_CONFIG_PATH']
-        status, output = getstatusoutput("pkg-config --help")
-        has_pkgconfig.cache = (status == 0)
-
-        # Set the PKG_CONFIG_PATH environment variable
-        if has_pkgconfig.cache:
-            set_pkgconfig_path()
-    return has_pkgconfig.cache
-has_pkgconfig.cache = None
-
-def get_pkgconfig(module,
-                  packages,
-                  flags="--libs --cflags",
-                  pkg_config_exec='pkg-config',
-                  report_error=False):
-    """Loosely based on an article in the Python Cookbook:
-    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/502261"""
-    if not has_pkgconfig():
-        return False
+        pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
+        if not os.path.isdir(pkgconfig_path):
+            return
 
-    _flags = {'-I': 'include_dirs',
-              '-L': 'library_dirs',
-              '-l': 'libraries',
-              '-D': 'define_macros',
-              '-U': 'undef_macros'}
-
-    cmd = "%s %s %s" % (pkg_config_exec, flags, packages)
-    status, output = getstatusoutput(cmd)
-    if status == 0:
-        for token in output.split():
-            attr = _flags.get(token[:2], None)
-            if attr is not None:
-                if token[:2] == '-D':
-                    value = tuple(token[2:].split('='))
-                    if len(value) == 1:
-                        value = (value[0], None)
-                else:
-                    value = token[2:]
-                set = getattr(module, attr)
-                if value not in set:
-                    set.append(value)
+        try:
+            os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
+        except KeyError:
+            os.environ['PKG_CONFIG_PATH'] = pkgconfig_path
+
+    def setup_extension(self, ext, package, default_include_dirs=[],
+                        default_library_dirs=[], default_libraries=[]):
+        """
+        Add parameters to the given `ext` for the given `package`.
+        """
+        flag_map = {
+            '-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
+        command = "pkg-config --libs --cflags " + package
+
+        use_defaults = True
+        if self.has_pkgconfig:
+            try:
+                output = check_output(command, shell=True)
+            except subprocess.CalledProcessError:
+                pass
             else:
-                if token not in module.extra_link_args:
-                    module.extra_link_args.append(token)
-        return True
-    if report_error:
-        print_status("pkg-config", "looking for %s" % packages)
-        print_message(output)
-    return False
+                output = output.decode(sys.getfilesystemencoding())
+                use_defaults = False
+                for token in output.split():
+                    attr = flag_map.get(token[:2])
+                    if attr is not None:
+                        getattr(ext, attr).append(token[2:])
+
+        if use_defaults:
+            basedirs = get_base_dirs()
+            for base in basedirs:
+                for include in default_include_dirs:
+                    dir = os.path.join(base, include)
+                    if os.path.exists(dir):
+                        ext.include_dirs.append(dir)
+                for lib in default_library_dirs:
+                    dir = os.path.join(base, lib)
+                    if os.path.exists(dir):
+                        ext.library_dirs.append(dir)
+            ext.libraries = default_libraries
+            return True
 
-def get_pkgconfig_version(package):
-    default = "found, but unknown version (no pkg-config)"
-    if not has_pkgconfig():
-        return default
+        return False
 
-    status, output = getstatusoutput(
-        "pkg-config %s --modversion" % (package))
-    if status == 0:
-        return output
-    return default
+    def get_version(self, package):
+        """
+        Get the version of the package from pkg-config.
+        """
+        if not self.has_pkgconfig:
+            return None
+
+        status, output = getstatusoutput(
+            "pkg-config %s --modversion" % (package))
+        if status == 0:
+            return output
+        return None
 
-def try_pkgconfig(module, package, fallback):
-    if not get_pkgconfig(module, package):
-        module.libraries.append(fallback)
+# The PkgConfig class should be used through this singleton
+pkg_config = PkgConfig()
 
-def find_include_file(include_dirs, filename):
-    for d in include_dirs:
-        if os.path.exists(os.path.join(d, filename)):
-            return True
-    return False
 
-def check_for_freetype():
-    module = make_extension('test', [])
-    add_base_flags(module)
-    if not get_pkgconfig(module, 'freetype2'):
-        basedirs = module.include_dirs[:]  # copy the list to avoid inf loop!
-        for d in basedirs:
-            module.include_dirs.append(os.path.join(d, 'freetype2'))
-
-    print_status("freetype2", get_pkgconfig_version('freetype2'))
-    if not find_include_file(module.include_dirs, 'ft2build.h'):
-        print_message(
-            "WARNING: Could not find 'freetype2' headers in any of %s." %
-            ", ".join(["'%s'" % x for x in module.include_dirs]))
-
-    return True
-
-def check_for_libpng():
-    module = make_extension("test", [])
-    get_pkgconfig(module, 'libpng')
-    add_base_flags(module)
-
-    print_status("libpng", get_pkgconfig_version('libpng'))
-    if not find_include_file(module.include_dirs, 'png.h'):
-        print_message(
-            "Could not find 'libpng' headers in any of %s" %
-            ", ".join(["'%s'" % x for x in module.include_dirs]))
-
-    return True
-
-def add_base_flags(module):
-    incdirs = filter(os.path.exists,
-                     [os.path.join(p, 'include') for p in basedirlist ])
-    libdirs = filter(os.path.exists,
-                     [os.path.join(p, 'lib')     for p in basedirlist ]+
-                     [os.path.join(p, 'lib64')     for p in basedirlist ] )
-
-    module.include_dirs.extend(incdirs)
-    module.include_dirs.append('.')
-    module.library_dirs.extend(libdirs)
-
-def getoutput(s):
-    'get the output of a system command'
-
-    ret =  os.popen(s).read().strip()
-    return ret
-
-def convert_qt_version(version):
-    version = '%x'%version
-    temp = []
-    while len(version) > 0:
-        version, chunk = version[:-2], version[-2:]
-        temp.insert(0, str(int(chunk, 16)))
-    return '.'.join(temp)
-
-def check_for_qt():
-    try:
-        import pyqtconfig
-    except ImportError:
-        print_status("Qt", "no")
-        return False
-    else:
+class CheckFailed(Exception):
+    """
+    Exception thrown when a `SetupPackage.check` method fails.
+    """
+    pass
+
+
+class SetupPackage(object):
+    optional = False
+
+    def check(self):
+        """
+        Checks whether the dependencies are met.  Should raise a
+        `CheckFailed` exception if the dependency could not be met,
+        otherwise return a string indicating a version number or some
+        other message indicating what was found.
+        """
+        pass
+
+    def get_packages(self):
+        """
+        Get a list of package names to add to the configuration.
+        These are added to the `packages` list passed to
+        `distutils.setup`.
+        """
+        return []
+
+    def get_py_modules(self):
+        """
+        Get a list of top-level modules to add to the configuration.
+        These are added to the `py_modules` list passed to
+        `distutils.setup`.
+        """
+        return []
+
+    def get_package_data(self):
+        """
+        Get a package data dictionary to add to the configuration.
+        These are merged into the `package_data` dictionary passed to
+        `distutils.setup`.
+        """
+        return {}
+
+    def get_extension(self):
+        """
+        Get the C extension (a `distutils.core.Extension` object) to
+        add to the configuration, or None if there is none.  It is
+        added to the `ext_modules` list passed to `distutils.setup`.
+        """
+        return None
+
+    def get_install_requires(self):
+        """
+        Get a list of Python packages that we require.
+        pip/easy_install will attempt to download and install this
+        package if it is not installed.
+        """
+        return []
+
+    def _check_for_pkg_config(self, package, include_file, min_version=None,
+                              version=None):
+        """
+        A convenience function for writing checks for a
+        pkg_config-defined dependency.
+
+        `package` is the pkg_config package name.
+
+        `include_file` is a top-level include file we expect to find.
+
+        `min_version` is the minimum version required.
+
+        `version`, if given, overrides the version reported by
+        pkg-config, for packages that need to determine it some other
+        way.
+        """
+        if version is None:
+            version = pkg_config.get_version(package)
+
+        if min_version == 'PATCH':
+            raise CheckFailed(
+                "Requires patches that have not been merged upstream.")
+
+        if min_version:
+            if (version is not None and
+                not is_min_version(version, min_version)):
+                raise CheckFailed(
+                    "Requires %s %s or later.  Found %s." %
+                    (package, min_version, version))
+
+        ext = self.get_extension()
+        if ext is None:
+            ext = make_extension('test', [])
+            pkg_config.setup_extension(ext, package)
+
+        check_include_file(ext.include_dirs, include_file, package)
+
+        return 'version %s' % version
+
+
+class OptionalPackage(SetupPackage):
+    optional = True
+
+    def get_config(self):
+        install = True
+        if config is not None:
+            try:
+                install = config.getboolean(
+                    'packages', self.name)
+            except:
+                pass
+        return install
+
+    def check(self):
+        self.install = self.get_config()
+        if not self.install:
+            raise CheckFailed("skipping due to configuration")
+        return "installing"
+
+
+class OptionalBackendPackage(SetupPackage):
+    optional = True
+
+    def get_config(self):
+        install = 'auto'
+        if config is not None:
+            try:
+                install = config.getboolean(
+                    'gui_support', self.name)
+            except:
+                install = 'auto'
+        if install is True:
+            self.optional = False
+        return install
+
+
+class Platform(SetupPackage):
+    name = "platform"
+
+    def check(self):
+        return sys.platform
+
+
+class Python(SetupPackage):
+    name = "python"
+
+    def check(self):
+        major, minor1, minor2, s, tmp = sys.version_info
+
+        if major < 2:
+            raise CheckFailed(
+                "Requires Python 2.6 or later")
+        elif major == 2 and minor1 < 6:
+            raise CheckFailed(
+                "Requires Python 2.6 or later (in the 2.x series)")
+        elif major == 3 and minor1 < 1:
+            raise CheckFailed(
+                "Requires Python 3.1 or later (in the 3.x series)")
+
+        return sys.version
+
+
+class Matplotlib(SetupPackage):
+    name = "matplotlib"
+
+    def check(self):
+        return extract_versions()['__version__']
+
+    def get_packages(self):
+        return [
+            'matplotlib',
+            'matplotlib.backends',
+            'matplotlib.backends.qt4_editor',
+            'matplotlib.projections',
+            'matplotlib.sphinxext',
+            'matplotlib.testing',
+            'matplotlib.testing.jpl_units',
+            'matplotlib.tri',
+            ]
+
+    def get_py_modules(self):
+        return ['pylab']
+
+    def get_package_data(self):
+        return {
+            'matplotlib':
+            [
+                'mpl-data/fonts/afm/*.afm',
+                'mpl-data/fonts/pdfcorefonts/*.afm',
+                'mpl-data/fonts/pdfcorefonts/*.txt',
+                'mpl-data/fonts/ttf/*.ttf',
+                'mpl-data/fonts/ttf/LICENSE_STIX',
+                'mpl-data/fonts/ttf/COPYRIGHT.TXT',
+                'mpl-data/fonts/ttf/README.TXT',
+                'mpl-data/fonts/ttf/RELEASENOTES.TXT',
+                'mpl-data/images/*.xpm',
+                'mpl-data/images/*.svg',
+                'mpl-data/images/*.gif',
+                'mpl-data/images/*.png',
+                'mpl-data/images/*.ppm',
+                'mpl-data/example/*.npy',
+                'mpl-data/matplotlibrc',
+                'backends/web_backend/*.*',
+                'backends/web_backend/jquery/js/*',
+                'backends/web_backend/jquery/css/themes/base/*.*',
+                'backends/web_backend/jquery/css/themes/base/images/*',
+                'backends/web_backend/css/*.*',
+             ]}
+
+
+class SampleData(OptionalPackage):
+    """
+    This handles the sample data that ships with matplotlib.  It is
+    technically optional, though most users will want it.
+    """
+    name = "sample_data"
+
+    def get_package_data(self):
+        return {
+            'matplotlib':
+            [
+                'mpl-data/sample_data/*.*',
+                'mpl-data/sample_data/axes_grid/*.*',
+            ]}
+
+
+class Toolkits(OptionalPackage):
+    name = "toolkits"
+
+    def get_packages(self):
+        return [
+            'mpl_toolkits',
+            'mpl_toolkits.mplot3d',
+            'mpl_toolkits.axes_grid',
+            'mpl_toolkits.axes_grid1',
+            'mpl_toolkits.axisartist',
+            ]
+
+
+class Tests(OptionalPackage):
+    name = "tests"
+
+    def check(self):
+        super(Tests, self).check()
+
         try:
-            qt_version = pyqtconfig.Configuration().qt_version
-            qt_version = convert_qt_version(qt_version)
-        except AttributeError:
-            qt_version = ""
-        print_status("Qt", "Qt: %s, PyQt: %s" %
-                     (qt_version,
-                      pyqtconfig.Configuration().pyqt_version_str))
-        return True
+            import nose
+        except ImportError:
+            return (
+                "nose 0.11.1 or later is required to run the "
+                "matplotlib test suite")
+
+        if nose.__versioninfo__ < (0, 11, 1):
+            return (
+                "nose 0.11.1 or later is required to run the "
+                "matplotlib test suite")
+
+        return 'using nose version %s' % nose.__version__
+
+    def get_packages(self):
+        return [
+            'matplotlib.tests',
+            ]
 
-def check_for_qt4():
-    try:
-        from PyQt4 import pyqtconfig
-    except ImportError:
-        print_status("Qt4", "no")
-        return False
-    else:
-        print_status("Qt4", "Qt: %s, PyQt4: %s" %
-                     (convert_qt_version(pyqtconfig.Configuration().qt_version),
-                      pyqtconfig.Configuration().pyqt_version_str))
-        return True
+    def get_package_data(self):
+        baseline_images = [
+            'tests/baseline_images/%s/*' % x
+            for x in os.listdir('lib/matplotlib/tests/baseline_images')]
 
-def check_for_pyside():
-    try:
-        from PySide import __version__
-        from PySide import QtCore
-    except ImportError:
-        print_status("PySide", "no")
-        return False
-    else:
-        print_status("PySide", "Qt: %s, PySide: %s" %
-                     (QtCore.__version__, __version__))
-        return True
+        return {
+            'matplotlib':
+            baseline_images +
+            [
+                'tests/mpltest.ttf',
+                'tests/test_rcparams.rc'
+            ]}
 
-def check_for_cairo():
-    try:
-        import cairo
-    except ImportError:
-        print_status("Cairo", "no")
-        return False
-    else:
-        print_status("Cairo", cairo.version)
-        return True
 
-def check_for_tornado():
-    try:
-        import tornado
-    except ImportError:
-        print_status("Tornado (webagg)", "no")
-        return False
-    else:
-        print_status("Tornado (webagg)", tornado.version)
-        return True
+class Numpy(SetupPackage):
+    name = "numpy"
 
-def check_provide_pytz():
-    if options['provide_pytz'] is True:
-        print_status("pytz", "matplotlib will provide")
-        return True
-    try:
-        import pytz
-    except ImportError:
-        if options['provide_pytz']:
-            print_status("pytz", "matplotlib will provide")
-            return True
+    def check(self):
+        min_version = extract_versions()['__version__numpy__']
+        try:
+            import numpy
+        except ImportError:
+            raise CheckFailed(
+                "Requires numpy %s or later.  (Numpy not found)" %
+                min_version)
+
+        if not is_min_version(numpy.__version__, min_version):
+            raise CheckFailed(
+                "Requires numpy %s or later.  (Found %s)" %
+                (min_version, numpy.__version__))
+
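+        # Build a throw-away Extension just to collect the include search
+        # path and probe it for numpy/arrayobject.h below.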
+        ext = make_extension('test', [])
+        ext.include_dirs.append(numpy.get_include())
+        if not has_include_file(
+            ext.include_dirs, os.path.join("numpy", "arrayobject.h")):
+            raise CheckFailed(
+                "The C headers for numpy could not be found.  You"
+                "may need to install the development package.")
+
+        return 'version %s' % numpy.__version__
+
+    def add_flags(self, ext):
+        import numpy
+
+        ext.include_dirs.append(numpy.get_include())
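+        # PY_ARRAY_UNIQUE_SYMBOL names the numpy C-API pointer table so it
+        # can be shared between the source files of a multi-file extension.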
+        ext.define_macros.append(('PY_ARRAY_UNIQUE_SYMBOL', 'MPL_ARRAY_API'))
+
+
+class CXX(SetupPackage):
+    name = 'pycxx'
+
+    def check(self):
+        if sys.version_info[0] >= 3:
+            # There is no version of PyCXX in the wild that will work
+            # with Python 3.x
+            self.__class__.found_external = False
+            return ("Official versions of PyCXX are not compatible with "
+                    "Python 3.x.  Using local copy")
+
+        self.__class__.found_external = True
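+        # Temporarily redirect sys.stdout while importing CXX so any output
+        # it produces is captured; stdout is restored in the finally block.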
+        old_stdout = sys.stdout
+        sys.stdout = io.BytesIO()
+        try:
+            import CXX
+        except ImportError:
+            self.__class__.found_external = False
+            return "Couldn't import.  Using local copy."
+        finally:
+            sys.stdout = old_stdout
+
+        try:
+            return self._check_for_pkg_config(
+                'PyCXX', 'CXX/Extensions.hxx', min_version='6.2.4')
+        except CheckFailed as e:
+            self.__class__.found_external = False
+            return str(e) + ' Using local copy.'
+
+    def add_flags(self, ext):
+        if self.found_external:
+            support_dir = os.path.normpath(
+                   os.path.join(
+                       sys.prefix,
+                       'share',
+                       'python%d.%d' % (
+                           sys.version_info[0], sys.version_info[1]),
+                       'CXX'))
+            if not os.path.exists(support_dir):
+                # On Fedora 17, these files are installed in /usr/share/CXX
+                support_dir = '/usr/src/CXX'
+            ext.sources.extend([
+                os.path.join(support_dir, x) for x in
+                ['cxxsupport.cxx', 'cxx_extensions.cxx',
+                 'IndirectPythonInterface.cxx',
+                 'cxxextensions.c']])
+            pkg_config.setup_extension(ext, 'PyCXX')
         else:
-            print_status("pytz", "no")
-            return False
-    else:
-        if pytz.__version__.endswith('mpl'):
-            print_status("pytz", "matplotlib will provide")
-            return True
+            ext.sources.extend(glob.glob('CXX/*.cxx'))
+            ext.sources.extend(glob.glob('CXX/*.c'))
+        ext.define_macros.append(('PYCXX_ISO_CPP_LIB', '1'))
+        if sys.version_info[0] >= 3:
+            ext.define_macros.append(('PYCXX_PYTHON_2TO3', '1'))
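+        # MSVC links its C++ runtime implicitly and has no separate libm,
+        # so these extra libraries are only needed for other toolchains.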
+        if not (sys.platform == 'win32' and win32_compiler == 'msvc'):
+            ext.libraries.append('stdc++')
+            ext.libraries.append('m')
+
+
+class LibAgg(SetupPackage):
+    name = 'libagg'
+
+    def check(self):
+        self.__class__.found_external = True
+        try:
+            return self._check_for_pkg_config(
+                'libagg', 'agg2/agg_basics.h', min_version='PATCH')
+        except CheckFailed as e:
+            self.__class__.found_external = False
+            return str(e) + ' Using local copy.'
+
+    def add_flags(self, ext):
+        if self.found_external:
+            pkg_config.setup_extension(ext, 'libagg')
         else:
-            print_status("pytz", pytz.__version__)
-            return False
+            ext.include_dirs.append('agg24/include')
+            agg_sources = [
+                'agg_bezier_arc.cpp',
+                'agg_curves.cpp',
+                'agg_image_filters.cpp',
+                'agg_trans_affine.cpp',
+                'agg_vcgen_contour.cpp',
+                'agg_vcgen_dash.cpp',
+                'agg_vcgen_stroke.cpp',
+                ]
+            ext.sources.extend(
+                os.path.join('agg24', 'src', x) for x in agg_sources)
+
+
+class FreeType(SetupPackage):
+    name = "freetype"
+
+    def check(self):
+        if sys.platform == 'win32':
+            return "Unknown version"
 
-def check_provide_dateutil():
-    if options['provide_dateutil'] is True:
-        print_status("dateutil", "matplotlib will provide")
-        return True
-    try:
-        import dateutil
-    except ImportError:
-        if options['provide_dateutil']:
-            print_status("dateutil", "matplotlib will provide")
-            return True
+        status, output = getstatusoutput("freetype-config --version")
+        if status == 0:
+            version = output
         else:
-            print_status("dateutil", "no")
-            return False
-    else:
+            version = None
+
+        return self._check_for_pkg_config(
+            'freetype2', 'ft2build.h',
+            min_version='2.4', version=version)
+
+    def add_flags(self, ext):
+        pkg_config.setup_extension(
+            ext, 'freetype2',
+            default_include_dirs=[
+                'freetype2', 'lib/freetype2/include',
+                'lib/freetype2/include/freetype2'],
+            default_library_dirs=[
+                'freetype2/lib'],
+            default_libraries=['freetype', 'z'])
+
+
+class FT2Font(SetupPackage):
+    name = 'ft2font'
+
+    def get_extension(self):
+        sources = [
+            'src/ft2font.cpp',
+            'src/mplutils.cpp'
+            ]
+        ext = make_extension('matplotlib.ft2font', sources)
+        FreeType().add_flags(ext)
+        Numpy().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class Png(SetupPackage):
+    name = "png"
+
+    def check(self):
+        return self._check_for_pkg_config(
+            'libpng', 'png.h',
+            min_version='1.2')
+
+    def get_extension(self):
+        sources = [
+            'src/_png.cpp', 'src/mplutils.cpp'
+            ]
+        ext = make_extension('matplotlib._png', sources)
+        pkg_config.setup_extension(
+            ext, 'libpng', default_libraries=['png', 'z'])
+        Numpy().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class TTConv(SetupPackage):
+    name = "ttconv"
+
+    def get_extension(self):
+        sources = [
+            'src/_ttconv.cpp',
+            'ttconv/pprdrv_tt.cpp',
+            'ttconv/pprdrv_tt2.cpp',
+            'ttconv/ttutil.cpp'
+            ]
+        ext = make_extension('matplotlib.ttconv', sources)
+        Numpy().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class Path(SetupPackage):
+    name = "path"
+
+    def get_extension(self):
+        sources = [
+            'src/_path.cpp',
+            'src/path_cleanup.cpp',
+            'src/agg_py_transforms.cpp'
+            ]
+
+        ext = make_extension('matplotlib._path', sources)
+        Numpy().add_flags(ext)
+        LibAgg().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class Image(SetupPackage):
+    name = "image"
+
+    def get_extension(self):
+        sources = [
+            'src/_image.cpp', 'src/mplutils.cpp'
+            ]
+        ext = make_extension('matplotlib._image', sources)
+        Numpy().add_flags(ext)
+        LibAgg().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class Contour(SetupPackage):
+    name = "contour"
+
+    def get_extension(self):
+        sources = [
+            "src/cntr.c"
+            ]
+        ext = make_extension('matplotlib._cntr', sources)
+        Numpy().add_flags(ext)
+        return ext
+
+
+class Delaunay(SetupPackage):
+    name = "delaunay"
+
+    def get_packages(self):
+        return ['matplotlib.delaunay']
+
+    def get_extension(self):
+        sources = ["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
+                   "delaunay_utils.cpp", "natneighbors.cpp"]
+        sources = [os.path.join('lib/matplotlib/delaunay', s) for s in sources]
+        ext = make_extension('matplotlib._delaunay', sources)
+        Numpy().add_flags(ext)
+        return ext
+
+
+class Tri(SetupPackage):
+    name = "tri"
+
+    def get_extension(self):
+        sources = [
+            "lib/matplotlib/tri/_tri.cpp",
+            "src/mplutils.cpp"
+            ]
+        ext = make_extension('matplotlib._tri', sources)
+        Numpy().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class Dateutil(SetupPackage):
+    name = "dateutil"
+
+    def check(self):
         try:
-            if dateutil.__version__.endswith('mpl'):
-                print_status("dateutil", "matplotlib will provide")
-                return True
+            import dateutil
+        except ImportError:
+            return (
+                "dateutil was not found. It is required for date axis "
+                "support. pip/easy_install may attempt to install it "
+                "after matplotlib.")
+
+        return "using dateutil version %s" % dateutil.__version__
+
+    def get_install_requires(self):
+        return ['python_dateutil']
+
+
+class Tornado(SetupPackage):
+    name = "tornado"
+
+    def check(self):
+        try:
+            import tornado
+        except ImportError:
+            return (
+                "tornado was not found. It is required for the WebAgg "
+                "backend. pip/easy_install may attempt to install it "
+                "after matplotlib.")
+
+        return "using tornado version %s" % tornado.version
+
+    def get_install_requires(self):
+        return ['tornado']
+
+
+class Pyparsing(SetupPackage):
+    name = "pyparsing"
+
+    def check(self):
+        try:
+            import pyparsing
+        except ImportError:
+            return (
+                "pyparsing was not found. It is required for mathtext "
+                "support. pip/easy_install may attempt to install it "
+                "after matplotlib.")
+
+        if sys.version_info[0] >= 3:
+            if [int(x) for x in pyparsing.__version__.split('.')] <= [1, 5, 6]:
+                return (
+                    "matplotlib requires pyparsing > 1.5.6 on Python 3.x")
+
+        return "using pyparsing version %s" % pyparsing.__version__
+
+    def get_install_requires(self):
+        return ['pyparsing']
+
+
+class BackendAgg(OptionalBackendPackage):
+    name = "agg"
+    force = False
+
+    def check(self):
+        # The Agg backend extension needs to be built even
+        # for certain GUI backends, such as TkAgg
+        config = self.get_config()
+        if config is False and self.force is False:
+            raise CheckFailed("skipping due to configuration")
+        else:
+            return "installing"
+
+    def get_extension(self):
+        sources = [
+            "src/mplutils.cpp",
+            "src/agg_py_transforms.cpp",
+            "src/_backend_agg.cpp"
+            ]
+        ext = make_extension('matplotlib.backends._backend_agg', sources)
+        Numpy().add_flags(ext)
+        LibAgg().add_flags(ext)
+        FreeType().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class BackendTkAgg(OptionalBackendPackage):
+    name = "tkagg"
+
+    def __init__(self):
+        self.tcl_tk_cache = None
+
+    def check(self):
+        if self.get_config() is False:
+            raise CheckFailed("skipping due to configuration")
+
+        try:
+            if sys.version_info[0] < 3:
+                import Tkinter
             else:
-                print_status("dateutil", dateutil.__version__)
-                return False
-        except AttributeError:
-            print_status("dateutil", "present, version unknown")
-            return False
+                import tkinter as Tkinter
+        except ImportError:
+            raise CheckFailed('TkAgg requires Tkinter.')
+        except RuntimeError:
+            raise CheckFailed('Tkinter present but import failed.')
+        else:
+            if Tkinter.TkVersion < 8.3:
+                raise CheckFailed("Tcl/Tk v8.3 or later required.")
 
-def check_provide_six():
-    # We don't need six on Python 2.x
-    if sys.version_info[0] < 3:
-        return
+        ext = self.get_extension()
+        check_include_file(ext.include_dirs, "tk.h", "Tk")
 
-    if options['provide_six'] is True:
-        print_status("six", "matplotlib will provide")
-        return True
-    try:
-        import six
-    except ImportError:
-        if options['provide_six']:
-            print_status("six", "matplotlib will provide")
-            return True
+        try:
+            tk_v = Tkinter.__version__.split()[-2]
+        except (AttributeError, IndexError):
+            # Tkinter.__version__ has been removed in python 3
+            tk_v = 'version not identified'
+
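+        # TkAgg renders through Agg, so force the Agg extension to be built
+        # even if it was switched off in the configuration.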
+        BackendAgg.force = True
+
+        return "version %s" % tk_v
+
+    def get_extension(self):
+        sources = [
+            'src/agg_py_transforms.cpp',
+            'src/_tkagg.cpp'
+            ]
+
+        ext = make_extension('matplotlib.backends._tkagg', sources)
+        self.add_flags(ext)
+        Numpy().add_flags(ext)
+        LibAgg().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+    def query_tcltk(self):
+        """
+        Tries to open a Tk window in order to query the Tk object
+        about its library paths.  This should never be called more
+        than once by the same process, as Tk intricacies may cause the
+        Python interpreter to hang. The function also has a workaround
+        if no X server is running (useful for autobuild systems).
+        """
+        # Use cached values if they exist, which ensures this function
+        # only executes once
+        if self.tcl_tk_cache is not None:
+            return self.tcl_tk_cache
+
+        # By this point, we already know that Tkinter imports correctly
+        if sys.version_info[0] < 3:
+            import Tkinter
         else:
-            print_status("six", "no")
-            return False
-    else:
+            import tkinter as Tkinter
+        tcl_lib_dir = ''
+        tk_lib_dir = ''
+        # First try to open a Tk window (requires a running X server)
         try:
-            if six.__version__.endswith('mpl'):
-                print_status("six", "matplotlib will provide")
-                return True
+            tk = Tkinter.Tk()
+        except Tkinter.TclError:
+            # Next, start Tcl interpreter without opening a Tk window
+            # (no need for X server) This feature is available in
+            # python version 2.4 and up
+            try:
+                tcl = Tkinter.Tcl()
+            except AttributeError:    # Python version not high enough
+                pass
+            except Tkinter.TclError:  # Something went wrong while opening Tcl
+                pass
             else:
-                print_status("six", six.__version__)
-                return False
-        except AttributeError:
-            print_status("six", "present, version unknown")
-            return False
+                tcl_lib_dir = str(tcl.getvar('tcl_library'))
+                # Guess Tk location based on Tcl location
+                (head, tail) = os.path.split(tcl_lib_dir)
+                tail = tail.replace('Tcl', 'Tk').replace('tcl', 'tk')
+                tk_lib_dir = os.path.join(head, tail)
+                if not os.path.exists(tk_lib_dir):
+                    tk_lib_dir = tcl_lib_dir.replace(
+                        'Tcl', 'Tk').replace('tcl', 'tk')
+        else:
+            # Obtain Tcl and Tk locations from Tk widget
+            tk.withdraw()
+            tcl_lib_dir = str(tk.getvar('tcl_library'))
+            tk_lib_dir = str(tk.getvar('tk_library'))
+            tk.destroy()
 
+        # Save directories and version string to cache
+        self.tcl_tk_cache = tcl_lib_dir, tk_lib_dir, str(Tkinter.TkVersion)[:3]
+        return self.tcl_tk_cache
 
-def check_for_dvipng():
-    try:
-        stdin, stdout = run_child_process('dvipng -version')
-        print_status("dvipng", stdout.readlines()[1].decode().split()[-1])
-        return True
-    except (IndexError, ValueError):
-        print_status("dvipng", "no")
-        return False
+    def parse_tcl_config(self, tcl_lib_dir, tk_lib_dir):
+        try:
+            if sys.version_info[0] < 3:
+                import Tkinter
+            else:
+                import tkinter as Tkinter
+        except ImportError:
+            return None
+
+        tcl_poss = [tcl_lib_dir,
+                    os.path.normpath(os.path.join(tcl_lib_dir, '..')),
+                    "/usr/lib/tcl" + str(Tkinter.TclVersion),
+                    "/usr/lib"]
+        tk_poss = [tk_lib_dir,
+                   os.path.normpath(os.path.join(tk_lib_dir, '..')),
+                   "/usr/lib/tk" + str(Tkinter.TkVersion),
+                   "/usr/lib"]
+        for ptcl, ptk in zip(tcl_poss, tk_poss):
+            tcl_config = os.path.join(ptcl, "tclConfig.sh")
+            tk_config = os.path.join(ptk, "tkConfig.sh")
+            if (os.path.exists(tcl_config) and os.path.exists(tk_config)):
+                break
+        if not (os.path.exists(tcl_config) and os.path.exists(tk_config)):
+            return None
+
+        def get_var(file, varname):
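+            # Source the config file in /bin/sh and echo the requested
+            # variable so that any ${...} references inside it are expanded.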
+            p = subprocess.Popen(
+                '. %s ; eval echo ${%s}' % (file, varname),
+                shell=True,
+                executable="/bin/sh",
+                stdout=subprocess.PIPE)
+            result = p.communicate()[0]
+            return result.decode('ascii')
+
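+        # The *_SPEC / *_FLAG values are compiler flags such as
+        # "-L/path -ltcl8.5"; strip the leading "-L"/"-I"/"-l" prefixes.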
+        tcl_lib_dir = get_var(
+            tcl_config, 'TCL_LIB_SPEC').split()[0][2:].strip()
+        tcl_inc_dir = get_var(
+            tcl_config, 'TCL_INCLUDE_SPEC')[2:].strip()
+        tcl_lib = get_var(tcl_config, 'TCL_LIB_FLAG')[2:].strip()
+
+        tk_lib_dir = get_var(tk_config, 'TK_LIB_SPEC').split()[0][2:].strip()
+        tk_inc_dir = get_var(tk_config, 'TK_INCLUDE_SPEC').strip()
+        if tk_inc_dir == '':
+            tk_inc_dir = tcl_inc_dir
+        else:
+            tk_inc_dir = tk_inc_dir[2:]
+        tk_lib = get_var(tk_config, 'TK_LIB_FLAG')[2:].strip()
+
+        if not os.path.exists(os.path.join(tk_inc_dir, 'tk.h')):
+            return None
+
+        return (tcl_lib_dir, tcl_inc_dir, tcl_lib,
+                tk_lib_dir, tk_inc_dir, tk_lib)
+
+    def guess_tcl_config(self, tcl_lib_dir, tk_lib_dir, tk_ver):
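+        # No tclConfig.sh/tkConfig.sh was found: guess the include and
+        # library directories relative to the runtime paths Tk reported.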
+        if not (os.path.exists(tcl_lib_dir) and os.path.exists(tk_lib_dir)):
+            return None
+
+        tcl_lib = os.path.normpath(os.path.join(tcl_lib_dir, '../'))
+        tk_lib = os.path.normpath(os.path.join(tk_lib_dir, '../'))
+
+        tcl_inc = os.path.normpath(
+            os.path.join(tcl_lib_dir,
+                         '../../include/tcl' + tk_ver))
+        if not os.path.exists(tcl_inc):
+            tcl_inc = os.path.normpath(
+                os.path.join(tcl_lib_dir,
+                             '../../include'))
+
+        tk_inc = os.path.normpath(os.path.join(
+            tk_lib_dir,
+            '../../include/tk' + tk_ver))
+        if not os.path.exists(tk_inc):
+            tk_inc = os.path.normpath(os.path.join(
+                tk_lib_dir,
+                '../../include'))
+
+        if not os.path.exists(os.path.join(tk_inc, 'tk.h')):
+            tk_inc = tcl_inc
+
+        if not os.path.exists(tcl_inc):
+            # this is a hack for suse linux, which is broken
+            if (sys.platform.startswith('linux') and
+                os.path.exists('/usr/include/tcl.h') and
+                os.path.exists('/usr/include/tk.h')):
+                tcl_inc = '/usr/include'
+                tk_inc = '/usr/include'
+
+        if not os.path.exists(os.path.join(tk_inc, 'tk.h')):
+            return None
+
+        return tcl_lib, tcl_inc, 'tcl' + tk_ver, tk_lib, tk_inc, 'tk' + tk_ver
+
+    def hardcoded_tcl_config(self):
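+        # Last resort: assume a conventional /usr/local layout with
+        # unversioned tcl/tk library names.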
+        tcl_inc = "/usr/local/include"
+        tk_inc = "/usr/local/include"
+        tcl_lib = "/usr/local/lib"
+        tk_lib = "/usr/local/lib"
+        return tcl_lib, tcl_inc, 'tcl', tk_lib, tk_inc, 'tk'
+
+    def add_flags(self, ext):
+        if sys.platform == 'win32':
+            major, minor1, minor2, s, tmp = sys.version_info
+            ext.include_dirs.extend(['win32_static/include/tcl85'])
+            ext.libraries.extend(['tk85', 'tcl85'])
+            ext.library_dirs.extend([os.path.join(sys.prefix, 'dlls')])
+
+        elif sys.platform == 'darwin':
+            # this config section lifted directly from Imaging - thanks to
+            # the effbot!
+
+            # First test for a MacOSX/darwin framework install
+            from os.path import join, exists
+            framework_dirs = [
+                join(os.getenv('HOME'), '/Library/Frameworks'),
+                '/Library/Frameworks',
+                '/System/Library/Frameworks/',
+            ]
 
-def check_for_ghostscript():
-    try:
+            # Find the directory that contains the Tcl.framework and
+            # Tk.framework bundles.
+            tk_framework_found = 0
+            for F in framework_dirs:
+                # both Tcl.framework and Tk.framework should be present
+                for fw in 'Tcl', 'Tk':
+                    if not exists(join(F, fw + '.framework')):
+                        break
+                else:
+                    # ok, F is now the directory with both frameworks;
+                    # continue building
+                    tk_framework_found = 1
+                    break
+            if tk_framework_found:
+                # For 8.4a2, we must add -I options that point inside
+                # the Tcl and Tk frameworks. In later release we
+                # should hopefully be able to pass the -F option to
+                # gcc, which specifies a framework lookup path.
+
+                tk_include_dirs = [
+                    join(F, fw + '.framework', H)
+                    for fw in ('Tcl', 'Tk')
+                    for H in ('Headers', 'Versions/Current/PrivateHeaders')
+                ]
+
+                # For 8.4a2, the X11 headers are not included. Rather
+                # than include a complicated search, this is a
+                # hard-coded path. It could bail out if X11 libs are
+                # not found...
+
+                # tk_include_dirs.append('/usr/X11R6/include')
+                frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
+                ext.include_dirs.extend(tk_include_dirs)
+                ext.extra_link_args.extend(frameworks)
+                ext.extra_compile_args.extend(frameworks)
+
+        # Neither win32 nor OS X: fall back to the generic Unix search below.
+        else:
+            # There are 3 methods to try, in decreasing order of "smartness"
+            #
+            #   1. Parse the tclConfig.sh and tkConfig.sh files that have
+            #      all the information we need
+            #
+            #   2. Guess the include and lib dirs based on the location of
+            #      Tkinter's 'tcl_library' and 'tk_library' variables.
+            #
+            #   3. Use some hardcoded locations that seem to work on a lot
+            #      of distros.
+
+            # Query Tcl/Tk system for library paths and version string
+            try:
+                tcl_lib_dir, tk_lib_dir, tk_ver = self.query_tcltk()
+            except:
+                tk_ver = ''
+                result = self.hardcoded_tcl_config()
+            else:
+                result = self.parse_tcl_config(tcl_lib_dir, tk_lib_dir)
+                if result is None:
+                    result = self.guess_tcl_config(
+                        tcl_lib_dir, tk_lib_dir, tk_ver)
+                    if result is None:
+                        result = self.hardcoded_tcl_config()
+
+            # Add final versions of directories and libraries to ext lists
+            (tcl_lib_dir, tcl_inc_dir, tcl_lib,
+             tk_lib_dir, tk_inc_dir, tk_lib) = result
+            ext.include_dirs.extend([tcl_inc_dir, tk_inc_dir])
+            ext.library_dirs.extend([tcl_lib_dir, tk_lib_dir])
+            ext.libraries.extend([tcl_lib, tk_lib])
+
+
+class BackendGtk(OptionalBackendPackage):
+    name = "gtk"
+
+    def check(self):
+        if self.get_config() is False:
+            raise CheckFailed("skipping due to configuration")
+
+        try:
+            import gtk
+        except ImportError:
+            raise CheckFailed("Requires pygtk")
+        except RuntimeError:
+            raise CheckFailed('pygtk present, but import failed.')
+        else:
+            version = (2, 2, 0)
+            if gtk.pygtk_version < version:
+                raise CheckFailed(
+                    "Requires pygtk %d.%d.%d or later. "
+                    "Found %d.%d.%d" % (version + gtk.pygtk_version))
+
+        ext = self.get_extension()
+        self.add_flags(ext)
+        check_include_file(ext.include_dirs,
+                           os.path.join("gtk", "gtk.h"),
+                           'gtk')
+        check_include_file(ext.include_dirs,
+                           os.path.join("pygtk", "pygtk.h"),
+                           'pygtk')
+
+        return 'Gtk: %s pygtk: %s' % (
+            ".".join(str(x) for x in gtk.gtk_version),
+            ".".join(str(x) for x in gtk.pygtk_version))
+
+    def get_package_data(self):
+        return {'matplotlib': ['mpl-data/*.glade']}
+
+    def get_extension(self):
+        sources = [
+            'src/_backend_gdk.c'
+            ]
+        ext = make_extension('matplotlib.backends._backend_gdk', sources)
+        self.add_flags(ext)
+        Numpy().add_flags(ext)
+        return ext
+
+    def add_flags(self, ext):
         if sys.platform == 'win32':
-            command = 'gswin32c --version'
+            def getoutput(s):
+                ret = os.popen(s).read().strip()
+                return ret
+
+            if 'PKG_CONFIG_PATH' not in os.environ:
+                # If Gtk+ is installed, pkg-config is required to be installed
+                os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'
+
+                # popen broken on my win32 platform so I can't use pkg-config
+                ext.library_dirs.extend(
+                    ['C:/GTK/bin', 'C:/GTK/lib'])
+
+                ext.include_dirs.extend(
+                    ['win32_static/include/pygtk-2.0',
+                     'C:/GTK/include',
+                     'C:/GTK/include/gobject',
+                     'C:/GTK/include/gmodule',
+                     'C:/GTK/include/glib',
+                     'C:/GTK/include/pango',
+                     'C:/GTK/include/atk',
+                     'C:/GTK/include/X11',
+                     'C:/GTK/include/cairo',
+                     'C:/GTK/include/gdk',
+                     'C:/GTK/include/gdk-pixbuf',
+                     'C:/GTK/include/gtk',
+                     ])
+
+            pygtkIncludes = getoutput(
+                'pkg-config --cflags-only-I pygtk-2.0').split()
+            gtkIncludes = getoutput(
+                'pkg-config --cflags-only-I gtk+-2.0').split()
+            includes = pygtkIncludes + gtkIncludes
+            ext.include_dirs.extend([include[2:] for include in includes])
+
+            pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
+            gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
+            linkerFlags = pygtkLinker + gtkLinker
+
+            ext.libraries.extend(
+                [flag[2:] for flag in linkerFlags if flag.startswith('-l')])
+
+            ext.library_dirs.extend(
+                [flag[2:] for flag in linkerFlags if flag.startswith('-L')])
+
+            ext.extra_link_args.extend(
+                [flag for flag in linkerFlags if not
+                 (flag.startswith('-l') or flag.startswith('-L'))])
+
+            # visual studio doesn't need the math library
+            if (sys.platform == 'win32' and
+                win32_compiler == 'msvc' and
+                'm' in ext.libraries):
+                ext.libraries.remove('m')
+
+        elif sys.platform != 'win32':
+            pkg_config.setup_extension(ext, 'pygtk-2.0')
+            pkg_config.setup_extension(ext, 'gtk+-2.0')
+
+
+class BackendGtkAgg(BackendGtk):
+    name = "gtkagg"
+
+    def check(self):
+        try:
+            result = super(BackendGtkAgg, self).check()
+        except:
+            raise
         else:
-            command = 'gs --version'
-        stdin, stdout = run_child_process(command)
-        print_status("ghostscript", stdout.read().decode()[:-1])
-        return True
-    except (IndexError, ValueError):
-        print_status("ghostscript", "no")
-        return False
+            BackendAgg.force = True
+            return result
 
-def check_for_latex():
-    try:
-        stdin, stdout = run_child_process('latex -version')
-        line = stdout.readlines()[0].decode()
-        pattern = '(3\.1\d+)|(MiKTeX \d+.\d+)'
-        match = re.search(pattern, line)
-        print_status("latex", match.group(0))
-        return True
-    except (IndexError, ValueError, AttributeError):
-        print_status("latex", "no")
-        return False
+    def get_package_data(self):
+        return {'matplotlib': ['mpl-data/*.glade']}
+
+    def get_extension(self):
+        sources = [
+            'src/agg_py_transforms.cpp',
+            'src/_gtkagg.cpp',
+            'src/mplutils.cpp'
+            ]
+        ext = make_extension('matplotlib.backends._gtkagg', sources)
+        self.add_flags(ext)
+        LibAgg().add_flags(ext)
+        CXX().add_flags(ext)
+        Numpy().add_flags(ext)
+        return ext
 
-def check_for_pdftops():
-    try:
-        stdin, stdout = run_child_process('pdftops -v')
-        for line in stdout.readlines():
-            line = line.decode()
-            if 'version' in line:
-                print_status("pdftops", line.split()[-1])
-                return True
-    except (IndexError, ValueError):
-        print_status("pdftops", "no")
-        return False
 
-def check_for_numpy(min_version):
+def backend_gtk3agg_internal_check(x):
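+    # The unused argument exists only so this function can be invoked
+    # through multiprocessing.Pool.map() in BackendGtk3Agg.check().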
     try:
-        import numpy
+        from gi.repository import Gtk, Gdk, GObject
     except ImportError:
-        print_status("numpy", "no")
-        print_message("You must install numpy %s or later to build matplotlib." %
-                      min_version)
-        return False
+        return (False, "Requires pygobject to be installed.")
 
-    expected_version = version.LooseVersion(min_version)
-    found_version = version.LooseVersion(numpy.__version__)
-    if not found_version >= expected_version:
-        print_message(
-            'numpy %s or later is required; you have %s' %
-            (min_version, numpy.__version__))
-        return False
-    module = Extension('test', [])
-    add_numpy_flags(module)
-    add_base_flags(module)
+    if sys.version_info[0] >= 3:
+        return (False, "gtk3agg does not work on Python 3")
 
-    print_status("numpy", numpy.__version__)
-    if not find_include_file(module.include_dirs, os.path.join("numpy", "arrayobject.h")):
-        print_message("Could not find the headers for numpy.  You may need to install the development package.")
-        return False
-    return True
-
-def add_numpy_flags(module):
-    "Add the modules flags to build extensions which use numpy"
-    import numpy
-    module.include_dirs.append(numpy.get_include())
-
-def add_png_flags(module):
-    try_pkgconfig(module, 'libpng', 'png')
-    add_base_flags(module)
-    add_numpy_flags(module)
-    module.libraries.append('z')
-    module.include_dirs.extend(['.'])
-    module.libraries.extend(std_libs)
-
-def add_agg_flags(module):
-    'Add the module flags to build extensions which use agg'
-
-    # before adding the freetype flags since -z comes later
-    add_base_flags(module)
-    add_numpy_flags(module)
-    module.include_dirs.extend(['src', '%s/include'%AGG_VERSION, '.'])
-
-    # put these later for correct link order
-    module.libraries.extend(std_libs)
-
-def add_ft2font_flags(module):
-    'Add the module flags to ft2font extension'
-    add_numpy_flags(module)
-    if not get_pkgconfig(module, 'freetype2'):
-        module.libraries.extend(['freetype', 'z'])
-        add_base_flags(module)
-
-        basedirs = module.include_dirs[:]  # copy the list to avoid inf loop!
-        for d in basedirs:
-            module.include_dirs.append(os.path.join(d, 'freetype2'))
-            p = os.path.join(d, 'lib/freetype2/include')
-            if os.path.exists(p): module.include_dirs.append(p)
-            p = os.path.join(d, 'lib/freetype2/include/freetype2')
-            if os.path.exists(p): module.include_dirs.append(p)
-
-        basedirs = module.library_dirs[:]  # copy the list to avoid inf loop!
-        for d in basedirs:
-            p = os.path.join(d, 'freetype2/lib')
-            if os.path.exists(p): module.library_dirs.append(p)
-    else:
-        add_base_flags(module)
-        module.libraries.append('z')
+    return (True, "version %s.%s.%s" % (
+        Gtk.get_major_version(),
+        Gtk.get_minor_version(),
+        Gtk.get_micro_version()))
 
-    # put this last for library link order
-    module.libraries.extend(std_libs)
 
-def check_for_gtk():
-    'check for the presence of pygtk'
-    gotit = False
-    explanation = None
-    try:
-        import gtk
-    except ImportError:
-        explanation = 'Building for Gtk+ requires pygtk; you must be able to "import gtk" in your build/install environment'
-    except RuntimeError:
-        explanation = 'pygtk present but import failed'
-    else:
-        version = (2,2,0)
-        if gtk.pygtk_version < version:
-            explanation = "Error: GTK backend requires PyGTK %d.%d.%d (or later), " \
-                  "%d.%d.%d was detected." % (
-                version + gtk.pygtk_version)
-        else:
-            gotit = True
-
-    if gotit:
-        module = make_extension('test', [])
-        add_pygtk_flags(module)
-        if not find_include_file(module.include_dirs, os.path.join("gtk", "gtk.h")):
-            explanation = (
-                "Could not find Gtk+ headers in any of %s" %
-                ", ".join(["'%s'" % x for x in module.include_dirs]))
-            gotit = False
-
-    def ver2str(tup):
-        return ".".join([str(x) for x in tup])
-
-    if gotit:
-        import gobject
-        if hasattr(gobject, 'pygobject_version'):
-            pygobject_version = ver2str(gobject.pygobject_version)
+class BackendGtk3Agg(OptionalBackendPackage):
+    name = "gtk3agg"
+
+    def check(self):
+        if 'TRAVIS' in os.environ:
+            raise CheckFailed("Can't build with Travis")
+
+        # This check needs to be performed out-of-process, because
+        # importing gi and then importing regular old pygtk afterward
+        # segfaults the interpreter.
+        p = multiprocessing.Pool()
+        success, msg = p.map(backend_gtk3agg_internal_check, [0])[0]
+        p.close()
+        p.join()
+        if success:
+            BackendAgg.force = True
+
+            return msg
         else:
-            pygobject_version = '[pre-pygobject]'
-        print_status("Gtk+", "gtk+: %s, glib: %s, pygtk: %s, pygobject: %s" %
-                     (ver2str(gtk.gtk_version), ver2str(gobject.glib_version),
-                      ver2str(gtk.pygtk_version), pygobject_version))
-    else:
-        print_status("Gtk+", "no")
+            raise CheckFailed(msg)
 
-    if explanation is not None:
-        print_message(explanation)
+    def get_package_data(self):
+        return {'matplotlib': ['mpl-data/*.glade']}
 
-    # Switch off the event loop for PyGTK >= 2.15.0
-    if gotit:
-        try:
-            gtk.set_interactive(False)
-        except AttributeError: # PyGTK < 2.15.0
-            pass
 
-    return gotit
-
-def add_pygtk_flags(module):
-    'Add the module flags to build extensions which use gtk'
-
-    if sys.platform=='win32':
-        # popen broken on my win32 plaform so I can't use pkgconfig
-        module.library_dirs.extend(
-            ['C:/GTK/bin', 'C:/GTK/lib'])
-
-        module.include_dirs.extend(
-            ['win32_static/include/pygtk-2.0',
-             'C:/GTK/include',
-             'C:/GTK/include/gobject',
-             'C:/GTK/include/gmodule',
-             'C:/GTK/include/glib',
-             'C:/GTK/include/pango',
-             'C:/GTK/include/atk',
-             'C:/GTK/include/X11',
-             'C:/GTK/include/cairo',
-             'C:/GTK/include/gdk',
-             'C:/GTK/include/gdk-pixbuf',
-             'C:/GTK/include/gtk',
-             ])
-
-        add_base_flags(module)
-
-        if 'PKG_CONFIG_PATH' not in os.environ:
-            # If Gtk+ is installed, pkg-config is required to be installed
-            os.environ['PKG_CONFIG_PATH'] = 'C:\GTK\lib\pkgconfig'
-
-        pygtkIncludes = getoutput('pkg-config --cflags-only-I pygtk-2.0').split()
-        gtkIncludes = getoutput('pkg-config --cflags-only-I gtk+-2.0').split()
-        includes = pygtkIncludes + gtkIncludes
-        module.include_dirs.extend([include[2:] for include in includes])
-
-        pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
-        gtkLinker =  getoutput('pkg-config --libs gtk+-2.0').split()
-        linkerFlags = pygtkLinker + gtkLinker
-
-        module.libraries.extend(
-            [flag[2:] for flag in linkerFlags if flag.startswith('-l')])
-
-        module.library_dirs.extend(
-            [flag[2:] for flag in linkerFlags if flag.startswith('-L')])
-
-        module.extra_link_args.extend(
-            [flag for flag in linkerFlags if not
-             (flag.startswith('-l') or flag.startswith('-L'))])
-
-        # visual studio doesn't need the math library
-        if sys.platform == 'win32' and win32_compiler == 'msvc' and 'm' in module.libraries:
-            module.libraries.remove('m')
-
-    if sys.platform != 'win32':
-        # If Gtk+ is installed, pkg-config is required to be installed
-        add_base_flags(module)
-        ok = get_pkgconfig(module, 'pygtk-2.0 gtk+-2.0', report_error=True)
-        if not ok:
-            print_message(
-                "You may need to install 'dev' package(s) to provide header files.")
-    # visual studio doesn't need the math library
-    if sys.platform == 'win32' and win32_compiler == 'msvc' and 'm' in module.libraries:
-        module.libraries.remove('m')
-
-# Make sure you use the Tk version given by Tkinter.TkVersion
-# or else you'll build for a wrong version of the Tcl
-# interpreter (leading to nasty segfaults).
-def check_for_tk():
-    gotit = False
-    explanation = None
+def backend_gtk3cairo_internal_check(x):
     try:
-        if sys.version_info[0] < 3:
-            import Tkinter
-        else:
-            import tkinter as Tkinter
+        import cairo
     except ImportError:
-        explanation = 'TKAgg requires Tkinter'
-    except RuntimeError:
-        explanation = 'Tkinter present but import failed'
-    else:
-        if Tkinter.TkVersion < 8.3:
-            explanation = "Tcl/Tk v8.3 or later required"
+        return (False, "Requires cairo to be installed.")
+
+    try:
+        from gi.repository import Gtk, Gdk, GObject
+    except ImportError:
+        return (False, "Requires pygobject to be installed.")
+
+    return (True, "version %s.%s.%s" % (
+        Gtk.get_major_version(),
+        Gtk.get_minor_version(),
+        Gtk.get_micro_version()))
+
+
+class BackendGtk3Cairo(OptionalBackendPackage):
+    name = "gtk3cairo"
+
+    def check(self):
+        if 'TRAVIS' in os.environ:
+            raise CheckFailed("Can't build with Travis")
+
+        # This check needs to be performed out-of-process, because
+        # importing gi and then importing regular old pygtk afterward
+        # segfaults the interpreter.
+        p = multiprocessing.Pool()
+        success, msg = p.map(backend_gtk3cairo_internal_check, [0])[0]
+        p.close()
+        p.join()
+        if success:
+            BackendAgg.force = True
+
+            return msg
         else:
-            gotit = True
+            raise CheckFailed(msg)
+
+    def get_package_data(self):
+        return {'matplotlib': ['mpl-data/*.glade']}
 
-    if gotit:
-        module = make_extension('test', [])
+
+class BackendWxAgg(OptionalBackendPackage):
+    name = "wxagg"
+
+    def check(self):
         try:
-            explanation = add_tk_flags(module)
-        # except RuntimeError:
-        #     # This deals with the change in exception handling syntax in
-        #     # python 3. If we only need to support >= 2.6, we can just use the
-        #     # commented out lines below.
-        #     exc_type,exc,tb = sys.exc_info()
-        #     explanation = str(exc)
-        #     gotit = False
-        except RuntimeError as e:
-            explanation = str(e)
-        else:
-            if not find_include_file(module.include_dirs, "tk.h"):
-                message = 'Tkinter present, but header files are not found. ' + \
-                          'You may need to install development packages.'
-                if explanation is not None:
-                    explanation += '\n' + message
-                else:
-                    explanation = message
-                gotit = False
+            import wxversion
+        except ImportError:
+            raise CheckFailed("requires wxPython")
 
-    if gotit:
         try:
-            tk_v = Tkinter.__version__.split()[-2]
-        except (AttributeError, IndexError):
-            # Tkinter.__version__ has been removed in python 3
-            tk_v = 'version not identified'
-        print_status("Tkinter", "Tkinter: %s, Tk: %s, Tcl: %s" %
-                     (tk_v, Tkinter.TkVersion, Tkinter.TclVersion))
-    else:
-        print_status("Tkinter", "no")
-    if explanation is not None:
-        print_message(explanation)
-    return gotit
-
-def check_for_macosx():
-    gotit = False
-    import sys
-    if sys.platform=='darwin':
-        gotit = True
-    if gotit:
-        print_status("Mac OS X native", "yes")
-    else:
-        print_status("Mac OS X native", "no")
-    return gotit
-
-def query_tcltk():
-    """Tries to open a Tk window in order to query the Tk object about its library paths.
-       This should never be called more than once by the same process, as Tk intricacies
-       may cause the Python interpreter to hang. The function also has a workaround if
-       no X server is running (useful for autobuild systems)."""
-    global TCL_TK_CACHE
-    # Use cached values if they exist, which ensures this function only executes once
-    if TCL_TK_CACHE is not None:
-        return TCL_TK_CACHE
-
-    # By this point, we already know that Tkinter imports correctly
-    if sys.version_info[0] < 3:
-        import Tkinter
-    else:
-        import tkinter as Tkinter
-    tcl_lib_dir = ''
-    tk_lib_dir = ''
-    # First try to open a Tk window (requires a running X server)
-    try:
-        tk = Tkinter.Tk()
-    except Tkinter.TclError:
-        # Next, start Tcl interpreter without opening a Tk window (no need for X server)
-        # This feature is available in python version 2.4 and up
+            _wx_ensure_failed = wxversion.AlreadyImportedError
+        except AttributeError:
+            _wx_ensure_failed = wxversion.VersionError
+
         try:
-            tcl = Tkinter.Tcl()
-        except AttributeError:    # Python version not high enough
-            pass
-        except Tkinter.TclError:  # Something went wrong while opening Tcl
+            wxversion.ensureMinimal('2.8')
+        except _wx_ensure_failed:
             pass
+
+        try:
+            import wx
+            backend_version = wx.VERSION_STRING
+        except ImportError:
+            raise CheckFailed("requires wxPython")
+
+        # Extra version check in case wxversion lacks AlreadyImportedError;
+        # then VersionError might have been raised and ignored when
+        # there really *is* a problem with the version.
+        major, minor = [int(n) for n in backend_version.split('.')[:2]]
+        if major < 2 or (major < 3 and minor < 8):
+            raise CheckFailed(
+                "Requires wxPython 2.8, found %s" % backend_version)
+
+        BackendAgg.force = True
+
+        return "version %s" % backend_version
+
+
+class BackendMacOSX(OptionalBackendPackage):
+    name = 'macosx'
+
+    def check(self):
+        if self.get_config() is False:
+            raise CheckFailed("skipping due to configuration")
+
+        if sys.platform != 'darwin':
+            raise CheckFailed("Mac OS-X only")
+
+    def get_package_data(self):
+        return {'matplotlib': ['backends/Matplotlib.nib/*']}
+
+    def get_extension(self):
+        sources = [
+            'src/_macosx.m',
+            'src/agg_py_transforms.cpp',
+            'src/path_cleanup.cpp'
+            ]
+
+        ext = make_extension('matplotlib.backends._macosx', sources)
+        Numpy().add_flags(ext)
+        LibAgg().add_flags(ext)
+        CXX().add_flags(ext)
+        return ext
+
+
+class Windowing(OptionalBackendPackage):
+    """
+    Builds the windowing extension.
+    """
+    name = "windowing"
+
+    def check(self):
+        if sys.platform != 'win32':
+            raise CheckFailed("Microsoft Windows only")
+        config = self.get_config()
+        if config is False:
+            raise CheckFailed("skipping due to configuration")
+        return "installing"
+
+    def get_extension(self):
+        sources = [
+            "src/_windowing.cpp"
+            ]
+        ext = make_extension('matplotlib._windowing', sources)
+        ext.include_dirs.extend(['C:/include'])
+        ext.libraries.extend(['user32'])
+        ext.library_dirs.extend(['C:/lib'])
+        ext.extra_link_args.append("-mwindows")
+        return ext
+
+
+class BackendQt(OptionalBackendPackage):
+    name = "qtagg"
+
+    def convert_qt_version(self, version):
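+        # Qt encodes its version as a hex-packed integer, e.g. 0x040806 for
+        # Qt 4.8.6; peel off two hex digits at a time, least significant
+        # first, to rebuild the dotted version string.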
+        version = '%x' % version
+        temp = []
+        while len(version) > 0:
+            version, chunk = version[:-2], version[-2:]
+            temp.insert(0, str(int(chunk, 16)))
+        return '.'.join(temp)
+
+    def check(self):
+        try:
+            import pyqtconfig
+        except ImportError:
+            raise CheckFailed("not found")
         else:
-            tcl_lib_dir = str(tcl.getvar('tcl_library'))
-            # Guess Tk location based on Tcl location
-            (head, tail) = os.path.split(tcl_lib_dir)
-            tail = tail.replace('Tcl', 'Tk').replace('tcl', 'tk')
-            tk_lib_dir = os.path.join(head, tail)
-            if not os.path.exists(tk_lib_dir):
-                tk_lib_dir = tcl_lib_dir.replace('Tcl', 'Tk').replace('tcl', 'tk')
-    else:
-        # Obtain Tcl and Tk locations from Tk widget
-        tk.withdraw()
-        tcl_lib_dir = str(tk.getvar('tcl_library'))
-        tk_lib_dir = str(tk.getvar('tk_library'))
-        tk.destroy()
+            try:
+                qt_version = pyqtconfig.Configuration().qt_version
+                qt_version = self.convert_qt_version(qt_version)
+            except AttributeError:
+                qt_version = ""
 
-    # Save directories and version string to cache
-    TCL_TK_CACHE = tcl_lib_dir, tk_lib_dir, str(Tkinter.TkVersion)[:3]
-    return TCL_TK_CACHE
+            BackendAgg.force = True
 
-def parse_tcl_config(tcl_lib_dir, tk_lib_dir):
-    try:
-        if sys.version_info[0] < 3:
-            import Tkinter
+            return ("Qt: %s, PyQt: %s" %
+                    (qt_version,
+                     pyqtconfig.Configuration().pyqt_version_str))
+
+
+class BackendQt4(OptionalBackendPackage):
+    name = "qt4agg"
+
+    def convert_qt_version(self, version):
+        version = '%x' % version
+        temp = []
+        while len(version) > 0:
+            version, chunk = version[:-2], version[-2:]
+            temp.insert(0, str(int(chunk, 16)))
+        return '.'.join(temp)
+
+    def check(self):
+        try:
+            from PyQt4 import pyqtconfig
+        except ImportError:
+            raise CheckFailed("not found")
         else:
-            import tkinter as Tkinter
-    except ImportError:
-        return None
 
-    tcl_poss = [tcl_lib_dir,
-                os.path.normpath(os.path.join(tcl_lib_dir, '..')),
-                "/usr/lib/tcl"+str(Tkinter.TclVersion),
-                "/usr/lib"]
-    tk_poss = [tk_lib_dir,
-                os.path.normpath(os.path.join(tk_lib_dir, '..')),
-               "/usr/lib/tk"+str(Tkinter.TkVersion),
-               "/usr/lib"]
-    for ptcl, ptk in zip(tcl_poss, tk_poss):
-        tcl_config = os.path.join(ptcl, "tclConfig.sh")
-        tk_config = os.path.join(ptk, "tkConfig.sh")
-        if (os.path.exists(tcl_config) and os.path.exists(tk_config)):
-            break
-    if not (os.path.exists(tcl_config) and os.path.exists(tk_config)):
-        return None
+            BackendAgg.force = True
 
-    def get_var(file, varname):
-        p = subprocess.Popen(
-            '. %s ; eval echo ${%s}' % (file, varname),
-            shell=True,
-            executable="/bin/sh",
-            stdout=subprocess.PIPE)
-        result = p.communicate()[0]
-        return result.decode('ascii')
-
-    tcl_lib_dir = get_var(tcl_config, 'TCL_LIB_SPEC').split()[0][2:].strip()
-    tcl_inc_dir = get_var(tcl_config, 'TCL_INCLUDE_SPEC')[2:].strip()
-    tcl_lib = get_var(tcl_config, 'TCL_LIB_FLAG')[2:].strip()
-
-    tk_lib_dir = get_var(tk_config, 'TK_LIB_SPEC').split()[0][2:].strip()
-    tk_inc_dir = get_var(tk_config, 'TK_INCLUDE_SPEC').strip()
-    if tk_inc_dir == '':
-        tk_inc_dir = tcl_inc_dir
-    else:
-        tk_inc_dir = tk_inc_dir[2:]
-    tk_lib = get_var(tk_config, 'TK_LIB_FLAG')[2:].strip()
+            return ("Qt: %s, PyQt4: %s" %
+                    (self.convert_qt_version(
+                        pyqtconfig.Configuration().qt_version),
+                     pyqtconfig.Configuration().pyqt_version_str))
 
-    if not os.path.exists(os.path.join(tk_inc_dir, 'tk.h')):
-        return None
 
-    return tcl_lib_dir, tcl_inc_dir, tcl_lib, tk_lib_dir, tk_inc_dir, tk_lib
+class BackendPySide(OptionalBackendPackage):
+    name = "pyside"
 
-def guess_tcl_config(tcl_lib_dir, tk_lib_dir, tk_ver):
-    if not (os.path.exists(tcl_lib_dir) and os.path.exists(tk_lib_dir)):
-        return None
+    def check(self):
+        try:
+            from PySide import __version__
+            from PySide import QtCore
+        except ImportError:
+            raise CheckFailed("not found")
+        else:
+            BackendAgg.force = True
 
-    tcl_lib = os.path.normpath(os.path.join(tcl_lib_dir, '../'))
-    tk_lib = os.path.normpath(os.path.join(tk_lib_dir, '../'))
-
-    tcl_inc = os.path.normpath(os.path.join(tcl_lib_dir,
-                                                    '../../include/tcl' + tk_ver))
-    if not os.path.exists(tcl_inc):
-        tcl_inc = os.path.normpath(os.path.join(tcl_lib_dir,
-                                                '../../include'))
-
-    tk_inc = os.path.normpath(os.path.join(tk_lib_dir,
-                                           '../../include/tk' + tk_ver))
-    if not os.path.exists(tk_inc):
-        tk_inc = os.path.normpath(os.path.join(tk_lib_dir,
-                                                       '../../include'))
-
-    if not os.path.exists(os.path.join(tk_inc, 'tk.h')):
-        tk_inc = tcl_inc
-
-    if not os.path.exists(tcl_inc):
-        # this is a hack for suse linux, which is broken
-        if (sys.platform.startswith('linux') and
-            os.path.exists('/usr/include/tcl.h') and
-            os.path.exists('/usr/include/tk.h')):
-            tcl_inc = '/usr/include'
-            tk_inc = '/usr/include'
-
-    if not os.path.exists(os.path.join(tk_inc, 'tk.h')):
-        return None
+            return ("Qt: %s, PySide: %s" %
+                    (QtCore.__version__, __version__))
 
-    return tcl_lib, tcl_inc, 'tcl' + tk_ver, tk_lib, tk_inc, 'tk' + tk_ver
 
-def hardcoded_tcl_config():
-    tcl_inc = "/usr/local/include"
-    tk_inc = "/usr/local/include"
-    tcl_lib = "/usr/local/lib"
-    tk_lib = "/usr/local/lib"
-    return tcl_lib, tcl_inc, 'tcl', tk_lib, tk_inc, 'tk'
+class BackendCairo(OptionalBackendPackage):
+    name = "cairo"
 
-def add_tk_flags(module):
-    'Add the module flags to build extensions which use tk'
-    message = None
-    if sys.platform == 'win32':
-        major, minor1, minor2, s, tmp = sys.version_info
-        if (2, 6) <= (major, minor1) <= (3, 3):
-            module.include_dirs.extend(['win32_static/include/tcl85'])
-            module.libraries.extend(['tk85', 'tcl85'])
-        elif major == 2 and minor1 in [3, 4, 5]:
-            module.include_dirs.extend(['win32_static/include/tcl84'])
-            module.libraries.extend(['tk84', 'tcl84'])
-        elif major == 2 and minor1 == 2:
-            module.include_dirs.extend(['win32_static/include/tcl83'])
-            module.libraries.extend(['tk83', 'tcl83'])
+    def check(self):
+        try:
+            import cairo
+        except ImportError:
+            raise CheckFailed("not found")
         else:
-            raise RuntimeError('No tk/win32 support for this python version yet')
-        module.library_dirs.extend([os.path.join(sys.prefix, 'dlls')])
-
-    elif sys.platform == 'darwin':
-        # this config section lifted directly from Imaging - thanks to
-        # the effbot!
-
-        # First test for a MacOSX/darwin framework install
-        from os.path import join, exists
-        framework_dirs = [
-            join(os.getenv('HOME'), '/Library/Frameworks'),
-            '/Library/Frameworks',
-            '/System/Library/Frameworks/',
-        ]
-
-        # Find the directory that contains the Tcl.framework and Tk.framework
-        # bundles.
-        # XXX distutils should support -F!
-        tk_framework_found = 0
-        for F in framework_dirs:
-            # both Tcl.framework and Tk.framework should be present
-            for fw in 'Tcl', 'Tk':
-                if not exists(join(F, fw + '.framework')):
-                    break
+            return "version %s" % cairo.version
+
+
+class DviPng(SetupPackage):
+    name = "dvipng"
+    optional = True
+
+    def check(self):
+        try:
+            stdin, stdout = run_child_process('dvipng -version')
+            return "version %s" % stdout.readlines()[1].decode().split()[-1]
+        except (IndexError, ValueError):
+            raise CheckFailed()
+
+
+class Ghostscript(SetupPackage):
+    name = "ghostscript"
+    optional = True
+
+    def check(self):
+        try:
+            if sys.platform == 'win32':
+                command = 'gswin32c --version'
             else:
-                # ok, F is now directory with both frameworks. Continue
-                # building
-                tk_framework_found = 1
-                break
-        if tk_framework_found:
-            # For 8.4a2, we must add -I options that point inside the Tcl and Tk
-            # frameworks. In later release we should hopefully be able to pass
-            # the -F option to gcc, which specifies a framework lookup path.
-            #
-            tk_include_dirs = [
-                join(F, fw + '.framework', H)
-                for fw in ('Tcl', 'Tk')
-                for H in ('Headers', 'Versions/Current/PrivateHeaders')
-            ]
+                command = 'gs --version'
+            stdin, stdout = run_child_process(command)
+            return "version %s" % stdout.read().decode()[:-1]
+        except (IndexError, ValueError):
+            raise CheckFailed()
 
-            # For 8.4a2, the X11 headers are not included. Rather than include a
-            # complicated search, this is a hard-coded path. It could bail out
-            # if X11 libs are not found...
-            # tk_include_dirs.append('/usr/X11R6/include')
-            frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
-            module.include_dirs.extend(tk_include_dirs)
-            module.extra_link_args.extend(frameworks)
-            module.extra_compile_args.extend(frameworks)
 
-    # you're still here? ok we'll try it this way...
-    else:
-        success = False
-        # There are 3 methods to try, in decreasing order of "smartness"
-        #
-        #   1. Parse the tclConfig.sh and tkConfig.sh files that have
-        #      all the information we need
-        #
-        #   2. Guess the include and lib dirs based on the location of
-        #      Tkinter's 'tcl_library' and 'tk_library' variables.
-        #
-        #   3. Use some hardcoded locations that seem to work on a lot
-        #      of distros.
-
-        # Query Tcl/Tk system for library paths and version string
+class LaTeX(SetupPackage):
+    name = "latex"
+    optional = True
+
+    def check(self):
         try:
-            tcl_lib_dir, tk_lib_dir, tk_ver = query_tcltk()
-        except:
-            tk_ver = ''
-            result = hardcoded_tcl_config()
-        else:
-            result = parse_tcl_config(tcl_lib_dir, tk_lib_dir)
-            if result is None:
-                message = """\
-Guessing the library and include directories for Tcl and Tk because the
-tclConfig.sh and tkConfig.sh could not be found and/or parsed."""
-                result = guess_tcl_config(tcl_lib_dir, tk_lib_dir, tk_ver)
-                if result is None:
-                    message = """\
-Using default library and include directories for Tcl and Tk because a
-Tk window failed to open.  You may need to define DISPLAY for Tk to work
-so that setup can determine where your libraries are located."""
-                    result = hardcoded_tcl_config()
-
-        # Add final versions of directories and libraries to module lists
-        tcl_lib_dir, tcl_inc_dir, tcl_lib, tk_lib_dir, tk_inc_dir, tk_lib = result
-        module.include_dirs.extend([tcl_inc_dir, tk_inc_dir])
-        module.library_dirs.extend([tcl_lib_dir, tk_lib_dir])
-        module.libraries.extend([tcl_lib, tk_lib])
-
-    return message
-
-def add_windowing_flags(module):
-    'Add the module flags to build extensions using windowing api'
-    module.include_dirs.extend(['C:/include'])
-    module.libraries.extend(['user32'])
-    module.library_dirs.extend(['C:/lib'])
-    module.extra_link_args.append("-mwindows")
-
-def build_windowing(ext_modules, packages):
-    """windowing is optional and provides functions for managing
-       windows better, .e.g.  maintaining focus on win32"""
-    global BUILT_WINDOWING
-    if BUILT_WINDOWING: return # only build it if you you haven't already
-    module = make_extension('matplotlib._windowing',
-                       ['src/_windowing.cpp'],
-                       )
-    add_windowing_flags(module)
-    ext_modules.append(module)
-    BUILT_WINDOWING = True
-
-def build_ft2font(ext_modules, packages):
-    global BUILT_FT2FONT
-    if BUILT_FT2FONT: return # only build it if you you haven't already
-    deps = ['src/ft2font.cpp', 'src/mplutils.cpp']
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    module = make_extension('matplotlib.ft2font', deps,
-                       define_macros=defines)
-    add_ft2font_flags(module)
-    ext_modules.append(module)
-    BUILT_FT2FONT = True
-
-def build_ttconv(ext_modules, packages):
-    global BUILT_TTCONV
-    if BUILT_TTCONV: return # only build it if you you haven't already
-    deps = ['src/_ttconv.cpp',
-            'ttconv/pprdrv_tt.cpp',
-            'ttconv/pprdrv_tt2.cpp',
-            'ttconv/ttutil.cpp']
-
-    module = make_extension('matplotlib.ttconv', deps,
-                       define_macros=defines)
-    add_base_flags(module)
-    ext_modules.append(module)
-    BUILT_TTCONV = True
-
-def build_gtkagg(ext_modules, packages):
-    global BUILT_GTKAGG
-    if BUILT_GTKAGG: return # only build it if you you haven't already
-    deps = ['src/agg_py_transforms.cpp', 'src/_gtkagg.cpp', 'src/mplutils.cpp']
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    module = make_extension('matplotlib.backends._gtkagg',
-                       deps,
-                       define_macros=defines
-                       )
-
-    # add agg flags before pygtk because agg only supports freetype1
-    # and pygtk includes freetype2.  This is a bit fragile.
-
-    add_agg_flags(module)
-    add_ft2font_flags(module)
-    add_pygtk_flags(module)
-    add_numpy_flags(module)
-
-    ext_modules.append(module)
-    BUILT_GTKAGG = True
-
-def build_tkagg(ext_modules, packages):
-    global BUILT_TKAGG
-    if BUILT_TKAGG: return # only build it if you you haven't already
-    deps = ['src/agg_py_transforms.cpp', 'src/_tkagg.cpp']
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    module = make_extension('matplotlib.backends._tkagg',
-                       deps,
-                       define_macros=defines
-                       )
-
-    add_tk_flags(module) # do this first
-    add_agg_flags(module)
-    add_ft2font_flags(module)
-
-    ext_modules.append(module)
-    BUILT_TKAGG = True
-
-
-def build_macosx(ext_modules, packages):
-    global BUILT_MACOSX
-    if BUILT_MACOSX: return # only build it if you you haven't already
-    deps = ['src/_macosx.m',
-            'CXX/cxx_extensions.cxx',
-            'CXX/cxxextensions.c',
-            'CXX/cxxsupport.cxx',
-            'CXX/IndirectPythonInterface.cxx',
-            'src/agg_py_transforms.cpp',
-            'src/path_cleanup.cpp']
-    module = make_extension('matplotlib.backends._macosx',
-                       deps,
-                       extra_link_args = ['-framework','Cocoa'],
-                       define_macros=defines
-                      )
-    add_numpy_flags(module)
-    add_agg_flags(module)
-    ext_modules.append(module)
-    BUILT_MACOSX = True
-
-def build_png(ext_modules, packages):
-    global BUILT_PNG
-    if BUILT_PNG: return # only build it if you you haven't already
-
-    deps = ['src/_png.cpp', 'src/mplutils.cpp']
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    module = make_extension(
-        'matplotlib._png',
-        deps,
-        include_dirs=numpy_inc_dirs,
-        define_macros=defines
-        )
-
-    add_png_flags(module)
-    ext_modules.append(module)
-
-    BUILT_PNG = True
-
-
-def build_agg(ext_modules, packages):
-    global BUILT_AGG
-    if BUILT_AGG: return # only build it if you you haven't already
-
-    agg = (
-           'agg_trans_affine.cpp',
-           'agg_bezier_arc.cpp',
-           'agg_curves.cpp',
-           'agg_vcgen_dash.cpp',
-           'agg_vcgen_stroke.cpp',
-           'agg_image_filters.cpp',
-           )
-
-    deps = ['%s/src/%s'%(AGG_VERSION, name) for name in agg]
-    deps.extend(['src/mplutils.cpp', 'src/agg_py_transforms.cpp'])
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-    temp_copy('src/_backend_agg.cpp', 'src/backend_agg.cpp')
-    deps.append('src/backend_agg.cpp')
-    module = make_extension(
-        'matplotlib.backends._backend_agg',
-        deps,
-        include_dirs=numpy_inc_dirs,
-        define_macros=defines
-        )
-
-    add_numpy_flags(module)
-    add_agg_flags(module)
-    add_ft2font_flags(module)
-    ext_modules.append(module)
-
-    BUILT_AGG = True
-
-def build_path(ext_modules, packages):
-    global BUILT_PATH
-    if BUILT_PATH: return # only build it if you you haven't already
-
-    agg = (
-           'agg_vcgen_contour.cpp',
-           'agg_curves.cpp',
-           'agg_bezier_arc.cpp',
-           'agg_trans_affine.cpp',
-           'agg_vcgen_stroke.cpp',
-           )
-
-    deps = ['%s/src/%s'%(AGG_VERSION, name) for name in agg]
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    temp_copy('src/_path.cpp', 'src/path.cpp')
-    deps.extend(['src/agg_py_transforms.cpp',
-                 'src/path_cleanup.cpp',
-                 'src/path.cpp'])
-    module = make_extension(
-        'matplotlib._path',
-        deps,
-        include_dirs=numpy_inc_dirs,
-        define_macros=defines
-        )
-
-    add_numpy_flags(module)
-
-    add_agg_flags(module)
-    ext_modules.append(module)
-
-    BUILT_PATH = True
-
-def build_image(ext_modules, packages):
-    global BUILT_IMAGE
-    if BUILT_IMAGE: return # only build it if you you haven't already
-
-    agg = ('agg_trans_affine.cpp',
-           'agg_image_filters.cpp',
-           'agg_bezier_arc.cpp',
-           )
-
-    temp_copy('src/_image.cpp', 'src/image.cpp')
-    deps = ['src/image.cpp', 'src/mplutils.cpp']
-    deps.extend(['%s/src/%s'%(AGG_VERSION,name) for name in agg])
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    module = make_extension(
-        'matplotlib._image',
-        deps,
-        include_dirs=numpy_inc_dirs,
-        define_macros=defines
-        )
-
-    add_numpy_flags(module)
-    add_agg_flags(module)
-    ext_modules.append(module)
-
-    BUILT_IMAGE = True
-
-
-
-def build_delaunay(ext_modules, packages):
-    global BUILT_DELAUNAY
-    if BUILT_DELAUNAY:
-        return # only build it if you you haven't already
-
-    sourcefiles=["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
-                 "delaunay_utils.cpp", "natneighbors.cpp"]
-    sourcefiles = [os.path.join('lib/matplotlib/delaunay',s) for s in sourcefiles]
-    delaunay = make_extension('matplotlib._delaunay',sourcefiles,
-                         include_dirs=numpy_inc_dirs,
-                         define_macros=defines
-                         )
-    add_numpy_flags(delaunay)
-    add_base_flags(delaunay)
-    ext_modules.append(delaunay)
-    packages.extend(['matplotlib.delaunay'])
-    BUILT_DELAUNAY = True
-
-
-def build_contour(ext_modules, packages):
-    global BUILT_CONTOUR
-    if BUILT_CONTOUR: return # only build it if you you haven't already
-
-    module = make_extension(
-        'matplotlib._cntr',
-        [ 'src/cntr.c'],
-        include_dirs=numpy_inc_dirs,
-        define_macros=defines
-        )
-    add_numpy_flags(module)
-    add_base_flags(module)
-    ext_modules.append(module)
-
-    BUILT_CONTOUR = True
-
-
-def build_gdk(ext_modules, packages):
-    global BUILT_GDK
-    if BUILT_GDK: return # only build it if you you haven't already
-
-    temp_copy('src/_backend_gdk.c', 'src/backend_gdk.c')
-    module = make_extension(
-        'matplotlib.backends._backend_gdk',
-        ['src/backend_gdk.c'],
-        libraries = [],
-        include_dirs=numpy_inc_dirs,
-        define_macros=defines
-        )
-
-    add_numpy_flags(module)
-    add_base_flags(module)
-    add_pygtk_flags(module)
-    ext_modules.append(module)
-
-    BUILT_GDK = True
-
-
-def build_tri(ext_modules, packages):
-    global BUILT_TRI
-    if BUILT_TRI: return # only build it if you you haven't already
-
-    deps = ['lib/matplotlib/tri/_tri.cpp', 'src/mplutils.cpp']
-    deps.extend(glob.glob('CXX/*.cxx'))
-    deps.extend(glob.glob('CXX/*.c'))
-
-    module = make_extension('matplotlib._tri', deps,
-                       define_macros=defines)
-    add_numpy_flags(module)
-    add_base_flags(module)
-    ext_modules.append(module)
-    BUILT_TRI = True
+            stdin, stdout = run_child_process('latex -version')
+            line = stdout.readlines()[0].decode()
+            pattern = '(3\.1\d+)|(MiKTeX \d+.\d+)'
+            match = re.search(pattern, line)
+            return "version %s" % match.group(0)
+        except (IndexError, ValueError, AttributeError):
+            raise CheckFailed()
+
+
+class PdfToPs(SetupPackage):
+    name = "pdftops"
+    optional = True
+
+    def check(self):
+        try:
+            stdin, stdout = run_child_process('pdftops -v')
+            for line in stdout.readlines():
+                line = line.decode()
+                if 'version' in line:
+                    return "version %s" % line.split()[2]
+        except (IndexError, ValueError):
+            pass
+
+        raise CheckFailed()
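
The classes added above all follow one convention: an optional dependency is wrapped in a small class whose check() method either returns a human-readable status string or raises CheckFailed. Below is a minimal sketch (not part of this patch) of how a further external-tool probe could be written and queried at setup time; the Inkscape class and the loop are hypothetical, while SetupPackage, CheckFailed and run_child_process are the helpers already used by the classes in this hunk and are assumed to be in scope.

    # Hypothetical example, assuming SetupPackage, CheckFailed and
    # run_child_process from the patched module are in scope.
    class Inkscape(SetupPackage):
        name = "inkscape"
        optional = True

        def check(self):
            try:
                # Probe the command-line tool and report its version string.
                stdin, stdout = run_child_process('inkscape --version')
                line = stdout.readline().decode()
                return "version %s" % line.split()[1]
            except (IndexError, ValueError):
                raise CheckFailed()

    # At setup time, something along these lines would collect the results:
    for package in [Inkscape()]:
        try:
            print("%s: %s" % (package.name, package.check()))
        except CheckFailed as failure:
            print("%s: no [%s]" % (package.name, failure))
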
diff --git a/src/_backend_agg.cpp b/src/_backend_agg.cpp
index 42089f9d5be4..7ebc34843eb2 100644
--- a/src/_backend_agg.cpp
+++ b/src/_backend_agg.cpp
@@ -6,6 +6,7 @@
 /* Python API mandates Python.h is included *first* */
 #include "Python.h"
 
+/* TODO: Remove this dependency */
 #include "ft2font.h"
 #include "_image.h"
 #include "_backend_agg.h"
@@ -618,7 +619,11 @@ RendererAgg::render_clippath(const Py::Object& clippath,
         rendererBaseAlphaMask.clear(agg::gray8(0, 0));
         transformed_path_t transformed_clippath(clippath_iter, trans);
         agg::conv_curve curved_clippath(transformed_clippath);
-        theRasterizer.add_path(curved_clippath);
+        try {
+            theRasterizer.add_path(curved_clippath);
+        } catch (std::overflow_error &e) {
+            throw Py::OverflowError(e.what());
+        }
         rendererAlphaMask.color(agg::gray8(255, 255));
         agg::render_scanlines(theRasterizer, scanlineAlphaMask, rendererAlphaMask);
         lastclippath = clippath;
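
The same try/catch guard is applied to every theRasterizer.add_path() call in the hunks below, so a std::overflow_error raised during rasterization (for example when a path generates more rasterizer cells than Agg will allocate) is surfaced to Python as a regular OverflowError rather than escaping as an unhandled C++ exception. A minimal sketch of what that means for calling code, assuming the Agg backend and geometry extreme enough to overflow the rasterizer (whether a given plot actually does depends on Agg's internal limits):

    # Illustrative only; not part of this patch.
    import matplotlib
    matplotlib.use('Agg')            # the backend modified in this file
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot(range(10), range(10), linewidth=1e9)   # deliberately extreme geometry

    try:
        fig.canvas.draw()            # rasterizes via RendererAgg
    except OverflowError as exc:
        # With this patch the overflow is reported here as a Python exception.
        print("Agg rasterizer overflow: %s" % exc)
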
@@ -697,7 +702,11 @@ RendererAgg::draw_markers(const Py::Tuple& args)
         unsigned fillSize = 0;
         if (face.first)
         {
-            theRasterizer.add_path(marker_path_curve);
+            try {
+                theRasterizer.add_path(marker_path_curve);
+            } catch (std::overflow_error &e) {
+                throw Py::OverflowError(e.what());
+            }
             agg::render_scanlines(theRasterizer, slineP8, scanlines);
             fillSize = scanlines.byte_size();
             if (fillSize >= MARKER_CACHE_SIZE)
@@ -712,7 +721,11 @@ RendererAgg::draw_markers(const Py::Tuple& args)
         stroke.line_cap(gc.cap);
         stroke.line_join(gc.join);
         theRasterizer.reset();
-        theRasterizer.add_path(stroke);
+        try {
+            theRasterizer.add_path(stroke);
+        } catch (std::overflow_error &e) {
+            throw Py::OverflowError(e.what());
+        }
         agg::render_scanlines(theRasterizer, slineP8, scanlines);
         unsigned strokeSize = scanlines.byte_size();
         if (strokeSize >= MARKER_CACHE_SIZE)
@@ -979,7 +992,11 @@ RendererAgg::draw_text_image(const Py::Tuple& args)
     span_gen_type output_span_generator(&image_span_generator, gc.color);
     renderer_type ri(rendererBase, sa, output_span_generator);
 
-    theRasterizer.add_path(rect2);
+    try {
+        theRasterizer.add_path(rect2);
+    } catch (std::overflow_error &e) {
+        throw Py::OverflowError(e.what());
+    }
     agg::render_scanlines(theRasterizer, slineP8, ri);
 
     return Py::Object();
@@ -1116,7 +1133,11 @@ RendererAgg::draw_image(const Py::Tuple& args)
             amask_ren_type r(pfa);
             renderer_type_alpha ri(r, sa, spans);
 
-            theRasterizer.add_path(rect2);
+            try {
+                theRasterizer.add_path(rect2);
+            } catch (std::overflow_error &e) {
+                throw Py::OverflowError(e.what());
+            }
             agg::render_scanlines(theRasterizer, slineP8, ri);
         }
         else
@@ -1130,7 +1151,11 @@ RendererAgg::draw_image(const Py::Tuple& args)
             ren_type r(pixFmt);
             renderer_type ri(r, sa, spans);
 
-            theRasterizer.add_path(rect2);
+            try {
+                theRasterizer.add_path(rect2);
+            } catch (std::overflow_error &e) {
+                throw Py::OverflowError(e.what());
+            }
             agg::render_scanlines(theRasterizer, slineP8, ri);
         }
 
@@ -1165,7 +1190,11 @@ void RendererAgg::_draw_path(path_t& path, bool has_clippath,
     // Render face
     if (face.first)
     {
-        theRasterizer.add_path(path);
+        try {
+            theRasterizer.add_path(path);
+        } catch (std::overflow_error &e) {
+            throw Py::OverflowError(e.what());
+        }
 
         if (gc.isaa)
         {
@@ -1232,9 +1261,17 @@ void RendererAgg::_draw_path(path_t& path, bool has_clippath,
         rb.clear(agg::rgba(0.0, 0.0, 0.0, 0.0));
         rs.color(gc.color);
 
-        theRasterizer.add_path(hatch_path_curve);
+        try {
+            theRasterizer.add_path(hatch_path_curve);
+        } catch (std::overflow_error &e) {
+            throw Py::OverflowError(e.what());
+        }
         agg::render_scanlines(theRasterizer, slineP8, rs);
-        theRasterizer.add_path(hatch_path_stroke);
+        try {
+            theRasterizer.add_path(hatch_path_stroke);
+        } catch (std::overflow_error &e) {
+            throw Py::OverflowError(e.what());
+        }
         agg::render_scanlines(theRasterizer, slineP8, rs);
 
         // Put clipping back on, if originally set on entry to this
@@ -1251,7 +1288,11 @@ void RendererAgg::_draw_path(path_t& path, bool has_clippath,
         agg::span_allocator sa;
         img_source_type img_src(hatch_img_pixf);
         span_gen_type sg(img_src, 0, 0);
-        theRasterizer.add_path(path);
+        try {
+            theRasterizer.add_path(path);
+        } catch (std::overflow_error &e) {
+            throw Py::OverflowError(e.what());
+        }
         agg::render_scanlines_aa(theRasterizer, slineP8, rendererBase, sa, sg);
     }
 
@@ -1269,7 +1310,11 @@ void RendererAgg::_draw_path(path_t& path, bool has_clippath,
             stroke.width(linewidth);
             stroke.line_cap(gc.cap);
             stroke.line_join(gc.join);
-            theRasterizer.add_path(stroke);
+            try {
+                theRasterizer.add_path(stroke);
+            } catch (std::overflow_error &e) {
+                throw Py::OverflowError(e.what());
+            }
         }
         else
         {
@@ -1290,7 +1335,11 @@ void RendererAgg::_draw_path(path_t& path, bool has_clippath,
             stroke.line_cap(gc.cap);
             stroke.line_join(gc.join);
             stroke.width(linewidth);
-            theRasterizer.add_path(stroke);
+            try {
+                theRasterizer.add_path(stroke);
+            } catch (std::overflow_error &e) {
+                throw Py::OverflowError(e.what());
+            }
         }
 
         if (gc.isaa)
@@ -1887,7 +1936,11 @@ RendererAgg::_draw_gouraud_triangle(const double* points,
         tpoints[4], tpoints[5],
         0.5);
 
-    theRasterizer.add_path(span_gen);
+    try {
+        theRasterizer.add_path(span_gen);
+    } catch (std::overflow_error &e) {
+        throw Py::OverflowError(e.what());
+    }
 
     if (has_clippath)
     {