Created starter files for the project.
This commit is contained in:
commit
73f0c0db42
1992 changed files with 769897 additions and 0 deletions
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,36 @@
|
|||
"""
|
||||
This module provides means to detect the App Engine environment.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def is_appengine():
|
||||
return is_local_appengine() or is_prod_appengine()
|
||||
|
||||
|
||||
def is_appengine_sandbox():
|
||||
"""Reports if the app is running in the first generation sandbox.
|
||||
|
||||
The second generation runtimes are technically still in a sandbox, but it
|
||||
is much less restrictive, so generally you shouldn't need to check for it.
|
||||
see https://cloud.google.com/appengine/docs/standard/runtimes
|
||||
"""
|
||||
return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
|
||||
|
||||
|
||||
def is_local_appengine():
|
||||
return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
|
||||
"SERVER_SOFTWARE", ""
|
||||
).startswith("Development/")
|
||||
|
||||
|
||||
def is_prod_appengine():
|
||||
return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
|
||||
"SERVER_SOFTWARE", ""
|
||||
).startswith("Google App Engine/")
|
||||
|
||||
|
||||
def is_prod_appengine_mvms():
|
||||
"""Deprecated."""
|
||||
return False
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,493 @@
|
|||
"""
|
||||
This module uses ctypes to bind a whole bunch of functions and constants from
|
||||
SecureTransport. The goal here is to provide the low-level API to
|
||||
SecureTransport. These are essentially the C-level functions and constants, and
|
||||
they're pretty gross to work with.
|
||||
|
||||
This code is a bastardised version of the code found in Will Bond's oscrypto
|
||||
library. An enormous debt is owed to him for blazing this trail for us. For
|
||||
that reason, this code should be considered to be covered both by urllib3's
|
||||
license and by oscrypto's:
|
||||
|
||||
Copyright (c) 2015-2016 Will Bond <will@wbond.net>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the "Software"),
|
||||
to deal in the Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import platform
|
||||
from ctypes.util import find_library
|
||||
from ctypes import (
|
||||
c_void_p,
|
||||
c_int32,
|
||||
c_char_p,
|
||||
c_size_t,
|
||||
c_byte,
|
||||
c_uint32,
|
||||
c_ulong,
|
||||
c_long,
|
||||
c_bool,
|
||||
)
|
||||
from ctypes import CDLL, POINTER, CFUNCTYPE
|
||||
|
||||
|
||||
security_path = find_library("Security")
|
||||
if not security_path:
|
||||
raise ImportError("The library Security could not be found")
|
||||
|
||||
|
||||
core_foundation_path = find_library("CoreFoundation")
|
||||
if not core_foundation_path:
|
||||
raise ImportError("The library CoreFoundation could not be found")
|
||||
|
||||
|
||||
version = platform.mac_ver()[0]
|
||||
version_info = tuple(map(int, version.split(".")))
|
||||
if version_info < (10, 8):
|
||||
raise OSError(
|
||||
"Only OS X 10.8 and newer are supported, not %s.%s"
|
||||
% (version_info[0], version_info[1])
|
||||
)
|
||||
|
||||
Security = CDLL(security_path, use_errno=True)
|
||||
CoreFoundation = CDLL(core_foundation_path, use_errno=True)
|
||||
|
||||
Boolean = c_bool
|
||||
CFIndex = c_long
|
||||
CFStringEncoding = c_uint32
|
||||
CFData = c_void_p
|
||||
CFString = c_void_p
|
||||
CFArray = c_void_p
|
||||
CFMutableArray = c_void_p
|
||||
CFDictionary = c_void_p
|
||||
CFError = c_void_p
|
||||
CFType = c_void_p
|
||||
CFTypeID = c_ulong
|
||||
|
||||
CFTypeRef = POINTER(CFType)
|
||||
CFAllocatorRef = c_void_p
|
||||
|
||||
OSStatus = c_int32
|
||||
|
||||
CFDataRef = POINTER(CFData)
|
||||
CFStringRef = POINTER(CFString)
|
||||
CFArrayRef = POINTER(CFArray)
|
||||
CFMutableArrayRef = POINTER(CFMutableArray)
|
||||
CFDictionaryRef = POINTER(CFDictionary)
|
||||
CFArrayCallBacks = c_void_p
|
||||
CFDictionaryKeyCallBacks = c_void_p
|
||||
CFDictionaryValueCallBacks = c_void_p
|
||||
|
||||
SecCertificateRef = POINTER(c_void_p)
|
||||
SecExternalFormat = c_uint32
|
||||
SecExternalItemType = c_uint32
|
||||
SecIdentityRef = POINTER(c_void_p)
|
||||
SecItemImportExportFlags = c_uint32
|
||||
SecItemImportExportKeyParameters = c_void_p
|
||||
SecKeychainRef = POINTER(c_void_p)
|
||||
SSLProtocol = c_uint32
|
||||
SSLCipherSuite = c_uint32
|
||||
SSLContextRef = POINTER(c_void_p)
|
||||
SecTrustRef = POINTER(c_void_p)
|
||||
SSLConnectionRef = c_uint32
|
||||
SecTrustResultType = c_uint32
|
||||
SecTrustOptionFlags = c_uint32
|
||||
SSLProtocolSide = c_uint32
|
||||
SSLConnectionType = c_uint32
|
||||
SSLSessionOption = c_uint32
|
||||
|
||||
|
||||
try:
|
||||
Security.SecItemImport.argtypes = [
|
||||
CFDataRef,
|
||||
CFStringRef,
|
||||
POINTER(SecExternalFormat),
|
||||
POINTER(SecExternalItemType),
|
||||
SecItemImportExportFlags,
|
||||
POINTER(SecItemImportExportKeyParameters),
|
||||
SecKeychainRef,
|
||||
POINTER(CFArrayRef),
|
||||
]
|
||||
Security.SecItemImport.restype = OSStatus
|
||||
|
||||
Security.SecCertificateGetTypeID.argtypes = []
|
||||
Security.SecCertificateGetTypeID.restype = CFTypeID
|
||||
|
||||
Security.SecIdentityGetTypeID.argtypes = []
|
||||
Security.SecIdentityGetTypeID.restype = CFTypeID
|
||||
|
||||
Security.SecKeyGetTypeID.argtypes = []
|
||||
Security.SecKeyGetTypeID.restype = CFTypeID
|
||||
|
||||
Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
|
||||
Security.SecCertificateCreateWithData.restype = SecCertificateRef
|
||||
|
||||
Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
|
||||
Security.SecCertificateCopyData.restype = CFDataRef
|
||||
|
||||
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
|
||||
Security.SecCopyErrorMessageString.restype = CFStringRef
|
||||
|
||||
Security.SecIdentityCreateWithCertificate.argtypes = [
|
||||
CFTypeRef,
|
||||
SecCertificateRef,
|
||||
POINTER(SecIdentityRef),
|
||||
]
|
||||
Security.SecIdentityCreateWithCertificate.restype = OSStatus
|
||||
|
||||
Security.SecKeychainCreate.argtypes = [
|
||||
c_char_p,
|
||||
c_uint32,
|
||||
c_void_p,
|
||||
Boolean,
|
||||
c_void_p,
|
||||
POINTER(SecKeychainRef),
|
||||
]
|
||||
Security.SecKeychainCreate.restype = OSStatus
|
||||
|
||||
Security.SecKeychainDelete.argtypes = [SecKeychainRef]
|
||||
Security.SecKeychainDelete.restype = OSStatus
|
||||
|
||||
Security.SecPKCS12Import.argtypes = [
|
||||
CFDataRef,
|
||||
CFDictionaryRef,
|
||||
POINTER(CFArrayRef),
|
||||
]
|
||||
Security.SecPKCS12Import.restype = OSStatus
|
||||
|
||||
SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
|
||||
SSLWriteFunc = CFUNCTYPE(
|
||||
OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
|
||||
)
|
||||
|
||||
Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
|
||||
Security.SSLSetIOFuncs.restype = OSStatus
|
||||
|
||||
Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
|
||||
Security.SSLSetPeerID.restype = OSStatus
|
||||
|
||||
Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
|
||||
Security.SSLSetCertificate.restype = OSStatus
|
||||
|
||||
Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
|
||||
Security.SSLSetCertificateAuthorities.restype = OSStatus
|
||||
|
||||
Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
|
||||
Security.SSLSetConnection.restype = OSStatus
|
||||
|
||||
Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
|
||||
Security.SSLSetPeerDomainName.restype = OSStatus
|
||||
|
||||
Security.SSLHandshake.argtypes = [SSLContextRef]
|
||||
Security.SSLHandshake.restype = OSStatus
|
||||
|
||||
Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
|
||||
Security.SSLRead.restype = OSStatus
|
||||
|
||||
Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
|
||||
Security.SSLWrite.restype = OSStatus
|
||||
|
||||
Security.SSLClose.argtypes = [SSLContextRef]
|
||||
Security.SSLClose.restype = OSStatus
|
||||
|
||||
Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
|
||||
Security.SSLGetNumberSupportedCiphers.restype = OSStatus
|
||||
|
||||
Security.SSLGetSupportedCiphers.argtypes = [
|
||||
SSLContextRef,
|
||||
POINTER(SSLCipherSuite),
|
||||
POINTER(c_size_t),
|
||||
]
|
||||
Security.SSLGetSupportedCiphers.restype = OSStatus
|
||||
|
||||
Security.SSLSetEnabledCiphers.argtypes = [
|
||||
SSLContextRef,
|
||||
POINTER(SSLCipherSuite),
|
||||
c_size_t,
|
||||
]
|
||||
Security.SSLSetEnabledCiphers.restype = OSStatus
|
||||
|
||||
Security.SSLGetNumberEnabledCiphers.argtype = [SSLContextRef, POINTER(c_size_t)]
|
||||
Security.SSLGetNumberEnabledCiphers.restype = OSStatus
|
||||
|
||||
Security.SSLGetEnabledCiphers.argtypes = [
|
||||
SSLContextRef,
|
||||
POINTER(SSLCipherSuite),
|
||||
POINTER(c_size_t),
|
||||
]
|
||||
Security.SSLGetEnabledCiphers.restype = OSStatus
|
||||
|
||||
Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
|
||||
Security.SSLGetNegotiatedCipher.restype = OSStatus
|
||||
|
||||
Security.SSLGetNegotiatedProtocolVersion.argtypes = [
|
||||
SSLContextRef,
|
||||
POINTER(SSLProtocol),
|
||||
]
|
||||
Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
|
||||
|
||||
Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
|
||||
Security.SSLCopyPeerTrust.restype = OSStatus
|
||||
|
||||
Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
|
||||
Security.SecTrustSetAnchorCertificates.restype = OSStatus
|
||||
|
||||
Security.SecTrustSetAnchorCertificatesOnly.argstypes = [SecTrustRef, Boolean]
|
||||
Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
|
||||
|
||||
Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
|
||||
Security.SecTrustEvaluate.restype = OSStatus
|
||||
|
||||
Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
|
||||
Security.SecTrustGetCertificateCount.restype = CFIndex
|
||||
|
||||
Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
|
||||
Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
|
||||
|
||||
Security.SSLCreateContext.argtypes = [
|
||||
CFAllocatorRef,
|
||||
SSLProtocolSide,
|
||||
SSLConnectionType,
|
||||
]
|
||||
Security.SSLCreateContext.restype = SSLContextRef
|
||||
|
||||
Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
|
||||
Security.SSLSetSessionOption.restype = OSStatus
|
||||
|
||||
Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
|
||||
Security.SSLSetProtocolVersionMin.restype = OSStatus
|
||||
|
||||
Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
|
||||
Security.SSLSetProtocolVersionMax.restype = OSStatus
|
||||
|
||||
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
|
||||
Security.SecCopyErrorMessageString.restype = CFStringRef
|
||||
|
||||
Security.SSLReadFunc = SSLReadFunc
|
||||
Security.SSLWriteFunc = SSLWriteFunc
|
||||
Security.SSLContextRef = SSLContextRef
|
||||
Security.SSLProtocol = SSLProtocol
|
||||
Security.SSLCipherSuite = SSLCipherSuite
|
||||
Security.SecIdentityRef = SecIdentityRef
|
||||
Security.SecKeychainRef = SecKeychainRef
|
||||
Security.SecTrustRef = SecTrustRef
|
||||
Security.SecTrustResultType = SecTrustResultType
|
||||
Security.SecExternalFormat = SecExternalFormat
|
||||
Security.OSStatus = OSStatus
|
||||
|
||||
Security.kSecImportExportPassphrase = CFStringRef.in_dll(
|
||||
Security, "kSecImportExportPassphrase"
|
||||
)
|
||||
Security.kSecImportItemIdentity = CFStringRef.in_dll(
|
||||
Security, "kSecImportItemIdentity"
|
||||
)
|
||||
|
||||
# CoreFoundation time!
|
||||
CoreFoundation.CFRetain.argtypes = [CFTypeRef]
|
||||
CoreFoundation.CFRetain.restype = CFTypeRef
|
||||
|
||||
CoreFoundation.CFRelease.argtypes = [CFTypeRef]
|
||||
CoreFoundation.CFRelease.restype = None
|
||||
|
||||
CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
|
||||
CoreFoundation.CFGetTypeID.restype = CFTypeID
|
||||
|
||||
CoreFoundation.CFStringCreateWithCString.argtypes = [
|
||||
CFAllocatorRef,
|
||||
c_char_p,
|
||||
CFStringEncoding,
|
||||
]
|
||||
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
|
||||
|
||||
CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
|
||||
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
|
||||
|
||||
CoreFoundation.CFStringGetCString.argtypes = [
|
||||
CFStringRef,
|
||||
c_char_p,
|
||||
CFIndex,
|
||||
CFStringEncoding,
|
||||
]
|
||||
CoreFoundation.CFStringGetCString.restype = c_bool
|
||||
|
||||
CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
|
||||
CoreFoundation.CFDataCreate.restype = CFDataRef
|
||||
|
||||
CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
|
||||
CoreFoundation.CFDataGetLength.restype = CFIndex
|
||||
|
||||
CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
|
||||
CoreFoundation.CFDataGetBytePtr.restype = c_void_p
|
||||
|
||||
CoreFoundation.CFDictionaryCreate.argtypes = [
|
||||
CFAllocatorRef,
|
||||
POINTER(CFTypeRef),
|
||||
POINTER(CFTypeRef),
|
||||
CFIndex,
|
||||
CFDictionaryKeyCallBacks,
|
||||
CFDictionaryValueCallBacks,
|
||||
]
|
||||
CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
|
||||
|
||||
CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
|
||||
CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
|
||||
|
||||
CoreFoundation.CFArrayCreate.argtypes = [
|
||||
CFAllocatorRef,
|
||||
POINTER(CFTypeRef),
|
||||
CFIndex,
|
||||
CFArrayCallBacks,
|
||||
]
|
||||
CoreFoundation.CFArrayCreate.restype = CFArrayRef
|
||||
|
||||
CoreFoundation.CFArrayCreateMutable.argtypes = [
|
||||
CFAllocatorRef,
|
||||
CFIndex,
|
||||
CFArrayCallBacks,
|
||||
]
|
||||
CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
|
||||
|
||||
CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
|
||||
CoreFoundation.CFArrayAppendValue.restype = None
|
||||
|
||||
CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
|
||||
CoreFoundation.CFArrayGetCount.restype = CFIndex
|
||||
|
||||
CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
|
||||
CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
|
||||
|
||||
CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
|
||||
CoreFoundation, "kCFAllocatorDefault"
|
||||
)
|
||||
CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
|
||||
CoreFoundation, "kCFTypeArrayCallBacks"
|
||||
)
|
||||
CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
|
||||
CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
|
||||
)
|
||||
CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
|
||||
CoreFoundation, "kCFTypeDictionaryValueCallBacks"
|
||||
)
|
||||
|
||||
CoreFoundation.CFTypeRef = CFTypeRef
|
||||
CoreFoundation.CFArrayRef = CFArrayRef
|
||||
CoreFoundation.CFStringRef = CFStringRef
|
||||
CoreFoundation.CFDictionaryRef = CFDictionaryRef
|
||||
|
||||
except (AttributeError):
|
||||
raise ImportError("Error initializing ctypes")
|
||||
|
||||
|
||||
class CFConst(object):
|
||||
"""
|
||||
A class object that acts as essentially a namespace for CoreFoundation
|
||||
constants.
|
||||
"""
|
||||
|
||||
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
|
||||
|
||||
|
||||
class SecurityConst(object):
|
||||
"""
|
||||
A class object that acts as essentially a namespace for Security constants.
|
||||
"""
|
||||
|
||||
kSSLSessionOptionBreakOnServerAuth = 0
|
||||
|
||||
kSSLProtocol2 = 1
|
||||
kSSLProtocol3 = 2
|
||||
kTLSProtocol1 = 4
|
||||
kTLSProtocol11 = 7
|
||||
kTLSProtocol12 = 8
|
||||
# SecureTransport does not support TLS 1.3 even if there's a constant for it
|
||||
kTLSProtocol13 = 10
|
||||
kTLSProtocolMaxSupported = 999
|
||||
|
||||
kSSLClientSide = 1
|
||||
kSSLStreamType = 0
|
||||
|
||||
kSecFormatPEMSequence = 10
|
||||
|
||||
kSecTrustResultInvalid = 0
|
||||
kSecTrustResultProceed = 1
|
||||
# This gap is present on purpose: this was kSecTrustResultConfirm, which
|
||||
# is deprecated.
|
||||
kSecTrustResultDeny = 3
|
||||
kSecTrustResultUnspecified = 4
|
||||
kSecTrustResultRecoverableTrustFailure = 5
|
||||
kSecTrustResultFatalTrustFailure = 6
|
||||
kSecTrustResultOtherError = 7
|
||||
|
||||
errSSLProtocol = -9800
|
||||
errSSLWouldBlock = -9803
|
||||
errSSLClosedGraceful = -9805
|
||||
errSSLClosedNoNotify = -9816
|
||||
errSSLClosedAbort = -9806
|
||||
|
||||
errSSLXCertChainInvalid = -9807
|
||||
errSSLCrypto = -9809
|
||||
errSSLInternal = -9810
|
||||
errSSLCertExpired = -9814
|
||||
errSSLCertNotYetValid = -9815
|
||||
errSSLUnknownRootCert = -9812
|
||||
errSSLNoRootCert = -9813
|
||||
errSSLHostNameMismatch = -9843
|
||||
errSSLPeerHandshakeFail = -9824
|
||||
errSSLPeerUserCancelled = -9839
|
||||
errSSLWeakPeerEphemeralDHKey = -9850
|
||||
errSSLServerAuthCompleted = -9841
|
||||
errSSLRecordOverflow = -9847
|
||||
|
||||
errSecVerifyFailed = -67808
|
||||
errSecNoTrustSettings = -25263
|
||||
errSecItemNotFound = -25300
|
||||
errSecInvalidTrustSettings = -25262
|
||||
|
||||
# Cipher suites. We only pick the ones our default cipher string allows.
|
||||
# Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
|
||||
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
|
||||
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
|
||||
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
|
||||
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
|
||||
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
|
||||
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
|
||||
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
|
||||
TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
|
||||
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
|
||||
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
|
||||
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
|
||||
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
|
||||
TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
|
||||
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
|
||||
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
|
||||
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
|
||||
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
|
||||
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
|
||||
TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
|
||||
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
|
||||
TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
|
||||
TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
|
||||
TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
|
||||
TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
|
||||
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
|
||||
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
|
||||
TLS_AES_128_GCM_SHA256 = 0x1301
|
||||
TLS_AES_256_GCM_SHA384 = 0x1302
|
||||
TLS_AES_128_CCM_8_SHA256 = 0x1305
|
||||
TLS_AES_128_CCM_SHA256 = 0x1304
|
|
@ -0,0 +1,328 @@
|
|||
"""
|
||||
Low-level helpers for the SecureTransport bindings.
|
||||
|
||||
These are Python functions that are not directly related to the high-level APIs
|
||||
but are necessary to get them to work. They include a whole bunch of low-level
|
||||
CoreFoundation messing about and memory management. The concerns in this module
|
||||
are almost entirely about trying to avoid memory leaks and providing
|
||||
appropriate and useful assistance to the higher-level code.
|
||||
"""
|
||||
import base64
|
||||
import ctypes
|
||||
import itertools
|
||||
import re
|
||||
import os
|
||||
import ssl
|
||||
import tempfile
|
||||
|
||||
from .bindings import Security, CoreFoundation, CFConst
|
||||
|
||||
|
||||
# This regular expression is used to grab PEM data out of a PEM bundle.
|
||||
_PEM_CERTS_RE = re.compile(
|
||||
b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
|
||||
)
|
||||
|
||||
|
||||
def _cf_data_from_bytes(bytestring):
|
||||
"""
|
||||
Given a bytestring, create a CFData object from it. This CFData object must
|
||||
be CFReleased by the caller.
|
||||
"""
|
||||
return CoreFoundation.CFDataCreate(
|
||||
CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
|
||||
)
|
||||
|
||||
|
||||
def _cf_dictionary_from_tuples(tuples):
|
||||
"""
|
||||
Given a list of Python tuples, create an associated CFDictionary.
|
||||
"""
|
||||
dictionary_size = len(tuples)
|
||||
|
||||
# We need to get the dictionary keys and values out in the same order.
|
||||
keys = (t[0] for t in tuples)
|
||||
values = (t[1] for t in tuples)
|
||||
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
|
||||
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
|
||||
|
||||
return CoreFoundation.CFDictionaryCreate(
|
||||
CoreFoundation.kCFAllocatorDefault,
|
||||
cf_keys,
|
||||
cf_values,
|
||||
dictionary_size,
|
||||
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
|
||||
CoreFoundation.kCFTypeDictionaryValueCallBacks,
|
||||
)
|
||||
|
||||
|
||||
def _cf_string_to_unicode(value):
|
||||
"""
|
||||
Creates a Unicode string from a CFString object. Used entirely for error
|
||||
reporting.
|
||||
|
||||
Yes, it annoys me quite a lot that this function is this complex.
|
||||
"""
|
||||
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
|
||||
|
||||
string = CoreFoundation.CFStringGetCStringPtr(
|
||||
value_as_void_p, CFConst.kCFStringEncodingUTF8
|
||||
)
|
||||
if string is None:
|
||||
buffer = ctypes.create_string_buffer(1024)
|
||||
result = CoreFoundation.CFStringGetCString(
|
||||
value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
|
||||
)
|
||||
if not result:
|
||||
raise OSError("Error copying C string from CFStringRef")
|
||||
string = buffer.value
|
||||
if string is not None:
|
||||
string = string.decode("utf-8")
|
||||
return string
|
||||
|
||||
|
||||
def _assert_no_error(error, exception_class=None):
|
||||
"""
|
||||
Checks the return code and throws an exception if there is an error to
|
||||
report
|
||||
"""
|
||||
if error == 0:
|
||||
return
|
||||
|
||||
cf_error_string = Security.SecCopyErrorMessageString(error, None)
|
||||
output = _cf_string_to_unicode(cf_error_string)
|
||||
CoreFoundation.CFRelease(cf_error_string)
|
||||
|
||||
if output is None or output == u"":
|
||||
output = u"OSStatus %s" % error
|
||||
|
||||
if exception_class is None:
|
||||
exception_class = ssl.SSLError
|
||||
|
||||
raise exception_class(output)
|
||||
|
||||
|
||||
def _cert_array_from_pem(pem_bundle):
|
||||
"""
|
||||
Given a bundle of certs in PEM format, turns them into a CFArray of certs
|
||||
that can be used to validate a cert chain.
|
||||
"""
|
||||
# Normalize the PEM bundle's line endings.
|
||||
pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
|
||||
|
||||
der_certs = [
|
||||
base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
|
||||
]
|
||||
if not der_certs:
|
||||
raise ssl.SSLError("No root certificates specified")
|
||||
|
||||
cert_array = CoreFoundation.CFArrayCreateMutable(
|
||||
CoreFoundation.kCFAllocatorDefault,
|
||||
0,
|
||||
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
|
||||
)
|
||||
if not cert_array:
|
||||
raise ssl.SSLError("Unable to allocate memory!")
|
||||
|
||||
try:
|
||||
for der_bytes in der_certs:
|
||||
certdata = _cf_data_from_bytes(der_bytes)
|
||||
if not certdata:
|
||||
raise ssl.SSLError("Unable to allocate memory!")
|
||||
cert = Security.SecCertificateCreateWithData(
|
||||
CoreFoundation.kCFAllocatorDefault, certdata
|
||||
)
|
||||
CoreFoundation.CFRelease(certdata)
|
||||
if not cert:
|
||||
raise ssl.SSLError("Unable to build cert object!")
|
||||
|
||||
CoreFoundation.CFArrayAppendValue(cert_array, cert)
|
||||
CoreFoundation.CFRelease(cert)
|
||||
except Exception:
|
||||
# We need to free the array before the exception bubbles further.
|
||||
# We only want to do that if an error occurs: otherwise, the caller
|
||||
# should free.
|
||||
CoreFoundation.CFRelease(cert_array)
|
||||
|
||||
return cert_array
|
||||
|
||||
|
||||
def _is_cert(item):
|
||||
"""
|
||||
Returns True if a given CFTypeRef is a certificate.
|
||||
"""
|
||||
expected = Security.SecCertificateGetTypeID()
|
||||
return CoreFoundation.CFGetTypeID(item) == expected
|
||||
|
||||
|
||||
def _is_identity(item):
|
||||
"""
|
||||
Returns True if a given CFTypeRef is an identity.
|
||||
"""
|
||||
expected = Security.SecIdentityGetTypeID()
|
||||
return CoreFoundation.CFGetTypeID(item) == expected
|
||||
|
||||
|
||||
def _temporary_keychain():
|
||||
"""
|
||||
This function creates a temporary Mac keychain that we can use to work with
|
||||
credentials. This keychain uses a one-time password and a temporary file to
|
||||
store the data. We expect to have one keychain per socket. The returned
|
||||
SecKeychainRef must be freed by the caller, including calling
|
||||
SecKeychainDelete.
|
||||
|
||||
Returns a tuple of the SecKeychainRef and the path to the temporary
|
||||
directory that contains it.
|
||||
"""
|
||||
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
|
||||
# means we cannot use mkstemp to use a generic temporary file. Instead,
|
||||
# we're going to create a temporary directory and a filename to use there.
|
||||
# This filename will be 8 random bytes expanded into base64. We also need
|
||||
# some random bytes to password-protect the keychain we're creating, so we
|
||||
# ask for 40 random bytes.
|
||||
random_bytes = os.urandom(40)
|
||||
filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
|
||||
password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
|
||||
tempdirectory = tempfile.mkdtemp()
|
||||
|
||||
keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
|
||||
|
||||
# We now want to create the keychain itself.
|
||||
keychain = Security.SecKeychainRef()
|
||||
status = Security.SecKeychainCreate(
|
||||
keychain_path, len(password), password, False, None, ctypes.byref(keychain)
|
||||
)
|
||||
_assert_no_error(status)
|
||||
|
||||
# Having created the keychain, we want to pass it off to the caller.
|
||||
return keychain, tempdirectory
|
||||
|
||||
|
||||
def _load_items_from_file(keychain, path):
|
||||
"""
|
||||
Given a single file, loads all the trust objects from it into arrays and
|
||||
the keychain.
|
||||
Returns a tuple of lists: the first list is a list of identities, the
|
||||
second a list of certs.
|
||||
"""
|
||||
certificates = []
|
||||
identities = []
|
||||
result_array = None
|
||||
|
||||
with open(path, "rb") as f:
|
||||
raw_filedata = f.read()
|
||||
|
||||
try:
|
||||
filedata = CoreFoundation.CFDataCreate(
|
||||
CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
|
||||
)
|
||||
result_array = CoreFoundation.CFArrayRef()
|
||||
result = Security.SecItemImport(
|
||||
filedata, # cert data
|
||||
None, # Filename, leaving it out for now
|
||||
None, # What the type of the file is, we don't care
|
||||
None, # what's in the file, we don't care
|
||||
0, # import flags
|
||||
None, # key params, can include passphrase in the future
|
||||
keychain, # The keychain to insert into
|
||||
ctypes.byref(result_array), # Results
|
||||
)
|
||||
_assert_no_error(result)
|
||||
|
||||
# A CFArray is not very useful to us as an intermediary
|
||||
# representation, so we are going to extract the objects we want
|
||||
# and then free the array. We don't need to keep hold of keys: the
|
||||
# keychain already has them!
|
||||
result_count = CoreFoundation.CFArrayGetCount(result_array)
|
||||
for index in range(result_count):
|
||||
item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
|
||||
item = ctypes.cast(item, CoreFoundation.CFTypeRef)
|
||||
|
||||
if _is_cert(item):
|
||||
CoreFoundation.CFRetain(item)
|
||||
certificates.append(item)
|
||||
elif _is_identity(item):
|
||||
CoreFoundation.CFRetain(item)
|
||||
identities.append(item)
|
||||
finally:
|
||||
if result_array:
|
||||
CoreFoundation.CFRelease(result_array)
|
||||
|
||||
CoreFoundation.CFRelease(filedata)
|
||||
|
||||
return (identities, certificates)
|
||||
|
||||
|
||||
def _load_client_cert_chain(keychain, *paths):
|
||||
"""
|
||||
Load certificates and maybe keys from a number of files. Has the end goal
|
||||
of returning a CFArray containing one SecIdentityRef, and then zero or more
|
||||
SecCertificateRef objects, suitable for use as a client certificate trust
|
||||
chain.
|
||||
"""
|
||||
# Ok, the strategy.
|
||||
#
|
||||
# This relies on knowing that macOS will not give you a SecIdentityRef
|
||||
# unless you have imported a key into a keychain. This is a somewhat
|
||||
# artificial limitation of macOS (for example, it doesn't necessarily
|
||||
# affect iOS), but there is nothing inside Security.framework that lets you
|
||||
# get a SecIdentityRef without having a key in a keychain.
|
||||
#
|
||||
# So the policy here is we take all the files and iterate them in order.
|
||||
# Each one will use SecItemImport to have one or more objects loaded from
|
||||
# it. We will also point at a keychain that macOS can use to work with the
|
||||
# private key.
|
||||
#
|
||||
# Once we have all the objects, we'll check what we actually have. If we
|
||||
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
|
||||
# we'll take the first certificate (which we assume to be our leaf) and
|
||||
# ask the keychain to give us a SecIdentityRef with that cert's associated
|
||||
# key.
|
||||
#
|
||||
# We'll then return a CFArray containing the trust chain: one
|
||||
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
|
||||
# responsibility for freeing this CFArray will be with the caller. This
|
||||
# CFArray must remain alive for the entire connection, so in practice it
|
||||
# will be stored with a single SSLSocket, along with the reference to the
|
||||
# keychain.
|
||||
certificates = []
|
||||
identities = []
|
||||
|
||||
# Filter out bad paths.
|
||||
paths = (path for path in paths if path)
|
||||
|
||||
try:
|
||||
for file_path in paths:
|
||||
new_identities, new_certs = _load_items_from_file(keychain, file_path)
|
||||
identities.extend(new_identities)
|
||||
certificates.extend(new_certs)
|
||||
|
||||
# Ok, we have everything. The question is: do we have an identity? If
|
||||
# not, we want to grab one from the first cert we have.
|
||||
if not identities:
|
||||
new_identity = Security.SecIdentityRef()
|
||||
status = Security.SecIdentityCreateWithCertificate(
|
||||
keychain, certificates[0], ctypes.byref(new_identity)
|
||||
)
|
||||
_assert_no_error(status)
|
||||
identities.append(new_identity)
|
||||
|
||||
# We now want to release the original certificate, as we no longer
|
||||
# need it.
|
||||
CoreFoundation.CFRelease(certificates.pop(0))
|
||||
|
||||
# We now need to build a new CFArray that holds the trust chain.
|
||||
trust_chain = CoreFoundation.CFArrayCreateMutable(
|
||||
CoreFoundation.kCFAllocatorDefault,
|
||||
0,
|
||||
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
|
||||
)
|
||||
for item in itertools.chain(identities, certificates):
|
||||
# ArrayAppendValue does a CFRetain on the item. That's fine,
|
||||
# because the finally block will release our other refs to them.
|
||||
CoreFoundation.CFArrayAppendValue(trust_chain, item)
|
||||
|
||||
return trust_chain
|
||||
finally:
|
||||
for obj in itertools.chain(identities, certificates):
|
||||
CoreFoundation.CFRelease(obj)
|
314
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py
Normal file
314
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py
Normal file
|
@ -0,0 +1,314 @@
|
|||
"""
|
||||
This module provides a pool manager that uses Google App Engine's
|
||||
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
|
||||
|
||||
Example usage::
|
||||
|
||||
from pip._vendor.urllib3 import PoolManager
|
||||
from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
|
||||
|
||||
if is_appengine_sandbox():
|
||||
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
|
||||
http = AppEngineManager()
|
||||
else:
|
||||
# PoolManager uses a socket-level API behind the scenes
|
||||
http = PoolManager()
|
||||
|
||||
r = http.request('GET', 'https://google.com/')
|
||||
|
||||
There are `limitations <https://cloud.google.com/appengine/docs/python/\
|
||||
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
|
||||
the best choice for your application. There are three options for using
|
||||
urllib3 on Google App Engine:
|
||||
|
||||
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
|
||||
cost-effective in many circumstances as long as your usage is within the
|
||||
limitations.
|
||||
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
|
||||
Sockets also have `limitations and restrictions
|
||||
<https://cloud.google.com/appengine/docs/python/sockets/\
|
||||
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
|
||||
To use sockets, be sure to specify the following in your ``app.yaml``::
|
||||
|
||||
env_variables:
|
||||
GAE_USE_SOCKETS_HTTPLIB : 'true'
|
||||
|
||||
3. If you are using `App Engine Flexible
|
||||
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
|
||||
:class:`PoolManager` without any configuration or special environment variables.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
import io
|
||||
import logging
|
||||
import warnings
|
||||
from ..packages.six.moves.urllib.parse import urljoin
|
||||
|
||||
from ..exceptions import (
|
||||
HTTPError,
|
||||
HTTPWarning,
|
||||
MaxRetryError,
|
||||
ProtocolError,
|
||||
TimeoutError,
|
||||
SSLError,
|
||||
)
|
||||
|
||||
from ..request import RequestMethods
|
||||
from ..response import HTTPResponse
|
||||
from ..util.timeout import Timeout
|
||||
from ..util.retry import Retry
|
||||
from . import _appengine_environ
|
||||
|
||||
try:
|
||||
from google.appengine.api import urlfetch
|
||||
except ImportError:
|
||||
urlfetch = None
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AppEnginePlatformWarning(HTTPWarning):
|
||||
pass
|
||||
|
||||
|
||||
class AppEnginePlatformError(HTTPError):
|
||||
pass
|
||||
|
||||
|
||||
class AppEngineManager(RequestMethods):
|
||||
"""
|
||||
Connection manager for Google App Engine sandbox applications.
|
||||
|
||||
This manager uses the URLFetch service directly instead of using the
|
||||
emulated httplib, and is subject to URLFetch limitations as described in
|
||||
the App Engine documentation `here
|
||||
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
|
||||
|
||||
Notably it will raise an :class:`AppEnginePlatformError` if:
|
||||
* URLFetch is not available.
|
||||
* If you attempt to use this on App Engine Flexible, as full socket
|
||||
support is available.
|
||||
* If a request size is more than 10 megabytes.
|
||||
* If a response size is more than 32 megabtyes.
|
||||
* If you use an unsupported request method such as OPTIONS.
|
||||
|
||||
Beyond those cases, it will raise normal urllib3 errors.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
headers=None,
|
||||
retries=None,
|
||||
validate_certificate=True,
|
||||
urlfetch_retries=True,
|
||||
):
|
||||
if not urlfetch:
|
||||
raise AppEnginePlatformError(
|
||||
"URLFetch is not available in this environment."
|
||||
)
|
||||
|
||||
warnings.warn(
|
||||
"urllib3 is using URLFetch on Google App Engine sandbox instead "
|
||||
"of sockets. To use sockets directly instead of URLFetch see "
|
||||
"https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
|
||||
AppEnginePlatformWarning,
|
||||
)
|
||||
|
||||
RequestMethods.__init__(self, headers)
|
||||
self.validate_certificate = validate_certificate
|
||||
self.urlfetch_retries = urlfetch_retries
|
||||
|
||||
self.retries = retries or Retry.DEFAULT
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
# Return False to re-raise any potential exceptions
|
||||
return False
|
||||
|
||||
def urlopen(
|
||||
self,
|
||||
method,
|
||||
url,
|
||||
body=None,
|
||||
headers=None,
|
||||
retries=None,
|
||||
redirect=True,
|
||||
timeout=Timeout.DEFAULT_TIMEOUT,
|
||||
**response_kw
|
||||
):
|
||||
|
||||
retries = self._get_retries(retries, redirect)
|
||||
|
||||
try:
|
||||
follow_redirects = redirect and retries.redirect != 0 and retries.total
|
||||
response = urlfetch.fetch(
|
||||
url,
|
||||
payload=body,
|
||||
method=method,
|
||||
headers=headers or {},
|
||||
allow_truncated=False,
|
||||
follow_redirects=self.urlfetch_retries and follow_redirects,
|
||||
deadline=self._get_absolute_timeout(timeout),
|
||||
validate_certificate=self.validate_certificate,
|
||||
)
|
||||
except urlfetch.DeadlineExceededError as e:
|
||||
raise TimeoutError(self, e)
|
||||
|
||||
except urlfetch.InvalidURLError as e:
|
||||
if "too large" in str(e):
|
||||
raise AppEnginePlatformError(
|
||||
"URLFetch request too large, URLFetch only "
|
||||
"supports requests up to 10mb in size.",
|
||||
e,
|
||||
)
|
||||
raise ProtocolError(e)
|
||||
|
||||
except urlfetch.DownloadError as e:
|
||||
if "Too many redirects" in str(e):
|
||||
raise MaxRetryError(self, url, reason=e)
|
||||
raise ProtocolError(e)
|
||||
|
||||
except urlfetch.ResponseTooLargeError as e:
|
||||
raise AppEnginePlatformError(
|
||||
"URLFetch response too large, URLFetch only supports"
|
||||
"responses up to 32mb in size.",
|
||||
e,
|
||||
)
|
||||
|
||||
except urlfetch.SSLCertificateError as e:
|
||||
raise SSLError(e)
|
||||
|
||||
except urlfetch.InvalidMethodError as e:
|
||||
raise AppEnginePlatformError(
|
||||
"URLFetch does not support method: %s" % method, e
|
||||
)
|
||||
|
||||
http_response = self._urlfetch_response_to_http_response(
|
||||
response, retries=retries, **response_kw
|
||||
)
|
||||
|
||||
# Handle redirect?
|
||||
redirect_location = redirect and http_response.get_redirect_location()
|
||||
if redirect_location:
|
||||
# Check for redirect response
|
||||
if self.urlfetch_retries and retries.raise_on_redirect:
|
||||
raise MaxRetryError(self, url, "too many redirects")
|
||||
else:
|
||||
if http_response.status == 303:
|
||||
method = "GET"
|
||||
|
||||
try:
|
||||
retries = retries.increment(
|
||||
method, url, response=http_response, _pool=self
|
||||
)
|
||||
except MaxRetryError:
|
||||
if retries.raise_on_redirect:
|
||||
raise MaxRetryError(self, url, "too many redirects")
|
||||
return http_response
|
||||
|
||||
retries.sleep_for_retry(http_response)
|
||||
log.debug("Redirecting %s -> %s", url, redirect_location)
|
||||
redirect_url = urljoin(url, redirect_location)
|
||||
return self.urlopen(
|
||||
method,
|
||||
redirect_url,
|
||||
body,
|
||||
headers,
|
||||
retries=retries,
|
||||
redirect=redirect,
|
||||
timeout=timeout,
|
||||
**response_kw
|
||||
)
|
||||
|
||||
# Check if we should retry the HTTP response.
|
||||
has_retry_after = bool(http_response.getheader("Retry-After"))
|
||||
if retries.is_retry(method, http_response.status, has_retry_after):
|
||||
retries = retries.increment(method, url, response=http_response, _pool=self)
|
||||
log.debug("Retry: %s", url)
|
||||
retries.sleep(http_response)
|
||||
return self.urlopen(
|
||||
method,
|
||||
url,
|
||||
body=body,
|
||||
headers=headers,
|
||||
retries=retries,
|
||||
redirect=redirect,
|
||||
timeout=timeout,
|
||||
**response_kw
|
||||
)
|
||||
|
||||
return http_response
|
||||
|
||||
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
|
||||
|
||||
if is_prod_appengine():
|
||||
# Production GAE handles deflate encoding automatically, but does
|
||||
# not remove the encoding header.
|
||||
content_encoding = urlfetch_resp.headers.get("content-encoding")
|
||||
|
||||
if content_encoding == "deflate":
|
||||
del urlfetch_resp.headers["content-encoding"]
|
||||
|
||||
transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
|
||||
# We have a full response's content,
|
||||
# so let's make sure we don't report ourselves as chunked data.
|
||||
if transfer_encoding == "chunked":
|
||||
encodings = transfer_encoding.split(",")
|
||||
encodings.remove("chunked")
|
||||
urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
|
||||
|
||||
original_response = HTTPResponse(
|
||||
# In order for decoding to work, we must present the content as
|
||||
# a file-like object.
|
||||
body=io.BytesIO(urlfetch_resp.content),
|
||||
msg=urlfetch_resp.header_msg,
|
||||
headers=urlfetch_resp.headers,
|
||||
status=urlfetch_resp.status_code,
|
||||
**response_kw
|
||||
)
|
||||
|
||||
return HTTPResponse(
|
||||
body=io.BytesIO(urlfetch_resp.content),
|
||||
headers=urlfetch_resp.headers,
|
||||
status=urlfetch_resp.status_code,
|
||||
original_response=original_response,
|
||||
**response_kw
|
||||
)
|
||||
|
||||
def _get_absolute_timeout(self, timeout):
|
||||
if timeout is Timeout.DEFAULT_TIMEOUT:
|
||||
return None # Defer to URLFetch's default.
|
||||
if isinstance(timeout, Timeout):
|
||||
if timeout._read is not None or timeout._connect is not None:
|
||||
warnings.warn(
|
||||
"URLFetch does not support granular timeout settings, "
|
||||
"reverting to total or default URLFetch timeout.",
|
||||
AppEnginePlatformWarning,
|
||||
)
|
||||
return timeout.total
|
||||
return timeout
|
||||
|
||||
def _get_retries(self, retries, redirect):
|
||||
if not isinstance(retries, Retry):
|
||||
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
|
||||
|
||||
if retries.connect or retries.read or retries.redirect:
|
||||
warnings.warn(
|
||||
"URLFetch only supports total retries and does not "
|
||||
"recognize connect, read, or redirect retry parameters.",
|
||||
AppEnginePlatformWarning,
|
||||
)
|
||||
|
||||
return retries
|
||||
|
||||
|
||||
# Alias methods from _appengine_environ to maintain public API interface.
|
||||
|
||||
is_appengine = _appengine_environ.is_appengine
|
||||
is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
|
||||
is_local_appengine = _appengine_environ.is_local_appengine
|
||||
is_prod_appengine = _appengine_environ.is_prod_appengine
|
||||
is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
|
121
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
Normal file
121
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
Normal file
|
@ -0,0 +1,121 @@
|
|||
"""
|
||||
NTLM authenticating pool, contributed by erikcederstran
|
||||
|
||||
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from logging import getLogger
|
||||
from ntlm import ntlm
|
||||
|
||||
from .. import HTTPSConnectionPool
|
||||
from ..packages.six.moves.http_client import HTTPSConnection
|
||||
|
||||
|
||||
log = getLogger(__name__)
|
||||
|
||||
|
||||
class NTLMConnectionPool(HTTPSConnectionPool):
|
||||
"""
|
||||
Implements an NTLM authentication version of an urllib3 connection pool
|
||||
"""
|
||||
|
||||
scheme = "https"
|
||||
|
||||
def __init__(self, user, pw, authurl, *args, **kwargs):
|
||||
"""
|
||||
authurl is a random URL on the server that is protected by NTLM.
|
||||
user is the Windows user, probably in the DOMAIN\\username format.
|
||||
pw is the password for the user.
|
||||
"""
|
||||
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
|
||||
self.authurl = authurl
|
||||
self.rawuser = user
|
||||
user_parts = user.split("\\", 1)
|
||||
self.domain = user_parts[0].upper()
|
||||
self.user = user_parts[1]
|
||||
self.pw = pw
|
||||
|
||||
def _new_conn(self):
|
||||
# Performs the NTLM handshake that secures the connection. The socket
|
||||
# must be kept open while requests are performed.
|
||||
self.num_connections += 1
|
||||
log.debug(
|
||||
"Starting NTLM HTTPS connection no. %d: https://%s%s",
|
||||
self.num_connections,
|
||||
self.host,
|
||||
self.authurl,
|
||||
)
|
||||
|
||||
headers = {"Connection": "Keep-Alive"}
|
||||
req_header = "Authorization"
|
||||
resp_header = "www-authenticate"
|
||||
|
||||
conn = HTTPSConnection(host=self.host, port=self.port)
|
||||
|
||||
# Send negotiation message
|
||||
headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
|
||||
self.rawuser
|
||||
)
|
||||
log.debug("Request headers: %s", headers)
|
||||
conn.request("GET", self.authurl, None, headers)
|
||||
res = conn.getresponse()
|
||||
reshdr = dict(res.getheaders())
|
||||
log.debug("Response status: %s %s", res.status, res.reason)
|
||||
log.debug("Response headers: %s", reshdr)
|
||||
log.debug("Response data: %s [...]", res.read(100))
|
||||
|
||||
# Remove the reference to the socket, so that it can not be closed by
|
||||
# the response object (we want to keep the socket open)
|
||||
res.fp = None
|
||||
|
||||
# Server should respond with a challenge message
|
||||
auth_header_values = reshdr[resp_header].split(", ")
|
||||
auth_header_value = None
|
||||
for s in auth_header_values:
|
||||
if s[:5] == "NTLM ":
|
||||
auth_header_value = s[5:]
|
||||
if auth_header_value is None:
|
||||
raise Exception(
|
||||
"Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
|
||||
)
|
||||
|
||||
# Send authentication message
|
||||
ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
|
||||
auth_header_value
|
||||
)
|
||||
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
|
||||
ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
|
||||
)
|
||||
headers[req_header] = "NTLM %s" % auth_msg
|
||||
log.debug("Request headers: %s", headers)
|
||||
conn.request("GET", self.authurl, None, headers)
|
||||
res = conn.getresponse()
|
||||
log.debug("Response status: %s %s", res.status, res.reason)
|
||||
log.debug("Response headers: %s", dict(res.getheaders()))
|
||||
log.debug("Response data: %s [...]", res.read()[:100])
|
||||
if res.status != 200:
|
||||
if res.status == 401:
|
||||
raise Exception("Server rejected request: wrong username or password")
|
||||
raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
|
||||
|
||||
res.fp = None
|
||||
log.debug("Connection established")
|
||||
return conn
|
||||
|
||||
def urlopen(
|
||||
self,
|
||||
method,
|
||||
url,
|
||||
body=None,
|
||||
headers=None,
|
||||
retries=3,
|
||||
redirect=True,
|
||||
assert_same_host=True,
|
||||
):
|
||||
if headers is None:
|
||||
headers = {}
|
||||
headers["Connection"] = "Keep-Alive"
|
||||
return super(NTLMConnectionPool, self).urlopen(
|
||||
method, url, body, headers, retries, redirect, assert_same_host
|
||||
)
|
501
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
Normal file
501
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
Normal file
|
@ -0,0 +1,501 @@
|
|||
"""
|
||||
SSL with SNI_-support for Python 2. Follow these instructions if you would
|
||||
like to verify SSL certificates in Python 2. Note, the default libraries do
|
||||
*not* do certificate checking; you need to do additional work to validate
|
||||
certificates yourself.
|
||||
|
||||
This needs the following packages installed:
|
||||
|
||||
* pyOpenSSL (tested with 16.0.0)
|
||||
* cryptography (minimum 1.3.4, from pyopenssl)
|
||||
* idna (minimum 2.0, from cryptography)
|
||||
|
||||
However, pyopenssl depends on cryptography, which depends on idna, so while we
|
||||
use all three directly here we end up having relatively few packages required.
|
||||
|
||||
You can install them with the following command:
|
||||
|
||||
pip install pyopenssl cryptography idna
|
||||
|
||||
To activate certificate checking, call
|
||||
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
|
||||
before you begin making HTTP requests. This can be done in a ``sitecustomize``
|
||||
module, or at any other time before your application begins using ``urllib3``,
|
||||
like this::
|
||||
|
||||
try:
|
||||
import urllib3.contrib.pyopenssl
|
||||
urllib3.contrib.pyopenssl.inject_into_urllib3()
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
|
||||
when the required modules are installed.
|
||||
|
||||
Activating this module also has the positive side effect of disabling SSL/TLS
|
||||
compression in Python 2 (see `CRIME attack`_).
|
||||
|
||||
If you want to configure the default list of supported cipher suites, you can
|
||||
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
|
||||
|
||||
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
|
||||
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import OpenSSL.SSL
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.backends.openssl import backend as openssl_backend
|
||||
from cryptography.hazmat.backends.openssl.x509 import _Certificate
|
||||
|
||||
try:
|
||||
from cryptography.x509 import UnsupportedExtension
|
||||
except ImportError:
|
||||
# UnsupportedExtension is gone in cryptography >= 2.1.0
|
||||
class UnsupportedExtension(Exception):
|
||||
pass
|
||||
|
||||
|
||||
from socket import timeout, error as SocketError
|
||||
from io import BytesIO
|
||||
|
||||
try: # Platform-specific: Python 2
|
||||
from socket import _fileobject
|
||||
except ImportError: # Platform-specific: Python 3
|
||||
_fileobject = None
|
||||
from ..packages.backports.makefile import backport_makefile
|
||||
|
||||
import logging
|
||||
import ssl
|
||||
from ..packages import six
|
||||
import sys
|
||||
|
||||
from .. import util
|
||||
|
||||
|
||||
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
|
||||
|
||||
# SNI always works.
|
||||
HAS_SNI = True
|
||||
|
||||
# Map from urllib3 to PyOpenSSL compatible parameter-values.
|
||||
_openssl_versions = {
|
||||
util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
|
||||
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
|
||||
}
|
||||
|
||||
if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
|
||||
_openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
|
||||
|
||||
if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
|
||||
_openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
|
||||
|
||||
if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
|
||||
_openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
|
||||
|
||||
|
||||
_stdlib_to_openssl_verify = {
|
||||
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
|
||||
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
|
||||
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
|
||||
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
|
||||
}
|
||||
_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
|
||||
|
||||
# OpenSSL will only write 16K at a time
|
||||
SSL_WRITE_BLOCKSIZE = 16384
|
||||
|
||||
orig_util_HAS_SNI = util.HAS_SNI
|
||||
orig_util_SSLContext = util.ssl_.SSLContext
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def inject_into_urllib3():
|
||||
"Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
|
||||
|
||||
_validate_dependencies_met()
|
||||
|
||||
util.SSLContext = PyOpenSSLContext
|
||||
util.ssl_.SSLContext = PyOpenSSLContext
|
||||
util.HAS_SNI = HAS_SNI
|
||||
util.ssl_.HAS_SNI = HAS_SNI
|
||||
util.IS_PYOPENSSL = True
|
||||
util.ssl_.IS_PYOPENSSL = True
|
||||
|
||||
|
||||
def extract_from_urllib3():
|
||||
"Undo monkey-patching by :func:`inject_into_urllib3`."
|
||||
|
||||
util.SSLContext = orig_util_SSLContext
|
||||
util.ssl_.SSLContext = orig_util_SSLContext
|
||||
util.HAS_SNI = orig_util_HAS_SNI
|
||||
util.ssl_.HAS_SNI = orig_util_HAS_SNI
|
||||
util.IS_PYOPENSSL = False
|
||||
util.ssl_.IS_PYOPENSSL = False
|
||||
|
||||
|
||||
def _validate_dependencies_met():
|
||||
"""
|
||||
Verifies that PyOpenSSL's package-level dependencies have been met.
|
||||
Throws `ImportError` if they are not met.
|
||||
"""
|
||||
# Method added in `cryptography==1.1`; not available in older versions
|
||||
from cryptography.x509.extensions import Extensions
|
||||
|
||||
if getattr(Extensions, "get_extension_for_class", None) is None:
|
||||
raise ImportError(
|
||||
"'cryptography' module missing required functionality. "
|
||||
"Try upgrading to v1.3.4 or newer."
|
||||
)
|
||||
|
||||
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
|
||||
# attribute is only present on those versions.
|
||||
from OpenSSL.crypto import X509
|
||||
|
||||
x509 = X509()
|
||||
if getattr(x509, "_x509", None) is None:
|
||||
raise ImportError(
|
||||
"'pyOpenSSL' module missing required functionality. "
|
||||
"Try upgrading to v0.14 or newer."
|
||||
)
|
||||
|
||||
|
||||
def _dnsname_to_stdlib(name):
|
||||
"""
|
||||
Converts a dNSName SubjectAlternativeName field to the form used by the
|
||||
standard library on the given Python version.
|
||||
|
||||
Cryptography produces a dNSName as a unicode string that was idna-decoded
|
||||
from ASCII bytes. We need to idna-encode that string to get it back, and
|
||||
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
|
||||
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
|
||||
|
||||
If the name cannot be idna-encoded then we return None signalling that
|
||||
the name given should be skipped.
|
||||
"""
|
||||
|
||||
def idna_encode(name):
|
||||
"""
|
||||
Borrowed wholesale from the Python Cryptography Project. It turns out
|
||||
that we can't just safely call `idna.encode`: it can explode for
|
||||
wildcard names. This avoids that problem.
|
||||
"""
|
||||
from pip._vendor import idna
|
||||
|
||||
try:
|
||||
for prefix in [u"*.", u"."]:
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix) :]
|
||||
return prefix.encode("ascii") + idna.encode(name)
|
||||
return idna.encode(name)
|
||||
except idna.core.IDNAError:
|
||||
return None
|
||||
|
||||
# Don't send IPv6 addresses through the IDNA encoder.
|
||||
if ":" in name:
|
||||
return name
|
||||
|
||||
name = idna_encode(name)
|
||||
if name is None:
|
||||
return None
|
||||
elif sys.version_info >= (3, 0):
|
||||
name = name.decode("utf-8")
|
||||
return name
|
||||
|
||||
|
||||
def get_subj_alt_name(peer_cert):
|
||||
"""
|
||||
Given an PyOpenSSL certificate, provides all the subject alternative names.
|
||||
"""
|
||||
# Pass the cert to cryptography, which has much better APIs for this.
|
||||
if hasattr(peer_cert, "to_cryptography"):
|
||||
cert = peer_cert.to_cryptography()
|
||||
else:
|
||||
# This is technically using private APIs, but should work across all
|
||||
# relevant versions before PyOpenSSL got a proper API for this.
|
||||
cert = _Certificate(openssl_backend, peer_cert._x509)
|
||||
|
||||
# We want to find the SAN extension. Ask Cryptography to locate it (it's
|
||||
# faster than looping in Python)
|
||||
try:
|
||||
ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
|
||||
except x509.ExtensionNotFound:
|
||||
# No such extension, return the empty list.
|
||||
return []
|
||||
except (
|
||||
x509.DuplicateExtension,
|
||||
UnsupportedExtension,
|
||||
x509.UnsupportedGeneralNameType,
|
||||
UnicodeError,
|
||||
) as e:
|
||||
# A problem has been found with the quality of the certificate. Assume
|
||||
# no SAN field is present.
|
||||
log.warning(
|
||||
"A problem was encountered with the certificate that prevented "
|
||||
"urllib3 from finding the SubjectAlternativeName field. This can "
|
||||
"affect certificate validation. The error was %s",
|
||||
e,
|
||||
)
|
||||
return []
|
||||
|
||||
# We want to return dNSName and iPAddress fields. We need to cast the IPs
|
||||
# back to strings because the match_hostname function wants them as
|
||||
# strings.
|
||||
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
|
||||
# decoded. This is pretty frustrating, but that's what the standard library
|
||||
# does with certificates, and so we need to attempt to do the same.
|
||||
# We also want to skip over names which cannot be idna encoded.
|
||||
names = [
|
||||
("DNS", name)
|
||||
for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
|
||||
if name is not None
|
||||
]
|
||||
names.extend(
|
||||
("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
|
||||
)
|
||||
|
||||
return names
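# Illustrative note (not part of the vendored module): the list returned above
# mirrors what ``ssl.SSLSocket.getpeercert()["subjectAltName"]`` would contain.
# For a hypothetical certificate it might look like:
#
#   [('DNS', 'example.com'), ('DNS', 'www.example.com'),
#    ('IP Address', '203.0.113.7')]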
|
||||
|
||||
|
||||
class WrappedSocket(object):
|
||||
"""API-compatibility wrapper for Python OpenSSL's Connection-class.
|
||||
|
||||
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
|
||||
collector of pypy.
|
||||
"""
|
||||
|
||||
def __init__(self, connection, socket, suppress_ragged_eofs=True):
|
||||
self.connection = connection
|
||||
self.socket = socket
|
||||
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||
self._makefile_refs = 0
|
||||
self._closed = False
|
||||
|
||||
def fileno(self):
|
||||
return self.socket.fileno()
|
||||
|
||||
# Copy-pasted from Python 3.5 source code
|
||||
def _decref_socketios(self):
|
||||
if self._makefile_refs > 0:
|
||||
self._makefile_refs -= 1
|
||||
if self._closed:
|
||||
self.close()
|
||||
|
||||
def recv(self, *args, **kwargs):
|
||||
try:
|
||||
data = self.connection.recv(*args, **kwargs)
|
||||
except OpenSSL.SSL.SysCallError as e:
|
||||
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
|
||||
return b""
|
||||
else:
|
||||
raise SocketError(str(e))
|
||||
except OpenSSL.SSL.ZeroReturnError:
|
||||
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
|
||||
return b""
|
||||
else:
|
||||
raise
|
||||
except OpenSSL.SSL.WantReadError:
|
||||
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
|
||||
raise timeout("The read operation timed out")
|
||||
else:
|
||||
return self.recv(*args, **kwargs)
|
||||
|
||||
# TLS 1.3 post-handshake authentication
|
||||
except OpenSSL.SSL.Error as e:
|
||||
raise ssl.SSLError("read error: %r" % e)
|
||||
else:
|
||||
return data
|
||||
|
||||
def recv_into(self, *args, **kwargs):
|
||||
try:
|
||||
return self.connection.recv_into(*args, **kwargs)
|
||||
except OpenSSL.SSL.SysCallError as e:
|
||||
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
|
||||
return 0
|
||||
else:
|
||||
raise SocketError(str(e))
|
||||
except OpenSSL.SSL.ZeroReturnError:
|
||||
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
|
||||
return 0
|
||||
else:
|
||||
raise
|
||||
except OpenSSL.SSL.WantReadError:
|
||||
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
|
||||
raise timeout("The read operation timed out")
|
||||
else:
|
||||
return self.recv_into(*args, **kwargs)
|
||||
|
||||
# TLS 1.3 post-handshake authentication
|
||||
except OpenSSL.SSL.Error as e:
|
||||
raise ssl.SSLError("read error: %r" % e)
|
||||
|
||||
def settimeout(self, timeout):
|
||||
return self.socket.settimeout(timeout)
|
||||
|
||||
def _send_until_done(self, data):
|
||||
while True:
|
||||
try:
|
||||
return self.connection.send(data)
|
||||
except OpenSSL.SSL.WantWriteError:
|
||||
if not util.wait_for_write(self.socket, self.socket.gettimeout()):
|
||||
raise timeout()
|
||||
continue
|
||||
except OpenSSL.SSL.SysCallError as e:
|
||||
raise SocketError(str(e))
|
||||
|
||||
def sendall(self, data):
|
||||
total_sent = 0
|
||||
while total_sent < len(data):
|
||||
sent = self._send_until_done(
|
||||
data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
|
||||
)
|
||||
total_sent += sent
|
||||
|
||||
def shutdown(self):
|
||||
# FIXME rethrow compatible exceptions should we ever use this
|
||||
self.connection.shutdown()
|
||||
|
||||
def close(self):
|
||||
if self._makefile_refs < 1:
|
||||
try:
|
||||
self._closed = True
|
||||
return self.connection.close()
|
||||
except OpenSSL.SSL.Error:
|
||||
return
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
def getpeercert(self, binary_form=False):
|
||||
x509 = self.connection.get_peer_certificate()
|
||||
|
||||
if not x509:
|
||||
return x509
|
||||
|
||||
if binary_form:
|
||||
return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
|
||||
|
||||
return {
|
||||
"subject": ((("commonName", x509.get_subject().CN),),),
|
||||
"subjectAltName": get_subj_alt_name(x509),
|
||||
}
|
||||
|
||||
def version(self):
|
||||
return self.connection.get_protocol_version_name()
|
||||
|
||||
def _reuse(self):
|
||||
self._makefile_refs += 1
|
||||
|
||||
def _drop(self):
|
||||
if self._makefile_refs < 1:
|
||||
self.close()
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
|
||||
if _fileobject: # Platform-specific: Python 2
|
||||
|
||||
def makefile(self, mode, bufsize=-1):
|
||||
self._makefile_refs += 1
|
||||
return _fileobject(self, mode, bufsize, close=True)
|
||||
|
||||
|
||||
else: # Platform-specific: Python 3
|
||||
makefile = backport_makefile
|
||||
|
||||
WrappedSocket.makefile = makefile
|
||||
|
||||
|
||||
class PyOpenSSLContext(object):
|
||||
"""
|
||||
I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
|
||||
for translating the interface of the standard library ``SSLContext`` object
|
||||
to calls into PyOpenSSL.
|
||||
"""
|
||||
|
||||
def __init__(self, protocol):
|
||||
self.protocol = _openssl_versions[protocol]
|
||||
self._ctx = OpenSSL.SSL.Context(self.protocol)
|
||||
self._options = 0
|
||||
self.check_hostname = False
|
||||
|
||||
@property
|
||||
def options(self):
|
||||
return self._options
|
||||
|
||||
@options.setter
|
||||
def options(self, value):
|
||||
self._options = value
|
||||
self._ctx.set_options(value)
|
||||
|
||||
@property
|
||||
def verify_mode(self):
|
||||
return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
|
||||
|
||||
@verify_mode.setter
|
||||
def verify_mode(self, value):
|
||||
self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
|
||||
|
||||
def set_default_verify_paths(self):
|
||||
self._ctx.set_default_verify_paths()
|
||||
|
||||
def set_ciphers(self, ciphers):
|
||||
if isinstance(ciphers, six.text_type):
|
||||
ciphers = ciphers.encode("utf-8")
|
||||
self._ctx.set_cipher_list(ciphers)
|
||||
|
||||
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
|
||||
if cafile is not None:
|
||||
cafile = cafile.encode("utf-8")
|
||||
if capath is not None:
|
||||
capath = capath.encode("utf-8")
|
||||
try:
|
||||
self._ctx.load_verify_locations(cafile, capath)
|
||||
if cadata is not None:
|
||||
self._ctx.load_verify_locations(BytesIO(cadata))
|
||||
except OpenSSL.SSL.Error as e:
|
||||
raise ssl.SSLError("unable to load trusted certificates: %r" % e)
|
||||
|
||||
def load_cert_chain(self, certfile, keyfile=None, password=None):
|
||||
self._ctx.use_certificate_chain_file(certfile)
|
||||
if password is not None:
|
||||
if not isinstance(password, six.binary_type):
|
||||
password = password.encode("utf-8")
|
||||
self._ctx.set_passwd_cb(lambda *_: password)
|
||||
self._ctx.use_privatekey_file(keyfile or certfile)
|
||||
|
||||
def wrap_socket(
|
||||
self,
|
||||
sock,
|
||||
server_side=False,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
server_hostname=None,
|
||||
):
|
||||
cnx = OpenSSL.SSL.Connection(self._ctx, sock)
|
||||
|
||||
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
|
||||
server_hostname = server_hostname.encode("utf-8")
|
||||
|
||||
if server_hostname is not None:
|
||||
cnx.set_tlsext_host_name(server_hostname)
|
||||
|
||||
cnx.set_connect_state()
|
||||
|
||||
while True:
|
||||
try:
|
||||
cnx.do_handshake()
|
||||
except OpenSSL.SSL.WantReadError:
|
||||
if not util.wait_for_read(sock, sock.gettimeout()):
|
||||
raise timeout("select timed out")
|
||||
continue
|
||||
except OpenSSL.SSL.Error as e:
|
||||
raise ssl.SSLError("bad handshake: %r" % e)
|
||||
break
|
||||
|
||||
return WrappedSocket(cnx, sock)
|
||||
|
||||
|
||||
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
|
||||
return err_no == 0
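# ---------------------------------------------------------------------------
# Usage sketch (not part of the vendored module). With a standalone urllib3
# install (outside pip._vendor), this backend is activated by monkey-patching,
# roughly:
#
#   import urllib3
#   import urllib3.contrib.pyopenssl
#
#   urllib3.contrib.pyopenssl.inject_into_urllib3()
#   http = urllib3.PoolManager()
#   r = http.request("GET", "https://example.com/")
#   urllib3.contrib.pyopenssl.extract_from_urllib3()  # optional: undo the patch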
|
|
@ -0,0 +1,864 @@
|
|||
"""
|
||||
SecureTransport support for urllib3 via ctypes.
|
||||
|
||||
This makes platform-native TLS available to urllib3 users on macOS without the
|
||||
use of a compiler. This is an important feature because the Python Package
|
||||
Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
|
||||
that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
|
||||
this is to give macOS users an alternative solution to the problem, and that
|
||||
solution is to use SecureTransport.
|
||||
|
||||
We use ctypes here because this solution must not require a compiler. That's
|
||||
because pip is not allowed to require a compiler either.
|
||||
|
||||
This is not intended to be a seriously long-term solution to this problem.
|
||||
The hope is that PEP 543 will eventually solve this issue for us, at which
|
||||
point we can retire this contrib module. But in the short term, we need to
|
||||
solve the impending tire fire that is Python on Mac without this kind of
|
||||
contrib module. So...here we are.
|
||||
|
||||
To use this module, simply import and inject it::
|
||||
|
||||
import urllib3.contrib.securetransport
|
||||
urllib3.contrib.securetransport.inject_into_urllib3()
|
||||
|
||||
Happy TLSing!
|
||||
|
||||
This code is a bastardised version of the code found in Will Bond's oscrypto
|
||||
library. An enormous debt is owed to him for blazing this trail for us. For
|
||||
that reason, this code should be considered to be covered both by urllib3's
|
||||
license and by oscrypto's:
|
||||
|
||||
Copyright (c) 2015-2016 Will Bond <will@wbond.net>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the "Software"),
|
||||
to deal in the Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import contextlib
|
||||
import ctypes
|
||||
import errno
|
||||
import os.path
|
||||
import shutil
|
||||
import socket
|
||||
import ssl
|
||||
import threading
|
||||
import weakref
|
||||
|
||||
from .. import util
|
||||
from ._securetransport.bindings import Security, SecurityConst, CoreFoundation
|
||||
from ._securetransport.low_level import (
|
||||
_assert_no_error,
|
||||
_cert_array_from_pem,
|
||||
_temporary_keychain,
|
||||
_load_client_cert_chain,
|
||||
)
|
||||
|
||||
try: # Platform-specific: Python 2
|
||||
from socket import _fileobject
|
||||
except ImportError: # Platform-specific: Python 3
|
||||
_fileobject = None
|
||||
from ..packages.backports.makefile import backport_makefile
|
||||
|
||||
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
|
||||
|
||||
# SNI always works
|
||||
HAS_SNI = True
|
||||
|
||||
orig_util_HAS_SNI = util.HAS_SNI
|
||||
orig_util_SSLContext = util.ssl_.SSLContext
|
||||
|
||||
# This dictionary is used by the read callback to obtain a handle to the
|
||||
# calling wrapped socket. This is a pretty silly approach, but for now it'll
|
||||
# do. I feel like I should be able to smuggle a handle to the wrapped socket
|
||||
# directly in the SSLConnectionRef, but for now this approach will work I
|
||||
# guess.
|
||||
#
|
||||
# We need to lock around this structure for inserts, but we don't do it for
|
||||
# reads/writes in the callbacks. The reasoning here goes as follows:
|
||||
#
|
||||
# 1. It is not possible to call into the callbacks before the dictionary is
|
||||
# populated, so once in the callback the id must be in the dictionary.
|
||||
# 2. The callbacks don't mutate the dictionary, they only read from it, and
|
||||
# so cannot conflict with any of the insertions.
|
||||
#
|
||||
# This is good: if we had to lock in the callbacks we'd drastically slow down
|
||||
# the performance of this code.
|
||||
_connection_refs = weakref.WeakValueDictionary()
|
||||
_connection_ref_lock = threading.Lock()
|
||||
|
||||
# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
|
||||
# for no better reason than we need *a* limit, and this one is right there.
|
||||
SSL_WRITE_BLOCKSIZE = 16384
|
||||
|
||||
# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
|
||||
# individual cipher suites. We need to do this because this is how
|
||||
# SecureTransport wants them.
|
||||
CIPHER_SUITES = [
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
|
||||
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
SecurityConst.TLS_AES_256_GCM_SHA384,
|
||||
SecurityConst.TLS_AES_128_GCM_SHA256,
|
||||
SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||
SecurityConst.TLS_AES_128_CCM_8_SHA256,
|
||||
SecurityConst.TLS_AES_128_CCM_SHA256,
|
||||
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
|
||||
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
|
||||
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
]
|
||||
|
||||
# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
|
||||
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
|
||||
# TLSv1 to 1.2 are supported on macOS 10.8+
|
||||
_protocol_to_min_max = {
|
||||
util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12)
|
||||
}
|
||||
|
||||
if hasattr(ssl, "PROTOCOL_SSLv2"):
|
||||
_protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
|
||||
SecurityConst.kSSLProtocol2,
|
||||
SecurityConst.kSSLProtocol2,
|
||||
)
|
||||
if hasattr(ssl, "PROTOCOL_SSLv3"):
|
||||
_protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
|
||||
SecurityConst.kSSLProtocol3,
|
||||
SecurityConst.kSSLProtocol3,
|
||||
)
|
||||
if hasattr(ssl, "PROTOCOL_TLSv1"):
|
||||
_protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
|
||||
SecurityConst.kTLSProtocol1,
|
||||
SecurityConst.kTLSProtocol1,
|
||||
)
|
||||
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
|
||||
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
|
||||
SecurityConst.kTLSProtocol11,
|
||||
SecurityConst.kTLSProtocol11,
|
||||
)
|
||||
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
|
||||
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
|
||||
SecurityConst.kTLSProtocol12,
|
||||
SecurityConst.kTLSProtocol12,
|
||||
)
|
||||
|
||||
|
||||
def inject_into_urllib3():
|
||||
"""
|
||||
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
|
||||
"""
|
||||
util.SSLContext = SecureTransportContext
|
||||
util.ssl_.SSLContext = SecureTransportContext
|
||||
util.HAS_SNI = HAS_SNI
|
||||
util.ssl_.HAS_SNI = HAS_SNI
|
||||
util.IS_SECURETRANSPORT = True
|
||||
util.ssl_.IS_SECURETRANSPORT = True
|
||||
|
||||
|
||||
def extract_from_urllib3():
|
||||
"""
|
||||
Undo monkey-patching by :func:`inject_into_urllib3`.
|
||||
"""
|
||||
util.SSLContext = orig_util_SSLContext
|
||||
util.ssl_.SSLContext = orig_util_SSLContext
|
||||
util.HAS_SNI = orig_util_HAS_SNI
|
||||
util.ssl_.HAS_SNI = orig_util_HAS_SNI
|
||||
util.IS_SECURETRANSPORT = False
|
||||
util.ssl_.IS_SECURETRANSPORT = False
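# Usage sketch (not part of the vendored module): with a standalone urllib3
# install, the two helpers above are used as a matched pair, e.g.
#
#   import urllib3
#   import urllib3.contrib.securetransport
#
#   urllib3.contrib.securetransport.inject_into_urllib3()
#   try:
#       urllib3.PoolManager().request("GET", "https://example.com/")
#   finally:
#       urllib3.contrib.securetransport.extract_from_urllib3()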
|
||||
|
||||
|
||||
def _read_callback(connection_id, data_buffer, data_length_pointer):
|
||||
"""
|
||||
SecureTransport read callback. This is called by ST to request that data
|
||||
be returned from the socket.
|
||||
"""
|
||||
wrapped_socket = None
|
||||
try:
|
||||
wrapped_socket = _connection_refs.get(connection_id)
|
||||
if wrapped_socket is None:
|
||||
return SecurityConst.errSSLInternal
|
||||
base_socket = wrapped_socket.socket
|
||||
|
||||
requested_length = data_length_pointer[0]
|
||||
|
||||
timeout = wrapped_socket.gettimeout()
|
||||
error = None
|
||||
read_count = 0
|
||||
|
||||
try:
|
||||
while read_count < requested_length:
|
||||
if timeout is None or timeout >= 0:
|
||||
if not util.wait_for_read(base_socket, timeout):
|
||||
raise socket.error(errno.EAGAIN, "timed out")
|
||||
|
||||
remaining = requested_length - read_count
|
||||
buffer = (ctypes.c_char * remaining).from_address(
|
||||
data_buffer + read_count
|
||||
)
|
||||
chunk_size = base_socket.recv_into(buffer, remaining)
|
||||
read_count += chunk_size
|
||||
if not chunk_size:
|
||||
if not read_count:
|
||||
return SecurityConst.errSSLClosedGraceful
|
||||
break
|
||||
except (socket.error) as e:
|
||||
error = e.errno
|
||||
|
||||
if error is not None and error != errno.EAGAIN:
|
||||
data_length_pointer[0] = read_count
|
||||
if error == errno.ECONNRESET or error == errno.EPIPE:
|
||||
return SecurityConst.errSSLClosedAbort
|
||||
raise
|
||||
|
||||
data_length_pointer[0] = read_count
|
||||
|
||||
if read_count != requested_length:
|
||||
return SecurityConst.errSSLWouldBlock
|
||||
|
||||
return 0
|
||||
except Exception as e:
|
||||
if wrapped_socket is not None:
|
||||
wrapped_socket._exception = e
|
||||
return SecurityConst.errSSLInternal
|
||||
|
||||
|
||||
def _write_callback(connection_id, data_buffer, data_length_pointer):
|
||||
"""
|
||||
SecureTransport write callback. This is called by ST to request that data
|
||||
actually be sent on the network.
|
||||
"""
|
||||
wrapped_socket = None
|
||||
try:
|
||||
wrapped_socket = _connection_refs.get(connection_id)
|
||||
if wrapped_socket is None:
|
||||
return SecurityConst.errSSLInternal
|
||||
base_socket = wrapped_socket.socket
|
||||
|
||||
bytes_to_write = data_length_pointer[0]
|
||||
data = ctypes.string_at(data_buffer, bytes_to_write)
|
||||
|
||||
timeout = wrapped_socket.gettimeout()
|
||||
error = None
|
||||
sent = 0
|
||||
|
||||
try:
|
||||
while sent < bytes_to_write:
|
||||
if timeout is None or timeout >= 0:
|
||||
if not util.wait_for_write(base_socket, timeout):
|
||||
raise socket.error(errno.EAGAIN, "timed out")
|
||||
chunk_sent = base_socket.send(data)
|
||||
sent += chunk_sent
|
||||
|
||||
# This has some needless copying here, but I'm not sure there's
|
||||
# much value in optimising this data path.
|
||||
data = data[chunk_sent:]
|
||||
except (socket.error) as e:
|
||||
error = e.errno
|
||||
|
||||
if error is not None and error != errno.EAGAIN:
|
||||
data_length_pointer[0] = sent
|
||||
if error == errno.ECONNRESET or error == errno.EPIPE:
|
||||
return SecurityConst.errSSLClosedAbort
|
||||
raise
|
||||
|
||||
data_length_pointer[0] = sent
|
||||
|
||||
if sent != bytes_to_write:
|
||||
return SecurityConst.errSSLWouldBlock
|
||||
|
||||
return 0
|
||||
except Exception as e:
|
||||
if wrapped_socket is not None:
|
||||
wrapped_socket._exception = e
|
||||
return SecurityConst.errSSLInternal
|
||||
|
||||
|
||||
# We need to keep references to these two objects alive: if they get GC'd while
# in use then SecureTransport could attempt to call a function that is in freed
|
||||
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
|
||||
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
|
||||
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
|
||||
|
||||
|
||||
class WrappedSocket(object):
|
||||
"""
|
||||
API-compatibility wrapper for Python's ``ssl``-wrapped socket object (``ssl.SSLSocket``).
|
||||
|
||||
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
|
||||
collector of PyPy.
|
||||
"""
|
||||
|
||||
def __init__(self, socket):
|
||||
self.socket = socket
|
||||
self.context = None
|
||||
self._makefile_refs = 0
|
||||
self._closed = False
|
||||
self._exception = None
|
||||
self._keychain = None
|
||||
self._keychain_dir = None
|
||||
self._client_cert_chain = None
|
||||
|
||||
# We save off the previously-configured timeout and then set it to
|
||||
# zero. This is done because we use select and friends to handle the
|
||||
# timeouts, but if we leave the timeout set on the lower socket then
|
||||
# Python will "kindly" call select on that socket again for us. Avoid
|
||||
# that by forcing the timeout to zero.
|
||||
self._timeout = self.socket.gettimeout()
|
||||
self.socket.settimeout(0)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _raise_on_error(self):
|
||||
"""
|
||||
A context manager that can be used to wrap calls that do I/O from
|
||||
SecureTransport. If any of the I/O callbacks hit an exception, this
|
||||
context manager will correctly propagate the exception after the fact.
|
||||
This avoids silently swallowing those exceptions.
|
||||
|
||||
It also correctly forces the socket closed.
|
||||
"""
|
||||
self._exception = None
|
||||
|
||||
# We explicitly don't catch around this yield because in the unlikely
|
||||
# event that an exception was hit in the block we don't want to swallow
|
||||
# it.
|
||||
yield
|
||||
if self._exception is not None:
|
||||
exception, self._exception = self._exception, None
|
||||
self.close()
|
||||
raise exception
|
||||
|
||||
def _set_ciphers(self):
|
||||
"""
|
||||
Sets up the allowed ciphers. By default this matches the set in
|
||||
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. The cipher list is
hard-coded and cannot be changed at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
|
||||
"""
|
||||
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
|
||||
result = Security.SSLSetEnabledCiphers(
|
||||
self.context, ciphers, len(CIPHER_SUITES)
|
||||
)
|
||||
_assert_no_error(result)
|
||||
|
||||
def _custom_validate(self, verify, trust_bundle):
|
||||
"""
|
||||
Called when we have set custom validation. We do this in two cases:
|
||||
first, when cert validation is entirely disabled; and second, when
|
||||
using a custom trust DB.
|
||||
"""
|
||||
# If we disabled cert validation, just say: cool.
|
||||
if not verify:
|
||||
return
|
||||
|
||||
# We want data in memory, so load it up.
|
||||
if os.path.isfile(trust_bundle):
|
||||
with open(trust_bundle, "rb") as f:
|
||||
trust_bundle = f.read()
|
||||
|
||||
cert_array = None
|
||||
trust = Security.SecTrustRef()
|
||||
|
||||
try:
|
||||
# Get a CFArray that contains the certs we want.
|
||||
cert_array = _cert_array_from_pem(trust_bundle)
|
||||
|
||||
# Ok, now the hard part. We want to get the SecTrustRef that ST has
|
||||
# created for this connection, shove our CAs into it, tell ST to
|
||||
# ignore everything else it knows, and then ask if it can build a
|
||||
# chain. This is a buuuunch of code.
|
||||
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
|
||||
_assert_no_error(result)
|
||||
if not trust:
|
||||
raise ssl.SSLError("Failed to copy trust reference")
|
||||
|
||||
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
|
||||
_assert_no_error(result)
|
||||
|
||||
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
|
||||
_assert_no_error(result)
|
||||
|
||||
trust_result = Security.SecTrustResultType()
|
||||
result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
|
||||
_assert_no_error(result)
|
||||
finally:
|
||||
if trust:
|
||||
CoreFoundation.CFRelease(trust)
|
||||
|
||||
if cert_array is not None:
|
||||
CoreFoundation.CFRelease(cert_array)
|
||||
|
||||
# Ok, now we can look at what the result was.
|
||||
successes = (
|
||||
SecurityConst.kSecTrustResultUnspecified,
|
||||
SecurityConst.kSecTrustResultProceed,
|
||||
)
|
||||
if trust_result.value not in successes:
|
||||
raise ssl.SSLError(
|
||||
"certificate verify failed, error code: %d" % trust_result.value
|
||||
)
|
||||
|
||||
def handshake(
|
||||
self,
|
||||
server_hostname,
|
||||
verify,
|
||||
trust_bundle,
|
||||
min_version,
|
||||
max_version,
|
||||
client_cert,
|
||||
client_key,
|
||||
client_key_passphrase,
|
||||
):
|
||||
"""
|
||||
Actually performs the TLS handshake. This is run automatically by
|
||||
wrapped socket, and shouldn't be needed in user code.
|
||||
"""
|
||||
# First, we do the initial bits of connection setup. We need to create
|
||||
# a context, set its I/O funcs, and set the connection reference.
|
||||
self.context = Security.SSLCreateContext(
|
||||
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
|
||||
)
|
||||
result = Security.SSLSetIOFuncs(
|
||||
self.context, _read_callback_pointer, _write_callback_pointer
|
||||
)
|
||||
_assert_no_error(result)
|
||||
|
||||
# Here we need to compute the handle to use. We do this by taking the
|
||||
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
|
||||
# just keep incrementing by one until we find a free space.
|
||||
with _connection_ref_lock:
|
||||
handle = id(self) % 2147483647
|
||||
while handle in _connection_refs:
|
||||
handle = (handle + 1) % 2147483647
|
||||
_connection_refs[handle] = self
|
||||
|
||||
result = Security.SSLSetConnection(self.context, handle)
|
||||
_assert_no_error(result)
|
||||
|
||||
# If we have a server hostname, we should set that too.
|
||||
if server_hostname:
|
||||
if not isinstance(server_hostname, bytes):
|
||||
server_hostname = server_hostname.encode("utf-8")
|
||||
|
||||
result = Security.SSLSetPeerDomainName(
|
||||
self.context, server_hostname, len(server_hostname)
|
||||
)
|
||||
_assert_no_error(result)
|
||||
|
||||
# Setup the ciphers.
|
||||
self._set_ciphers()
|
||||
|
||||
# Set the minimum and maximum TLS versions.
|
||||
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
|
||||
_assert_no_error(result)
|
||||
|
||||
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
|
||||
_assert_no_error(result)
|
||||
|
||||
# If there's a trust DB, we need to use it. We do that by telling
|
||||
# SecureTransport to break on server auth. We also do that if we don't
|
||||
# want to validate the certs at all: we just won't actually do any
|
||||
# authing in that case.
|
||||
if not verify or trust_bundle is not None:
|
||||
result = Security.SSLSetSessionOption(
|
||||
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
|
||||
)
|
||||
_assert_no_error(result)
|
||||
|
||||
# If there's a client cert, we need to use it.
|
||||
if client_cert:
|
||||
self._keychain, self._keychain_dir = _temporary_keychain()
|
||||
self._client_cert_chain = _load_client_cert_chain(
|
||||
self._keychain, client_cert, client_key
|
||||
)
|
||||
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
|
||||
_assert_no_error(result)
|
||||
|
||||
while True:
|
||||
with self._raise_on_error():
|
||||
result = Security.SSLHandshake(self.context)
|
||||
|
||||
if result == SecurityConst.errSSLWouldBlock:
|
||||
raise socket.timeout("handshake timed out")
|
||||
elif result == SecurityConst.errSSLServerAuthCompleted:
|
||||
self._custom_validate(verify, trust_bundle)
|
||||
continue
|
||||
else:
|
||||
_assert_no_error(result)
|
||||
break
|
||||
|
||||
def fileno(self):
|
||||
return self.socket.fileno()
|
||||
|
||||
# Copy-pasted from Python 3.5 source code
|
||||
def _decref_socketios(self):
|
||||
if self._makefile_refs > 0:
|
||||
self._makefile_refs -= 1
|
||||
if self._closed:
|
||||
self.close()
|
||||
|
||||
def recv(self, bufsiz):
|
||||
buffer = ctypes.create_string_buffer(bufsiz)
|
||||
bytes_read = self.recv_into(buffer, bufsiz)
|
||||
data = buffer[:bytes_read]
|
||||
return data
|
||||
|
||||
def recv_into(self, buffer, nbytes=None):
|
||||
# Read short on EOF.
|
||||
if self._closed:
|
||||
return 0
|
||||
|
||||
if nbytes is None:
|
||||
nbytes = len(buffer)
|
||||
|
||||
buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
|
||||
processed_bytes = ctypes.c_size_t(0)
|
||||
|
||||
with self._raise_on_error():
|
||||
result = Security.SSLRead(
|
||||
self.context, buffer, nbytes, ctypes.byref(processed_bytes)
|
||||
)
|
||||
|
||||
# There are some result codes that we want to treat as "not always
|
||||
# errors". Specifically, those are errSSLWouldBlock,
|
||||
# errSSLClosedGraceful, and errSSLClosedNoNotify.
|
||||
if result == SecurityConst.errSSLWouldBlock:
|
||||
# If we didn't process any bytes, then this was just a time out.
|
||||
# However, we can get errSSLWouldBlock in situations when we *did*
|
||||
# read some data, and in those cases we should just read "short"
|
||||
# and return.
|
||||
if processed_bytes.value == 0:
|
||||
# Timed out, no data read.
|
||||
raise socket.timeout("recv timed out")
|
||||
elif result in (
|
||||
SecurityConst.errSSLClosedGraceful,
|
||||
SecurityConst.errSSLClosedNoNotify,
|
||||
):
|
||||
# The remote peer has closed this connection. We should do so as
|
||||
# well. Note that we don't actually return here because in
|
||||
# principle this could actually be fired along with return data.
|
||||
# It's unlikely though.
|
||||
self.close()
|
||||
else:
|
||||
_assert_no_error(result)
|
||||
|
||||
# Ok, we read and probably succeeded. We should return whatever data
|
||||
# was actually read.
|
||||
return processed_bytes.value
|
||||
|
||||
def settimeout(self, timeout):
|
||||
self._timeout = timeout
|
||||
|
||||
def gettimeout(self):
|
||||
return self._timeout
|
||||
|
||||
def send(self, data):
|
||||
processed_bytes = ctypes.c_size_t(0)
|
||||
|
||||
with self._raise_on_error():
|
||||
result = Security.SSLWrite(
|
||||
self.context, data, len(data), ctypes.byref(processed_bytes)
|
||||
)
|
||||
|
||||
if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
|
||||
# Timed out
|
||||
raise socket.timeout("send timed out")
|
||||
else:
|
||||
_assert_no_error(result)
|
||||
|
||||
# We sent, and probably succeeded. Tell them how much we sent.
|
||||
return processed_bytes.value
|
||||
|
||||
def sendall(self, data):
|
||||
total_sent = 0
|
||||
while total_sent < len(data):
|
||||
sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
|
||||
total_sent += sent
|
||||
|
||||
def shutdown(self):
|
||||
with self._raise_on_error():
|
||||
Security.SSLClose(self.context)
|
||||
|
||||
def close(self):
|
||||
# TODO: should I do clean shutdown here? Do I have to?
|
||||
if self._makefile_refs < 1:
|
||||
self._closed = True
|
||||
if self.context:
|
||||
CoreFoundation.CFRelease(self.context)
|
||||
self.context = None
|
||||
if self._client_cert_chain:
|
||||
CoreFoundation.CFRelease(self._client_cert_chain)
|
||||
self._client_cert_chain = None
|
||||
if self._keychain:
|
||||
Security.SecKeychainDelete(self._keychain)
|
||||
CoreFoundation.CFRelease(self._keychain)
|
||||
shutil.rmtree(self._keychain_dir)
|
||||
self._keychain = self._keychain_dir = None
|
||||
return self.socket.close()
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
def getpeercert(self, binary_form=False):
|
||||
# Urgh, annoying.
|
||||
#
|
||||
# Here's how we do this:
|
||||
#
|
||||
# 1. Call SSLCopyPeerTrust to get hold of the trust object for this
|
||||
# connection.
|
||||
# 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
|
||||
# 3. To get the CN, call SecCertificateCopyCommonName and process that
|
||||
# string so that it's of the appropriate type.
|
||||
# 4. To get the SAN, we need to do something a bit more complex:
|
||||
# a. Call SecCertificateCopyValues to get the data, requesting
|
||||
# kSecOIDSubjectAltName.
|
||||
# b. Mess about with this dictionary to try to get the SANs out.
|
||||
#
|
||||
# This is gross. Really gross. It's going to be a few hundred LoC extra
|
||||
# just to repeat something that SecureTransport can *already do*. So my
|
||||
# operating assumption at this time is that what we want to do is
|
||||
# instead to just flag to urllib3 that it shouldn't do its own hostname
|
||||
# validation when using SecureTransport.
|
||||
if not binary_form:
|
||||
raise ValueError("SecureTransport only supports dumping binary certs")
|
||||
trust = Security.SecTrustRef()
|
||||
certdata = None
|
||||
der_bytes = None
|
||||
|
||||
try:
|
||||
# Grab the trust store.
|
||||
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
|
||||
_assert_no_error(result)
|
||||
if not trust:
|
||||
# Probably we haven't done the handshake yet. No biggie.
|
||||
return None
|
||||
|
||||
cert_count = Security.SecTrustGetCertificateCount(trust)
|
||||
if not cert_count:
|
||||
# Also a case that might happen if we haven't handshaked.
|
||||
# Handshook? Handshaken?
|
||||
return None
|
||||
|
||||
leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
|
||||
assert leaf
|
||||
|
||||
# Ok, now we want the DER bytes.
|
||||
certdata = Security.SecCertificateCopyData(leaf)
|
||||
assert certdata
|
||||
|
||||
data_length = CoreFoundation.CFDataGetLength(certdata)
|
||||
data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
|
||||
der_bytes = ctypes.string_at(data_buffer, data_length)
|
||||
finally:
|
||||
if certdata:
|
||||
CoreFoundation.CFRelease(certdata)
|
||||
if trust:
|
||||
CoreFoundation.CFRelease(trust)
|
||||
|
||||
return der_bytes
|
||||
|
||||
def version(self):
|
||||
protocol = Security.SSLProtocol()
|
||||
result = Security.SSLGetNegotiatedProtocolVersion(
|
||||
self.context, ctypes.byref(protocol)
|
||||
)
|
||||
_assert_no_error(result)
|
||||
if protocol.value == SecurityConst.kTLSProtocol13:
|
||||
raise ssl.SSLError("SecureTransport does not support TLS 1.3")
|
||||
elif protocol.value == SecurityConst.kTLSProtocol12:
|
||||
return "TLSv1.2"
|
||||
elif protocol.value == SecurityConst.kTLSProtocol11:
|
||||
return "TLSv1.1"
|
||||
elif protocol.value == SecurityConst.kTLSProtocol1:
|
||||
return "TLSv1"
|
||||
elif protocol.value == SecurityConst.kSSLProtocol3:
|
||||
return "SSLv3"
|
||||
elif protocol.value == SecurityConst.kSSLProtocol2:
|
||||
return "SSLv2"
|
||||
else:
|
||||
raise ssl.SSLError("Unknown TLS version: %r" % protocol)
|
||||
|
||||
def _reuse(self):
|
||||
self._makefile_refs += 1
|
||||
|
||||
def _drop(self):
|
||||
if self._makefile_refs < 1:
|
||||
self.close()
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
|
||||
if _fileobject: # Platform-specific: Python 2
|
||||
|
||||
def makefile(self, mode, bufsize=-1):
|
||||
self._makefile_refs += 1
|
||||
return _fileobject(self, mode, bufsize, close=True)
|
||||
|
||||
|
||||
else: # Platform-specific: Python 3
|
||||
|
||||
def makefile(self, mode="r", buffering=None, *args, **kwargs):
|
||||
# We disable buffering with SecureTransport because it conflicts with
|
||||
# the buffering that ST does internally (see issue #1153 for more).
|
||||
buffering = 0
|
||||
return backport_makefile(self, mode, buffering, *args, **kwargs)
|
||||
|
||||
|
||||
WrappedSocket.makefile = makefile
|
||||
|
||||
|
||||
class SecureTransportContext(object):
|
||||
"""
|
||||
I am a wrapper class for the SecureTransport library, to translate the
|
||||
interface of the standard library ``SSLContext`` object to calls into
|
||||
SecureTransport.
|
||||
"""
|
||||
|
||||
def __init__(self, protocol):
|
||||
self._min_version, self._max_version = _protocol_to_min_max[protocol]
|
||||
self._options = 0
|
||||
self._verify = False
|
||||
self._trust_bundle = None
|
||||
self._client_cert = None
|
||||
self._client_key = None
|
||||
self._client_key_passphrase = None
|
||||
|
||||
@property
|
||||
def check_hostname(self):
|
||||
"""
|
||||
SecureTransport cannot have its hostname checking disabled. For more,
|
||||
see the comment on getpeercert() in this file.
|
||||
"""
|
||||
return True
|
||||
|
||||
@check_hostname.setter
|
||||
def check_hostname(self, value):
|
||||
"""
|
||||
SecureTransport cannot have its hostname checking disabled. For more,
|
||||
see the comment on getpeercert() in this file.
|
||||
"""
|
||||
pass
|
||||
|
||||
@property
|
||||
def options(self):
|
||||
# TODO: Well, crap.
|
||||
#
|
||||
# So this is the bit of the code that is the most likely to cause us
|
||||
# trouble. Essentially we need to enumerate all of the SSL options that
|
||||
# users might want to use and try to see if we can sensibly translate
|
||||
# them, or whether we should just ignore them.
|
||||
return self._options
|
||||
|
||||
@options.setter
|
||||
def options(self, value):
|
||||
# TODO: Update in line with above.
|
||||
self._options = value
|
||||
|
||||
@property
|
||||
def verify_mode(self):
|
||||
return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
|
||||
|
||||
@verify_mode.setter
|
||||
def verify_mode(self, value):
|
||||
self._verify = True if value == ssl.CERT_REQUIRED else False
|
||||
|
||||
def set_default_verify_paths(self):
|
||||
# So, this has to do something a bit weird. Specifically, what it does
|
||||
# is nothing.
|
||||
#
|
||||
# This means that, if we had previously had load_verify_locations
|
||||
# called, this does not undo that. We need to do that because it turns
|
||||
# out that the rest of the urllib3 code will attempt to load the
|
||||
# default verify paths if it hasn't been told about any paths, even if
# the context itself was configured sometime earlier. We resolve that by
# just ignoring it.
|
||||
pass
|
||||
|
||||
def load_default_certs(self):
|
||||
return self.set_default_verify_paths()
|
||||
|
||||
def set_ciphers(self, ciphers):
|
||||
# For now, we just require the default cipher string.
|
||||
if ciphers != util.ssl_.DEFAULT_CIPHERS:
|
||||
raise ValueError("SecureTransport doesn't support custom cipher strings")
|
||||
|
||||
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
|
||||
# OK, we only really support cadata and cafile.
|
||||
if capath is not None:
|
||||
raise ValueError("SecureTransport does not support cert directories")
|
||||
|
||||
# Raise if cafile does not exist.
|
||||
if cafile is not None:
|
||||
with open(cafile):
|
||||
pass
|
||||
|
||||
self._trust_bundle = cafile or cadata
|
||||
|
||||
def load_cert_chain(self, certfile, keyfile=None, password=None):
|
||||
self._client_cert = certfile
|
||||
self._client_key = keyfile
|
||||
self._client_cert_passphrase = password
|
||||
|
||||
def wrap_socket(
|
||||
self,
|
||||
sock,
|
||||
server_side=False,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
server_hostname=None,
|
||||
):
|
||||
# So, what do we do here? Firstly, we assert some properties. This is a
|
||||
# stripped down shim, so there is some functionality we don't support.
|
||||
# See PEP 543 for the real deal.
|
||||
assert not server_side
|
||||
assert do_handshake_on_connect
|
||||
assert suppress_ragged_eofs
|
||||
|
||||
# Ok, we're good to go. Now we want to create the wrapped socket object
|
||||
# and store it in the appropriate place.
|
||||
wrapped_socket = WrappedSocket(sock)
|
||||
|
||||
# Now we can handshake
|
||||
wrapped_socket.handshake(
|
||||
server_hostname,
|
||||
self._verify,
|
||||
self._trust_bundle,
|
||||
self._min_version,
|
||||
self._max_version,
|
||||
self._client_cert,
|
||||
self._client_key,
|
||||
self._client_key_passphrase,
|
||||
)
|
||||
return wrapped_socket
|
210
venv/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py
Normal file
|
@ -0,0 +1,210 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
This module contains provisional support for SOCKS proxies from within
|
||||
urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
|
||||
SOCKS5. To enable its functionality, either install PySocks or install this
|
||||
module with the ``socks`` extra.
|
||||
|
||||
The SOCKS implementation supports the full range of urllib3 features. It also
|
||||
supports the following SOCKS features:
|
||||
|
||||
- SOCKS4A (``proxy_url='socks4a://...``)
|
||||
- SOCKS4 (``proxy_url='socks4://...``)
|
||||
- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
|
||||
- SOCKS5 with local DNS (``proxy_url='socks5://...``)
|
||||
- Usernames and passwords for the SOCKS proxy
|
||||
|
||||
.. note::
|
||||
It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
|
||||
your ``proxy_url`` to ensure that DNS resolution is done from the remote
|
||||
server instead of client-side when connecting to a domain name.
|
||||
|
||||
SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
|
||||
supports IPv4, IPv6, and domain names.
|
||||
|
||||
When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
|
||||
will be sent as the ``userid`` section of the SOCKS request::
|
||||
|
||||
proxy_url="socks4a://<userid>@proxy-host"
|
||||
|
||||
When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
|
||||
of the ``proxy_url`` will be sent as the username/password to authenticate
|
||||
with the proxy::
|
||||
|
||||
proxy_url="socks5h://<username>:<password>@proxy-host"
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
try:
|
||||
import socks
|
||||
except ImportError:
|
||||
import warnings
|
||||
from ..exceptions import DependencyWarning
|
||||
|
||||
warnings.warn(
|
||||
(
|
||||
"SOCKS support in urllib3 requires the installation of optional "
|
||||
"dependencies: specifically, PySocks. For more information, see "
|
||||
"https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies"
|
||||
),
|
||||
DependencyWarning,
|
||||
)
|
||||
raise
|
||||
|
||||
from socket import error as SocketError, timeout as SocketTimeout
|
||||
|
||||
from ..connection import HTTPConnection, HTTPSConnection
|
||||
from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
|
||||
from ..exceptions import ConnectTimeoutError, NewConnectionError
|
||||
from ..poolmanager import PoolManager
|
||||
from ..util.url import parse_url
|
||||
|
||||
try:
|
||||
import ssl
|
||||
except ImportError:
|
||||
ssl = None
|
||||
|
||||
|
||||
class SOCKSConnection(HTTPConnection):
|
||||
"""
|
||||
A plain-text HTTP connection that connects via a SOCKS proxy.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._socks_options = kwargs.pop("_socks_options")
|
||||
super(SOCKSConnection, self).__init__(*args, **kwargs)
|
||||
|
||||
def _new_conn(self):
|
||||
"""
|
||||
Establish a new connection via the SOCKS proxy.
|
||||
"""
|
||||
extra_kw = {}
|
||||
if self.source_address:
|
||||
extra_kw["source_address"] = self.source_address
|
||||
|
||||
if self.socket_options:
|
||||
extra_kw["socket_options"] = self.socket_options
|
||||
|
||||
try:
|
||||
conn = socks.create_connection(
|
||||
(self.host, self.port),
|
||||
proxy_type=self._socks_options["socks_version"],
|
||||
proxy_addr=self._socks_options["proxy_host"],
|
||||
proxy_port=self._socks_options["proxy_port"],
|
||||
proxy_username=self._socks_options["username"],
|
||||
proxy_password=self._socks_options["password"],
|
||||
proxy_rdns=self._socks_options["rdns"],
|
||||
timeout=self.timeout,
|
||||
**extra_kw
|
||||
)
|
||||
|
||||
except SocketTimeout:
|
||||
raise ConnectTimeoutError(
|
||||
self,
|
||||
"Connection to %s timed out. (connect timeout=%s)"
|
||||
% (self.host, self.timeout),
|
||||
)
|
||||
|
||||
except socks.ProxyError as e:
|
||||
# This is fragile as hell, but it seems to be the only way to raise
|
||||
# useful errors here.
|
||||
if e.socket_err:
|
||||
error = e.socket_err
|
||||
if isinstance(error, SocketTimeout):
|
||||
raise ConnectTimeoutError(
|
||||
self,
|
||||
"Connection to %s timed out. (connect timeout=%s)"
|
||||
% (self.host, self.timeout),
|
||||
)
|
||||
else:
|
||||
raise NewConnectionError(
|
||||
self, "Failed to establish a new connection: %s" % error
|
||||
)
|
||||
else:
|
||||
raise NewConnectionError(
|
||||
self, "Failed to establish a new connection: %s" % e
|
||||
)
|
||||
|
||||
except SocketError as e: # Defensive: PySocks should catch all these.
|
||||
raise NewConnectionError(
|
||||
self, "Failed to establish a new connection: %s" % e
|
||||
)
|
||||
|
||||
return conn
|
||||
|
||||
|
||||
# We don't need to duplicate the Verified/Unverified distinction from
|
||||
# urllib3/connection.py here because the HTTPSConnection will already have been
|
||||
# correctly set to either the Verified or Unverified form by that module. This
|
||||
# means the SOCKSHTTPSConnection will automatically be the correct type.
|
||||
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
|
||||
pass
|
||||
|
||||
|
||||
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
|
||||
ConnectionCls = SOCKSConnection
|
||||
|
||||
|
||||
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
|
||||
ConnectionCls = SOCKSHTTPSConnection
|
||||
|
||||
|
||||
class SOCKSProxyManager(PoolManager):
|
||||
"""
|
||||
A version of the urllib3 ProxyManager that routes connections via the
|
||||
defined SOCKS proxy.
|
||||
"""
|
||||
|
||||
pool_classes_by_scheme = {
|
||||
"http": SOCKSHTTPConnectionPool,
|
||||
"https": SOCKSHTTPSConnectionPool,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
proxy_url,
|
||||
username=None,
|
||||
password=None,
|
||||
num_pools=10,
|
||||
headers=None,
|
||||
**connection_pool_kw
|
||||
):
|
||||
parsed = parse_url(proxy_url)
|
||||
|
||||
if username is None and password is None and parsed.auth is not None:
|
||||
split = parsed.auth.split(":")
|
||||
if len(split) == 2:
|
||||
username, password = split
|
||||
if parsed.scheme == "socks5":
|
||||
socks_version = socks.PROXY_TYPE_SOCKS5
|
||||
rdns = False
|
||||
elif parsed.scheme == "socks5h":
|
||||
socks_version = socks.PROXY_TYPE_SOCKS5
|
||||
rdns = True
|
||||
elif parsed.scheme == "socks4":
|
||||
socks_version = socks.PROXY_TYPE_SOCKS4
|
||||
rdns = False
|
||||
elif parsed.scheme == "socks4a":
|
||||
socks_version = socks.PROXY_TYPE_SOCKS4
|
||||
rdns = True
|
||||
else:
|
||||
raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
|
||||
|
||||
self.proxy_url = proxy_url
|
||||
|
||||
socks_options = {
|
||||
"socks_version": socks_version,
|
||||
"proxy_host": parsed.host,
|
||||
"proxy_port": parsed.port,
|
||||
"username": username,
|
||||
"password": password,
|
||||
"rdns": rdns,
|
||||
}
|
||||
connection_pool_kw["_socks_options"] = socks_options
|
||||
|
||||
super(SOCKSProxyManager, self).__init__(
|
||||
num_pools, headers, **connection_pool_kw
|
||||
)
|
||||
|
||||
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
|