Package googleapiclient :: Module http
[hide private]
[frames] | [no frames]

Source Code for Module googleapiclient.http

   1  # Copyright 2014 Google Inc. All Rights Reserved. 
   2  # 
   3  # Licensed under the Apache License, Version 2.0 (the "License"); 
   4  # you may not use this file except in compliance with the License. 
   5  # You may obtain a copy of the License at 
   6  # 
   7  #      http://www.apache.org/licenses/LICENSE-2.0 
   8  # 
   9  # Unless required by applicable law or agreed to in writing, software 
  10  # distributed under the License is distributed on an "AS IS" BASIS, 
  11  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  12  # See the License for the specific language governing permissions and 
  13  # limitations under the License. 
  14   
  15  """Classes to encapsulate a single HTTP request. 
  16   
  17  The classes implement a command pattern, with every 
  18  object supporting an execute() method that does the 
  19  actuall HTTP request. 
  20  """ 
  21  from __future__ import absolute_import 
  22  import six 
  23  from six.moves import http_client 
  24  from six.moves import range 
  25   
  26  __author__ = 'jcgregorio@google.com (Joe Gregorio)' 
  27   
  28  from six import BytesIO, StringIO 
  29  from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote 
  30   
import base64
import copy
import errno
import gzip
import httplib2
import json
import logging
import mimetypes
import os
import random
import socket
import sys
import time
import uuid
  44   
  45  # TODO(issue 221): Remove this conditional import jibbajabba. 
  46  try: 
  47    import ssl 
  48  except ImportError: 
  49    _ssl_SSLError = object() 
  50  else: 
  51    _ssl_SSLError = ssl.SSLError 
  52   
  53  from email.generator import Generator 
  54  from email.mime.multipart import MIMEMultipart 
  55  from email.mime.nonmultipart import MIMENonMultipart 
  56  from email.parser import FeedParser 
  57   
  58  # Oauth2client < 3 has the positional helper in 'util', >= 3 has it 
  59  # in '_helpers'. 
  60  try: 
  61    from oauth2client import util 
  62  except ImportError: 
  63    from oauth2client import _helpers as util 
  64   
  65  from googleapiclient import mimeparse 
  66  from googleapiclient.errors import BatchError 
  67  from googleapiclient.errors import HttpError 
  68  from googleapiclient.errors import InvalidChunkSizeError 
  69  from googleapiclient.errors import ResumableUploadError 
  70  from googleapiclient.errors import UnexpectedBodyError 
  71  from googleapiclient.errors import UnexpectedMethodError 
  72  from googleapiclient.model import JsonModel 
  73   
  74   
  75  LOGGER = logging.getLogger(__name__) 
  76   
  77  DEFAULT_CHUNK_SIZE = 512*1024 
  78   
  79  MAX_URI_LENGTH = 2048 
  80   
  81  _TOO_MANY_REQUESTS = 429 
82 83 84 -def _should_retry_response(resp_status, content):
85 """Determines whether a response should be retried. 86 87 Args: 88 resp_status: The response status received. 89 content: The response content body. 90 91 Returns: 92 True if the response should be retried, otherwise False. 93 """ 94 # Retry on 5xx errors. 95 if resp_status >= 500: 96 return True 97 98 # Retry on 429 errors. 99 if resp_status == _TOO_MANY_REQUESTS: 100 return True 101 102 # For 403 errors, we have to check for the `reason` in the response to 103 # determine if we should retry. 104 if resp_status == six.moves.http_client.FORBIDDEN: 105 # If there's no details about the 403 type, don't retry. 106 if not content: 107 return False 108 109 # Content is in JSON format. 110 try: 111 data = json.loads(content.decode('utf-8')) 112 reason = data['error']['errors'][0]['reason'] 113 except (UnicodeDecodeError, ValueError, KeyError): 114 LOGGER.warning('Invalid JSON content from response: %s', content) 115 return False 116 117 LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason) 118 119 # Only retry on rate limit related failures. 120 if reason in ('userRateLimitExceeded', 'rateLimitExceeded', ): 121 return True 122 123 # Everything else is a success or non-retriable so break. 124 return False
125
def _retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args,
                   **kwargs):
  """Retries an HTTP request multiple times while handling errors.

  If after all retries the request still fails, last error is either returned as
  return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).

  Args:
    http: Http object to be used to execute request.
    num_retries: Maximum number of retries.
    req_type: Type of the request (used for logging retries).
    sleep, rand: Functions to sleep for random time between retries.
    uri: URI to be requested.
    method: HTTP method to be used.
    args, kwargs: Additional arguments passed to http.request.

  Returns:
    resp, content - Response from the http request (may be HTTP 5xx).
  """
  resp = None
  content = None
  # Defined before the loop so the log line below can never reference an
  # unbound name, even if the loop structure changes.
  exception = None
  for retry_num in range(num_retries + 1):
    if retry_num > 0:
      # Randomized exponential backoff before each retry.
      sleep_time = rand() * 2 ** retry_num
      LOGGER.warning(
          'Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s',
          sleep_time, retry_num, num_retries, req_type, method, uri,
          resp.status if resp else exception)
      sleep(sleep_time)

    try:
      exception = None
      resp, content = http.request(uri, method, *args, **kwargs)
    # Retry on SSL errors and socket timeout errors.
    except _ssl_SSLError as ssl_error:
      exception = ssl_error
    except socket.error as socket_error:
      # errno's contents differ by platform, so we have to match by name.
      # Use the errno module directly instead of the undocumented
      # `socket.errno` attribute (an accident of socket's own imports).
      if errno.errorcode.get(socket_error.errno) not in (
          'WSAETIMEDOUT', 'ETIMEDOUT', 'EPIPE', 'ECONNABORTED', ):
        raise
      exception = socket_error

    if exception:
      if retry_num == num_retries:
        # Retries exhausted: surface the transport error to the caller.
        raise exception
      else:
        continue

    if not _should_retry_response(resp.status, content):
      break

  return resp, content
class MediaUploadProgress(object):
  """Status of a resumable upload."""

  def __init__(self, resumable_progress, total_size):
    """Record how much of an upload has been sent.

    Args:
      resumable_progress: int, bytes sent so far.
      total_size: int, total bytes in complete upload, or None if the total
        upload size isn't known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Fraction of the upload completed, as a float.

    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the upload is unknown.
    """
    # Guard clause: with an unknown total there is no meaningful percentage.
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
  """Status of a resumable download."""

  def __init__(self, resumable_progress, total_size):
    """Record how much of a download has been received.

    Args:
      resumable_progress: int, bytes received so far.
      total_size: int, total bytes in complete download.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Fraction of the download completed, as a float.

    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the download is unknown.
    """
    # Guard clause: with an unknown total there is no meaningful percentage.
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaUpload(object):
  """Describes a media object to upload.

  Base class that defines the interface of MediaUpload subclasses.

  Note that subclasses of MediaUpload may allow you to control the chunksize
  when uploading a media object. It is important to keep the size of the chunk
  as large as possible to keep the upload efficient. Other factors may influence
  the size of the chunk you use, particularly if you are working in an
  environment where individual HTTP requests may have a hardcoded time limit,
  such as under certain classes of requests under Google App Engine.

  Streams are io.Base compatible objects that support seek(). Some MediaUpload
  subclasses support using streams directly to upload data. Support for
  streaming may be indicated by a MediaUpload sub-class and if appropriate for a
  platform that stream will be used for uploading the media object. The support
  for streaming is indicated by has_stream() returning True. The stream() method
  should return an io.Base object that supports seek(). On platforms where the
  underlying httplib module supports streaming, for example Python 2.6 and
  later, the stream will be passed into the http library which will result in
  less memory being used and possibly faster uploads.

  If you need to upload media that can't be uploaded using any of the existing
  MediaUpload sub-class then you can sub-class MediaUpload for your particular
  needs.
  """

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return 'application/octet-stream'

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return None

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return False

  # NOTE: the second parameter was renamed from `end` to `length` so the
  # signature matches both this docstring and the MediaIoBaseUpload override;
  # positional callers are unaffected.
  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    raise NotImplementedError()

  def has_stream(self):
    """Does the underlying upload support a streaming interface.

    Streaming means it is an io.IOBase subclass that supports seek, i.e.
    seekable() returns True.

    Returns:
      True if the call to stream() will return an instance of a seekable io.Base
      subclass.
    """
    return False

  def stream(self):
    """A stream interface to the data being uploaded.

    Returns:
      The returned value is an io.IOBase subclass that supports seek, i.e.
      seekable() returns True.
    """
    raise NotImplementedError()

  @util.positional(1)
  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    if strip is not None:
      for member in strip:
        del d[member]
    # Record the concrete class so new_from_json() can find it again.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return json.dumps(d)

  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json()

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of MediaUpload that was serialized with
      to_json().
    """
    data = json.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
class MediaIoBaseUpload(MediaUpload):
  """A MediaUpload for a io.Base objects.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.

    fh = BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()

  Depending on the platform you are working on, you may pass -1 as the
  chunksize, which indicates that the entire file should be uploaded in a single
  request. If the underlying platform supports streams, such as Python 2.6 or
  later, then this can be very efficient as it avoids multiple connections, and
  also avoids loading the entire file into memory before sending it. Note that
  Google App Engine has a 5MB limit on request size, so you should never set
  your chunksize larger than 5MB, or to -1.
  """

  @util.positional(3)
  def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      fd: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking mode.
        The given stream must be seekable, that is, it must be able to call
        seek() on fd.
      mimetype: string, Mime-type of the file.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True. Pass in a value of -1 if the file is to be
        uploaded as a single chunk. Note that Google App Engine has a 5MB limit
        on request size, so you should never set your chunksize larger than 5MB,
        or to -1.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    super(MediaIoBaseUpload, self).__init__()
    self._fd = fd
    self._mimetype = mimetype
    # The only legal chunk sizes are -1 (single request) or a positive count.
    if chunksize != -1 and chunksize <= 0:
      raise InvalidChunkSizeError()
    self._chunksize = chunksize
    self._resumable = resumable

    # Determine the total size once, by seeking to the end of the stream.
    self._fd.seek(0, os.SEEK_END)
    self._size = self._fd.tell()

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    self._fd.seek(begin)
    return self._fd.read(length)

  def has_stream(self):
    """Does the underlying upload support a streaming interface.

    Streaming means it is an io.IOBase subclass that supports seek, i.e.
    seekable() returns True.

    Returns:
      True if the call to stream() will return an instance of a seekable io.Base
      subclass.
    """
    return True

  def stream(self):
    """A stream interface to the data being uploaded.

    Returns:
      The returned value is an io.IOBase subclass that supports seek, i.e.
      seekable() returns True.
    """
    return self._fd

  def to_json(self):
    """This upload type is not serializable."""
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaFileUpload(MediaIoBaseUpload):
  """A MediaUpload for a file.

  Construct a MediaFileUpload and pass as the media_body parameter of the
  method. For example, if we had a service that allowed uploading images:


    media = MediaFileUpload('cow.png', mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()

  Depending on the platform you are working on, you may pass -1 as the
  chunksize, which indicates that the entire file should be uploaded in a single
  request. If the underlying platform supports streams, such as Python 2.6 or
  later, then this can be very efficient as it avoids multiple connections, and
  also avoids loading the entire file into memory before sending it. Note that
  Google App Engine has a 5MB limit on request size, so you should never set
  your chunksize larger than 5MB, or to -1.
  """

  @util.positional(2)
  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      filename: string, Name of the file.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True. Pass in a value of -1 if the file is to be
        uploaded in a single chunk. Note that Google App Engine has a 5MB limit
        on request size, so you should never set your chunksize larger than 5MB,
        or to -1.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._filename = filename
    fd = open(self._filename, 'rb')
    if mimetype is None:
      # No mimetype provided, make a guess.
      mimetype, _ = mimetypes.guess_type(filename)
      if mimetype is None:
        # Guess failed, use octet-stream.
        mimetype = 'application/octet-stream'
    super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
                                          resumable=resumable)

  def __del__(self):
    # Close the file handle opened in the constructor; previously it leaked
    # until interpreter shutdown since nothing else ever closed it.
    self._fd.close()

  def to_json(self):
    """Creating a JSON representation of an instance of MediaFileUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    # The file handle itself cannot be serialized; from_json() reopens it.
    return self._to_json(strip=['_fd'])

  @staticmethod
  def from_json(s):
    """Restore a MediaFileUpload serialized by to_json()."""
    d = json.loads(s)
    return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
                           chunksize=d['_chunksize'], resumable=d['_resumable'])
class MediaInMemoryUpload(MediaIoBaseUpload):
  """MediaUpload for a chunk of bytes.

  DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
  the stream.
  """

  @util.positional(2)
  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaInMemoryUpload.

    DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
    the stream.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    # Wrap the raw bytes in an in-memory stream and delegate everything else
    # to MediaIoBaseUpload.
    stream = BytesIO(body)
    super(MediaInMemoryUpload, self).__init__(stream, mimetype,
                                              chunksize=chunksize,
                                              resumable=resumable)
class MediaIoBaseDownload(object):
  """Download media resources.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.


  Example:
    request = farms.animals().get_media(id='cow')
    fh = io.FileIO('cow.png', mode='wb')
    downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

    done = False
    while done is False:
      status, done = downloader.next_chunk()
      if status:
        print "Download %d%%." % int(status.progress() * 100)
    print "Download Complete!"
  """

  @util.positional(3)
  def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
    """Constructor.

    Args:
      fd: io.Base or file object, The stream in which to write the downloaded
        bytes.
      request: googleapiclient.http.HttpRequest, the media request to perform in
        chunks.
      chunksize: int, File will be downloaded in chunks of this many bytes.
    """
    self._fd = fd
    self._request = request
    self._uri = request.uri
    self._chunksize = chunksize
    self._progress = 0
    self._total_size = None
    self._done = False

    # Stubs for testing.
    self._sleep = time.sleep
    self._rand = random.random

  @util.positional(1)
  def next_chunk(self, num_retries=0):
    """Get the next chunk of the download.

    Args:
      num_retries: Integer, number of times to retry with randomized
          exponential backoff. If all retries fail, the raised HttpError
          represents the last request. If zero (default), we attempt the
          request only once.

    Returns:
      (status, done): (MediaDownloadStatus, boolean)
         The value of 'done' will be True when the media has been fully
         downloaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
    """
    # HTTP byte ranges are inclusive at both ends, so request exactly
    # self._chunksize bytes (the previous end of progress + chunksize asked
    # for one byte too many).
    headers = {
        'range': 'bytes=%d-%d' % (
            self._progress, self._progress + self._chunksize - 1)
        }
    http = self._request.http

    resp, content = _retry_request(
        http, num_retries, 'media download', self._sleep, self._rand, self._uri,
        'GET', headers=headers)

    if resp.status in [200, 206]:
      if 'content-location' in resp and resp['content-location'] != self._uri:
        # The server redirected us; use the new location for later chunks.
        self._uri = resp['content-location']
      self._progress += len(content)
      self._fd.write(content)

      if 'content-range' in resp:
        # 'Content-Range: bytes x-y/total' -- the total follows the slash.
        content_range = resp['content-range']
        length = content_range.rsplit('/', 1)[1]
        self._total_size = int(length)
      elif 'content-length' in resp:
        self._total_size = int(resp['content-length'])

      if self._progress == self._total_size:
        self._done = True
      return MediaDownloadProgress(self._progress, self._total_size), self._done
    else:
      raise HttpError(resp, content, uri=self._uri)
691 692 -class _StreamSlice(object):
693 """Truncated stream. 694 695 Takes a stream and presents a stream that is a slice of the original stream. 696 This is used when uploading media in chunks. In later versions of Python a 697 stream can be passed to httplib in place of the string of data to send. The 698 problem is that httplib just blindly reads to the end of the stream. This 699 wrapper presents a virtual stream that only reads to the end of the chunk. 700 """ 701
702 - def __init__(self, stream, begin, chunksize):
703 """Constructor. 704 705 Args: 706 stream: (io.Base, file object), the stream to wrap. 707 begin: int, the seek position the chunk begins at. 708 chunksize: int, the size of the chunk. 709 """ 710 self._stream = stream 711 self._begin = begin 712 self._chunksize = chunksize 713 self._stream.seek(begin)
714
715 - def read(self, n=-1):
716 """Read n bytes. 717 718 Args: 719 n, int, the number of bytes to read. 720 721 Returns: 722 A string of length 'n', or less if EOF is reached. 723 """ 724 # The data left available to read sits in [cur, end) 725 cur = self._stream.tell() 726 end = self._begin + self._chunksize 727 if n == -1 or cur + n > end: 728 n = end - cur 729 return self._stream.read(n)
730
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  @util.positional(4)
  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None,
               resumable=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
                it into a data object before returning, or raising an exception
                on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request,
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.methodId = methodId
    self.http = http
    self.postproc = postproc
    self.resumable = resumable
    # Callbacks invoked with the httplib2.Response after each execute().
    self.response_callbacks = []
    # True after a failed chunk upload; makes next_chunk() ask the server
    # how far the upload actually got before resending data.
    self._in_error_state = False

    # Pull the multipart boundary out of the content-type header.
    major, minor, params = mimeparse.parse_mime_type(
        self.headers.get('content-type', 'application/json'))

    # The size of the non-media part of the request.
    self.body_size = len(self.body or '')

    # The resumable URI to send chunks to.
    self.resumable_uri = None

    # The bytes that have been uploaded.
    self.resumable_progress = 0

    # Stubs for testing.
    self._rand = random.random
    self._sleep = time.sleep

  @util.positional(1)
  def execute(self, http=None, num_retries=0):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.
      num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
    """
    if http is None:
      http = self.http

    # Resumable requests are driven entirely through next_chunk() until the
    # media is fully uploaded and a response body is available.
    if self.resumable:
      body = None
      while body is None:
        _, body = self.next_chunk(http=http, num_retries=num_retries)
      return body

    # Non-resumable case.

    if 'content-length' not in self.headers:
      self.headers['content-length'] = str(self.body_size)
    # If the request URI is too long then turn it into a POST request.
    # The query string moves into the body and x-http-method-override tells
    # the server to treat it as a GET.
    if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
      self.method = 'POST'
      self.headers['x-http-method-override'] = 'GET'
      self.headers['content-type'] = 'application/x-www-form-urlencoded'
      parsed = urlparse(self.uri)
      self.uri = urlunparse(
          (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
           None)
          )
      self.body = parsed.query
      self.headers['content-length'] = str(len(self.body))

    # Handle retries for server-side errors.
    resp, content = _retry_request(
          http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
          method=str(self.method), body=self.body, headers=self.headers)

    for callback in self.response_callbacks:
      callback(resp)
    if resp.status >= 300:
      raise HttpError(resp, content, uri=self.uri)
    return self.postproc(resp, content)

  @util.positional(2)
  def add_response_callback(self, cb):
    """add_response_headers_callback

    Args:
      cb: Callback to be called on receiving the response headers, of signature:

        def cb(resp):
          # Where resp is an instance of httplib2.Response
    """
    self.response_callbacks.append(cb)

  @util.positional(1)
  def next_chunk(self, http=None, num_retries=0):
    """Execute the next step of a resumable upload.

    Can only be used if the method being executed supports media uploads and
    the MediaUpload object passed in was flagged as using resumable upload.

    Example:

      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1000, resumable=True)
      request = farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media)

      response = None
      while response is None:
        status, response = request.next_chunk()
        if status:
          print "Upload %d%% complete." % int(status.progress() * 100)


    Args:
      http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.
      num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
    """
    if http is None:
      http = self.http

    # Total size goes into the Content-Range header; '*' means unknown.
    if self.resumable.size() is None:
      size = '*'
    else:
      size = str(self.resumable.size())

    if self.resumable_uri is None:
      # First call: send the metadata request that yields the session URI
      # that all subsequent chunks are PUT to.
      start_headers = copy.copy(self.headers)
      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
      if size != '*':
        start_headers['X-Upload-Content-Length'] = size
      start_headers['content-length'] = str(self.body_size)

      resp, content = _retry_request(
          http, num_retries, 'resumable URI request', self._sleep, self._rand,
          self.uri, method=self.method, body=self.body, headers=start_headers)

      if resp.status == 200 and 'location' in resp:
        self.resumable_uri = resp['location']
      else:
        raise ResumableUploadError(resp, content)
    elif self._in_error_state:
      # If we are in an error state then query the server for current state of
      # the upload by sending an empty PUT and reading the 'range' header in
      # the response.
      headers = {
          'Content-Range': 'bytes */%s' % size,
          'content-length': '0'
          }
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   headers=headers)
      status, body = self._process_response(resp, content)
      if body:
        # The upload was complete.
        return (status, body)

    if self.resumable.has_stream():
      data = self.resumable.stream()
      if self.resumable.chunksize() == -1:
        # Single-chunk upload: hand httplib the stream positioned at the
        # resume point and send everything remaining.
        data.seek(self.resumable_progress)
        chunk_end = self.resumable.size() - self.resumable_progress - 1
      else:
        # Doing chunking with a stream, so wrap a slice of the stream.
        data = _StreamSlice(data, self.resumable_progress,
                            self.resumable.chunksize())
        chunk_end = min(
            self.resumable_progress + self.resumable.chunksize() - 1,
            self.resumable.size() - 1)
    else:
      data = self.resumable.getbytes(
          self.resumable_progress, self.resumable.chunksize())

      # A short read implies that we are at EOF, so finish the upload.
      if len(data) < self.resumable.chunksize():
        size = str(self.resumable_progress + len(data))

      chunk_end = self.resumable_progress + len(data) - 1

    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            self.resumable_progress, chunk_end, size),
        # Must set the content-length header here because httplib can't
        # calculate the size when working with _StreamSlice.
        'Content-Length': str(chunk_end - self.resumable_progress + 1)
        }

    for retry_num in range(num_retries + 1):
      if retry_num > 0:
        self._sleep(self._rand() * 2**retry_num)
        LOGGER.warning(
            'Retry #%d for media upload: %s %s, following status: %d'
            % (retry_num, self.method, self.uri, resp.status))

      try:
        resp, content = http.request(self.resumable_uri, method='PUT',
                                     body=data,
                                     headers=headers)
      except:
        # Any transport failure puts us in the error state so the next call
        # re-queries the server for how much data actually arrived.
        self._in_error_state = True
        raise
      if not _should_retry_response(resp.status, content):
        break

    return self._process_response(resp, content)

  def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    if resp.status in [200, 201]:
      self._in_error_state = False
      return None, self.postproc(resp, content)
    elif resp.status == 308:
      self._in_error_state = False
      # A "308 Resume Incomplete" indicates we are not done.
      try:
        # 'Range: bytes=0-N' -- the last byte the server has; resume at N+1.
        self.resumable_progress = int(resp['range'].split('-')[1]) + 1
      except KeyError:
        # If resp doesn't contain range header, resumable progress is 0
        self.resumable_progress = 0
      if 'location' in resp:
        self.resumable_uri = resp['location']
    else:
      self._in_error_state = True
      raise HttpError(resp, content, uri=self.uri)

    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
            None)

  def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    d = copy.copy(self.__dict__)
    if d['resumable'] is not None:
      d['resumable'] = self.resumable.to_json()
    # Drop members that cannot be serialized; from_json() re-supplies them.
    del d['http']
    del d['postproc']
    del d['_sleep']
    del d['_rand']

    return json.dumps(d)

  @staticmethod
  def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    d = json.loads(s)
    if d['resumable'] is not None:
      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
    return HttpRequest(
        http,
        postproc,
        uri=d['uri'],
        method=d['method'],
        body=d['body'],
        headers=d['headers'],
        methodId=d['methodId'],
        resumable=d['resumable'])
class BatchHttpRequest(object):
  """Batches multiple HttpRequest objects into a single HTTP request.

  Example:
    from googleapiclient.http import BatchHttpRequest

    def list_animals(request_id, response, exception):
      \"\"\"Do something with the animals list response.\"\"\"
      if exception is not None:
        # Do something with the exception.
        pass
      else:
        # Do something with the response.
        pass

    def list_farmers(request_id, response, exception):
      \"\"\"Do something with the farmers list response.\"\"\"
      if exception is not None:
        # Do something with the exception.
        pass
      else:
        # Do something with the response.
        pass

    service = build('farm', 'v2')

    batch = BatchHttpRequest()

    batch.add(service.animals().list(), list_animals)
    batch.add(service.farmers().list(), list_farmers)
    batch.execute(http=http)
  """

  @util.positional(1)
  def __init__(self, callback=None, batch_uri=None):
    """Constructor for a BatchHttpRequest.

    Args:
      callback: callable, A callback to be called for each response, of the
        form callback(id, response, exception). The first parameter is the
        request id, and the second is the deserialized response object. The
        third is an googleapiclient.errors.HttpError exception object if an
        HTTP error occurred while processing the request, or None if no error
        occurred.
      batch_uri: string, URI to send batch requests to.
    """
    if batch_uri is None:
      batch_uri = 'https://www.googleapis.com/batch'
    self._batch_uri = batch_uri

    # Global callback to be called for each individual response in the batch.
    self._callback = callback

    # A map from id to request.
    self._requests = {}

    # A map from id to callback.
    self._callbacks = {}

    # List of request ids, in the order in which they were added.
    self._order = []

    # The last auto generated id.
    self._last_auto_id = 0

    # Unique ID on which to base the Content-ID headers.
    self._base_id = None

    # A map from request id to (httplib2.Response, content) response pairs
    self._responses = {}

    # A map of id(Credentials) that have been refreshed.
    self._refreshed_credentials = {}

  def _refresh_and_apply_credentials(self, request, http):
    """Refresh the credentials and apply to the request.

    Args:
      request: HttpRequest, the request.
      http: httplib2.Http, the global http object for the batch.
    """
    # Get the credentials to refresh, but only once per refresh_token.
    # If there is no http per the request then refresh the http passed in
    # via execute().
    creds = None
    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      creds = request.http.request.credentials
    elif http is not None and hasattr(http.request, 'credentials'):
      creds = http.request.credentials
    if creds is not None:
      if id(creds) not in self._refreshed_credentials:
        creds.refresh(http)
        self._refreshed_credentials[id(creds)] = 1

      # Only apply the credentials if we are using the http object passed in,
      # otherwise apply() will get called during _serialize_request().
      if request.http is None or not hasattr(request.http.request,
          'credentials'):
        creds.apply(request.headers)

  def _id_to_header(self, id_):
    """Convert an id to a Content-ID header value.

    Args:
      id_: string, identifier of individual request.

    Returns:
      A Content-ID header with the id_ encoded into it. A UUID is prepended to
      the value because Content-ID headers are supposed to be universally
      unique.
    """
    if self._base_id is None:
      self._base_id = uuid.uuid4()

    return '<%s+%s>' % (self._base_id, quote(id_))

  def _header_to_id(self, header):
    """Convert a Content-ID header value to an id.

    Presumes the Content-ID header conforms to the format that _id_to_header()
    returns.

    Args:
      header: string, Content-ID header value.

    Returns:
      The extracted id value.

    Raises:
      BatchError if the header is not in the expected format.
    """
    if header[0] != '<' or header[-1] != '>':
      raise BatchError("Invalid value for Content-ID: %s" % header)
    if '+' not in header:
      raise BatchError("Invalid value for Content-ID: %s" % header)
    # rsplit: the id_ part never contains '+' (it is percent-quoted), but the
    # UUID base could in principle, so split from the right.
    base, id_ = header[1:-1].rsplit('+', 1)

    return unquote(id_)

  def _serialize_request(self, request):
    """Convert an HttpRequest object into a string.

    Args:
      request: HttpRequest, the request to serialize.

    Returns:
      The request as a string in application/http format.
    """
    # Construct status line
    parsed = urlparse(request.uri)
    request_line = urlunparse(
        ('', '', parsed.path, parsed.params, parsed.query, '')
    )
    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
    major, minor = request.headers.get('content-type', 'application/json').split('/')
    msg = MIMENonMultipart(major, minor)
    headers = request.headers.copy()

    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      request.http.request.credentials.apply(headers)

    # MIMENonMultipart adds its own Content-Type header.
    if 'content-type' in headers:
      del headers['content-type']

    for key, value in six.iteritems(headers):
      msg[key] = value
    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
      msg.set_payload(request.body)
      msg['content-length'] = str(len(request.body))

    # Serialize the mime message.
    fp = StringIO()
    # maxheaderlen=0 means don't line wrap headers.
    g = Generator(fp, maxheaderlen=0)
    g.flatten(msg, unixfrom=False)
    body = fp.getvalue()

    return status_line + body

  def _deserialize_response(self, payload):
    """Convert string into httplib2 response and content.

    Args:
      payload: string, headers and body as a string.

    Returns:
      A pair (resp, content), such as would be returned from httplib2.request.
    """
    # Strip off the status line
    status_line, payload = payload.split('\n', 1)
    protocol, status, reason = status_line.split(' ', 2)

    # Parse the rest of the response
    parser = FeedParser()
    parser.feed(payload)
    msg = parser.close()
    msg['status'] = status

    # Create httplib2.Response from the parsed headers.
    resp = httplib2.Response(msg)
    resp.reason = reason
    # e.g. 'HTTP/1.1' -> version 11.
    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))

    content = payload.split('\r\n\r\n', 1)[1]

    return resp, content

  def _new_id(self):
    """Create a new id.

    Auto incrementing number that avoids conflicts with ids already used.

    Returns:
      string, a new unique id.
    """
    self._last_auto_id += 1
    while str(self._last_auto_id) in self._requests:
      self._last_auto_id += 1
    return str(self._last_auto_id)

  @util.positional(2)
  def add(self, request, callback=None, request_id=None):
    """Add a new request.

    Every callback added will be paired with a unique id, the request_id. That
    unique id will be passed back to the callback when the response comes back
    from the server. The default behavior is to have the library generate its
    own unique id. If the caller passes in a request_id then they must ensure
    uniqueness for each request_id, and if they are not an exception is
    raised. Callers should either supply all request_ids or never supply a
    request id, to avoid such an error.

    Args:
      request: HttpRequest, Request to add to the batch.
      callback: callable, A callback to be called for this response, of the
        form callback(id, response, exception). The first parameter is the
        request id, and the second is the deserialized response object. The
        third is an googleapiclient.errors.HttpError exception object if an
        HTTP error occurred while processing the request, or None if no errors
        occurred.
      request_id: string, A unique id for the request. The id will be passed to
        the callback with the response.

    Returns:
      None

    Raises:
      BatchError if a media request is added to a batch.
      KeyError if the request_id is not unique.
    """
    if request_id is None:
      request_id = self._new_id()
    if request.resumable is not None:
      raise BatchError("Media requests cannot be used in a batch request.")
    if request_id in self._requests:
      raise KeyError("A request with this ID already exists: %s" % request_id)
    self._requests[request_id] = request
    self._callbacks[request_id] = callback
    self._order.append(request_id)

  def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
      http: httplib2.Http, an http object to be used to make the request with.
      order: list, list of request ids in the order they were added to the
        batch.
      requests: list, list of request objects to send.

    Raises:
      httplib2.HttpLib2Error if a transport error has occurred.
      googleapiclient.errors.BatchError if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out its own headers.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests.
    for request_id in order:
      request = requests[request_id]

      msg = MIMENonMultipart('application', 'http')
      msg['Content-Transfer-Encoding'] = 'binary'
      msg['Content-ID'] = self._id_to_header(request_id)

      body = self._serialize_request(request)
      msg.set_payload(body)
      message.attach(msg)

    # encode the body: note that we can't use `as_string`, because
    # it plays games with `From ` lines.
    fp = StringIO()
    g = Generator(fp, mangle_from_=False)
    g.flatten(message, unixfrom=False)
    body = fp.getvalue()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, method='POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
      raise HttpError(resp, content, uri=self._batch_uri)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    # PY3's FeedParser only accepts unicode. So we should decode content
    # here, and encode each payload again.
    if six.PY3:
      content = content.decode('utf-8')
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
      raise BatchError("Response not in multipart/mixed format.", resp=resp,
                       content=content)

    for part in mime_response.get_payload():
      request_id = self._header_to_id(part['Content-ID'])
      response, content = self._deserialize_response(part.get_payload())
      # We encode content here to emulate normal http response.
      if isinstance(content, six.text_type):
        content = content.encode('utf-8')
      self._responses[request_id] = (response, content)

  @util.positional(1)
  def execute(self, http=None):
    """Execute all the requests as a single batched HTTP request.

    Args:
      http: httplib2.Http, an http object to be used in place of the one the
        HttpRequest request object was constructed with. If one isn't supplied
        then use a http object from the requests in this batch.

    Returns:
      None

    Raises:
      httplib2.HttpLib2Error if a transport error has occurred.
      googleapiclient.errors.BatchError if the response is the wrong format.
    """
    # If we have no requests return
    if len(self._order) == 0:
      return None

    # If http is not supplied use the first valid one given in the requests.
    if http is None:
      for request_id in self._order:
        request = self._requests[request_id]
        if request is not None:
          http = request.http
          break

    if http is None:
      raise ValueError("Missing a valid http object.")

    # Special case for OAuth2Credentials-style objects which have not yet been
    # refreshed with an initial access_token.
    if getattr(http.request, 'credentials', None) is not None:
      creds = http.request.credentials
      if not getattr(creds, 'access_token', None):
        LOGGER.info('Attempting refresh to obtain initial access_token')
        creds.refresh(http)

    self._execute(http, self._order, self._requests)

    # Loop over all the requests and check for 401s. For each 401 request the
    # credentials should be refreshed and then sent again in a separate batch.
    redo_requests = {}
    redo_order = []

    for request_id in self._order:
      resp, content = self._responses[request_id]
      if resp['status'] == '401':
        redo_order.append(request_id)
        request = self._requests[request_id]
        self._refresh_and_apply_credentials(request, http)
        redo_requests[request_id] = request

    if redo_requests:
      self._execute(http, redo_order, redo_requests)

    # Now process all callbacks that are erroring, and raise an exception for
    # ones that return a non-2xx response? Or add extra parameter to callback
    # that contains an HttpError?

    for request_id in self._order:
      resp, content = self._responses[request_id]

      request = self._requests[request_id]
      callback = self._callbacks[request_id]

      response = None
      exception = None
      try:
        if resp.status >= 300:
          raise HttpError(resp, content, uri=request.uri)
        response = request.postproc(resp, content)
      except HttpError as e:
        exception = e

      # Per-request callback first, then the batch-wide callback.
      if callback is not None:
        callback(request_id, response, exception)
      if self._callback is not None:
        self._callback(request_id, response, exception)
1456
class HttpRequestMock(object):
  """A stand-in for HttpRequest that replays a canned response.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
                the model class. See model.JsonModel.response() as an example.
    """
    self.content = content
    self.postproc = postproc
    # Default to a plain 200 OK response when none was supplied.
    if resp is None:
      self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    else:
      self.resp = resp
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Run the mocked request.

    Same contract as HttpRequest.execute(), except nothing goes over the
    wire; the stored response is fed through the post-processing function.
    """
    return self.postproc(self.resp, self.content)
1488
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content, opt_expected_body) that should be
  returned when that method is called. None may also be passed in for the
  httplib2.Response, in which case a 200 OK response will be generated.
  If an opt_expected_body (str or dict) is provided, it will be compared to
  the body and UnexpectedBodyError will be raised on inequality.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'plus.activities.get': (None, response),
      }
    )
    googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content or raise an exception
  if check_unexpected is set to True. The methodId is taken from the rpcName
  in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses, check_unexpected=False):
    """Constructor for RequestMockBuilder

    The constructed object should be a callable object
    that can replace the class HttpResponse.

    responses - A dictionary that maps methodIds into tuples
                of (httplib2.Response, content). The methodId
                comes from the 'rpcName' field in the discovery
                document.
    check_unexpected - A boolean setting whether or not UnexpectedMethodError
                       should be raised on unsupplied method.
    """
    self.responses = responses
    self.check_unexpected = check_unexpected

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None, resumable=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    if methodId not in self.responses:
      # No canned response was registered for this method.
      if self.check_unexpected:
        raise UnexpectedMethodError(methodId=methodId)
      model = JsonModel(False)
      return HttpRequestMock(None, '{}', model.response)

    canned = self.responses[methodId]
    resp, content = canned[:2]
    if len(canned) > 2:
      # Test the body against the supplied expected_body.
      expected_body = canned[2]
      if bool(expected_body) != bool(body):
        # Not expecting a body and provided one
        # or expecting a body and not provided one.
        raise UnexpectedBodyError(expected_body, body)
      if isinstance(expected_body, str):
        expected_body = json.loads(expected_body)
      body = json.loads(body)
      if body != expected_body:
        raise UnexpectedBodyError(expected_body, body)
    return HttpRequestMock(resp, content, postproc)
1561
class HttpMock(object):
  """Mock of httplib2.Http that replays one canned response.

  Records the last request made so tests can inspect it via the uri,
  method, body and headers attributes.
  """

  def __init__(self, filename=None, headers=None):
    """
    Args:
      filename: string, absolute filename to read response from
      headers: dict, header to return with response
    """
    if headers is None:
      headers = {'status': '200'}
    if filename:
      # Use a context manager so the file handle is closed even if
      # read() raises.
      with open(filename, 'rb') as f:
        self.data = f.read()
    else:
      self.data = None
    self.response_headers = headers
    # Attributes recording the most recent request, for test inspection.
    # (The original assigned self.headers twice; once is enough.)
    self.headers = None
    self.uri = None
    self.method = None
    self.body = None

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Record the request and return the canned response."""
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers
    return httplib2.Response(self.response_headers), self.data
1599
class HttpMockSequence(object):
  """Mock of httplib2.Http that replays a scripted list of responses.

  Each call to request() consumes the next (headers, body) pair from the
  sequence supplied at construction time, so a test can script several
  successive HTTP exchanges:

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
    ])
    resp, content = http.request("http://examples.com")

  There are special values you can pass in for content to trigger
  behaviours that are helpful in testing.

  'echo_request_headers' means return the request headers in the response body
  'echo_request_headers_as_json' means return the request headers in
     the response body
  'echo_request_body' means return the request body in the response body
  'echo_request_uri' means return the request uri in the response body
  """

  def __init__(self, iterable):
    """
    Args:
      iterable: iterable, a sequence of pairs of (headers, body)
    """
    self._iterable = iterable
    self.follow_redirects = True

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    # Consume the next scripted (headers, body) pair.
    resp_headers, content = self._iterable.pop(0)
    if content == 'echo_request_headers':
      content = headers
    elif content == 'echo_request_headers_as_json':
      content = json.dumps(headers)
    elif content == 'echo_request_body':
      content = body.read() if hasattr(body, 'read') else body
    elif content == 'echo_request_uri':
      content = uri
    # Emulate httplib2, which hands back bytes for the content.
    if isinstance(content, six.text_type):
      content = content.encode('utf-8')
    return httplib2.Response(resp_headers), content
1654
def set_user_agent(http, user_agent):
  """Set the user-agent on every request.

  Args:
    http - An instance of httplib2.Http
        or something that acts like it.
    user_agent: string, the value for the user-agent header.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Most of the time the user-agent will be set doing auth, this is for the rare
  cases where you are accessing an unauthenticated endpoint.
  """
  wrapped_request = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Inject the user-agent header, then delegate to the real request."""
    if headers is None:
      headers = {}
    if 'user-agent' in headers:
      # Prepend our agent to whatever the caller already set.
      headers['user-agent'] = ' '.join((user_agent, headers['user-agent']))
    else:
      headers['user-agent'] = user_agent
    return wrapped_request(uri, method, body, headers,
                           redirections, connection_type)

  http.request = new_request
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
    http - An instance of httplib2.Http
        or something that acts like it.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  wrapped_request = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Rewrite a PATCH into a POST plus an X-HTTP-Method-Override header."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      if 'oauth_token' in headers.get('authorization', ''):
        LOGGER.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    return wrapped_request(uri, method, body, headers,
                           redirections, connection_type)

  http.request = new_request
  return http