 38  """ 
 39  Store-type extension that writes data to Amazon S3. 
 40   
 41  This extension requires a new configuration section <amazons3> and is intended 
 42  to be run immediately after the standard stage action, replacing the standard 
 43  store action.  Aside from its own configuration, it requires the options and 
 44  staging configuration sections in the standard Cedar Backup configuration file. 
 45  Since it is intended to replace the store action, it does not rely on any store 
 46  configuration. 
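
A sample configuration section looks something like the following (the bucket
name and encrypt command are illustrative values, not defaults; the optional
C{full_size_limit} and C{incr_size_limit} elements, which bound the size of
full and incremental backups, are omitted here)::

   <amazons3>
      <warn_midnite>Y</warn_midnite>
      <s3_bucket>example-backup-bucket</s3_bucket>
      <encrypt>/usr/bin/gpg -c --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}</encrypt>
   </amazons3>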

The underlying functionality relies on the U{AWS CLI interface
<http://aws.amazon.com/documentation/cli/>}.  Before you use this extension,
you need to set up your Amazon S3 account and configure the AWS CLI connection
per Amazon's documentation.  The extension assumes that the backup is being
executed as root, and switches over to the configured backup user to
communicate with AWS.  So, make sure you configure the AWS CLI as the backup
user, not as root.

You can optionally configure Cedar Backup to encrypt data before sending it
to S3.  To do that, provide a complete command line using the C{${input}} and
C{${output}} variables to represent the original input file and the encrypted
output file.  This command will be executed as the backup user.

For instance, you can use something like this with GPG::

   /usr/bin/gpg -c --no-use-agent --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}

The GPG mechanism depends on a strong passphrase for security.  One way to
generate a strong passphrase is to use your system's random number generator,
e.g.::

   dd if=/dev/urandom count=20 bs=1 | xxd -ps

(See U{StackExchange <http://security.stackexchange.com/questions/14867/gpg-encryption-security>}
for more details about that advice.)  If you decide to use encryption, make sure
you save off the passphrase in a safe place, so you can get at your backup data
later if you need to.  And obviously, make sure to set permissions on the
passphrase file so it can be read only by the backup user.

This extension was written for and tested on Linux.  It will throw an exception
if run on Windows.

@author: Kenneth J. Pronovici <pronovic@ieee.org>
"""


import sys
import os
import logging
import tempfile
import datetime
import json
import shutil

from CedarBackup2.filesystem import FilesystemList, BackupFileList
from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot, changeOwnership, isStartOfWeek
from CedarBackup2.util import displayBytes, UNIT_BYTES
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addBooleanNode, addStringNode
from CedarBackup2.xmlutil import readFirstChild, readString, readBoolean
from CedarBackup2.actions.util import writeIndicatorFile
from CedarBackup2.actions.constants import DIR_TIME_FORMAT, STAGE_INDICATOR
from CedarBackup2.config import ByteQuantity, readByteQuantity, addByteQuantityNode

logger = logging.getLogger("CedarBackup2.log.extend.amazons3")

SU_COMMAND    = [ "su" ]
AWS_COMMAND   = [ "aws" ]

STORE_INDICATOR = "cback.amazons3"


class AmazonS3Config(object):

   """
   Class representing Amazon S3 configuration.

   Amazon S3 configuration is used for storing backup data in Amazon's S3
   cloud storage using the C{aws} command line tool.

   The following restrictions exist on data in this class:

      - The s3Bucket value must be a non-empty string
      - The encryptCommand value, if set, must be a non-empty string
      - The full backup size limit, if set, must be a ByteQuantity >= 0
      - The incremental backup size limit, if set, must be a ByteQuantity >= 0

   @sort: __init__, __repr__, __str__, __cmp__, warnMidnite, s3Bucket,
          encryptCommand, fullBackupSizeLimit, incrementalBackupSizeLimit
   """

   def __init__(self, warnMidnite=None, s3Bucket=None, encryptCommand=None,
                fullBackupSizeLimit=None, incrementalBackupSizeLimit=None):
      """
      Constructor for the C{AmazonS3Config} class.

      @param warnMidnite: Whether to generate warnings for crossing midnite.
      @param s3Bucket: Name of the Amazon S3 bucket in which to store the data
      @param encryptCommand: Command used to encrypt backup data before upload to S3
      @param fullBackupSizeLimit: Maximum size of a full backup, a ByteQuantity
      @param incrementalBackupSizeLimit: Maximum size of an incremental backup, a ByteQuantity

      @raise ValueError: If one of the values is invalid.
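
      Usage sketch (the bucket name is illustrative, not a default)::

         amazons3 = AmazonS3Config(s3Bucket="example-backup-bucket", warnMidnite=True)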
152        """ 
153        self._warnMidnite = None 
154        self._s3Bucket = None 
155        self._encryptCommand = None 
156        self._fullBackupSizeLimit = None 
157        self._incrementalBackupSizeLimit = None 
158        self.warnMidnite = warnMidnite 
159        self.s3Bucket = s3Bucket 
160        self.encryptCommand = encryptCommand 
161        self.fullBackupSizeLimit = fullBackupSizeLimit 
162        self.incrementalBackupSizeLimit = incrementalBackupSizeLimit 

   def __repr__(self):
      """
      Official string representation for class instance.
      """
      return "AmazonS3Config(%s, %s, %s, %s, %s)" % (self.warnMidnite, self.s3Bucket, self.encryptCommand,
                                                     self.fullBackupSizeLimit, self.incrementalBackupSizeLimit)

   def __str__(self):
      """
      Informal string representation for class instance.
      """
      return self.__repr__()

   def _setWarnMidnite(self, value):
      """
      Property target used to set the midnite warning flag.
      No validations, but we normalize the value to C{True} or C{False}.
      """
      if value:
         self._warnMidnite = True
      else:
         self._warnMidnite = False

   def _getWarnMidnite(self):
      """
      Property target used to get the midnite warning flag.
      """
      return self._warnMidnite

   def _setS3Bucket(self, value):
      """
      Property target used to set the S3 bucket.
      """
      if value is not None:
         if len(value) < 1:
            raise ValueError("S3 bucket must be non-empty string.")
      self._s3Bucket = value

   def _getS3Bucket(self):
      """
      Property target used to get the S3 bucket.
      """
      return self._s3Bucket

   def _setEncryptCommand(self, value):
      """
      Property target used to set the encrypt command.
      """
      if value is not None:
         if len(value) < 1:
            raise ValueError("Encrypt command must be non-empty string.")
      self._encryptCommand = value

   def _getEncryptCommand(self):
      """
      Property target used to get the encrypt command.
      """
      return self._encryptCommand

   def _setFullBackupSizeLimit(self, value):
      """
      Property target used to set the full backup size limit.
      The value must be a C{ByteQuantity}, or a quantity (such as an integer
      number of bytes >= 0) that can be converted to one.
      @raise ValueError: If the value is not valid.
      """
      if value is None:
         self._fullBackupSizeLimit = None
      else:
         if isinstance(value, ByteQuantity):
            self._fullBackupSizeLimit = value
         else:
            self._fullBackupSizeLimit = ByteQuantity(value, UNIT_BYTES)

   def _getFullBackupSizeLimit(self):
      """
      Property target used to get the full backup size limit.
      """
      return self._fullBackupSizeLimit

   def _setIncrementalBackupSizeLimit(self, value):
      """
      Property target used to set the incremental backup size limit.
      The value must be a C{ByteQuantity}, or a quantity (such as an integer
      number of bytes >= 0) that can be converted to one.
      @raise ValueError: If the value is not valid.
      """
      if value is None:
         self._incrementalBackupSizeLimit = None
      else:
         if isinstance(value, ByteQuantity):
            self._incrementalBackupSizeLimit = value
         else:
            self._incrementalBackupSizeLimit = ByteQuantity(value, UNIT_BYTES)

   def _getIncrementalBackupSizeLimit(self):
      """
      Property target used to get the incremental backup size limit.
      """
      return self._incrementalBackupSizeLimit

   warnMidnite = property(_getWarnMidnite, _setWarnMidnite, None, doc="Whether to generate warnings for crossing midnite.")
   s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 bucket in which to store data")
   encryptCommand = property(_getEncryptCommand, _setEncryptCommand, None, doc="Command used to encrypt data before upload to S3")
   fullBackupSizeLimit = property(_getFullBackupSizeLimit, _setFullBackupSizeLimit, None,
                                  doc="Maximum size of a full backup, as a ByteQuantity")
   incrementalBackupSizeLimit = property(_getIncrementalBackupSizeLimit, _setIncrementalBackupSizeLimit, None,
                                         doc="Maximum size of an incremental backup, as a ByteQuantity")


class LocalConfig(object):

   """
   Class representing this extension's configuration document.

   This is not a general-purpose configuration object like the main Cedar
   Backup configuration object.  Instead, it just knows how to parse and emit
   amazons3-specific configuration values.  Third parties who need to read and
   write configuration related to this extension should access it through the
   constructor, C{validate} and C{addConfig} methods.

   @note: Lists within this class are "unordered" for equality comparisons.

   @sort: __init__, __repr__, __str__, __cmp__, amazons3, validate, addConfig
   """

   def __init__(self, xmlData=None, xmlPath=None, validate=True):
      """
      Initializes a configuration object.

      If you initialize the object without passing either C{xmlData} or
      C{xmlPath} then configuration will be empty and will be invalid until it
      is filled in properly.

      No reference to the original XML data or original path is saved off by
      this class.  Once the data has been parsed (successfully or not) this
      original information is discarded.

      Unless the C{validate} argument is C{False}, the L{LocalConfig.validate}
      method will be called (with its default arguments) against configuration
      after successfully parsing any passed-in XML.  Keep in mind that even if
      C{validate} is C{False}, it might not be possible to parse the passed-in
      XML document if lower-level validations fail.

      @note: It is strongly suggested that the C{validate} option always be set
      to C{True} (the default) unless there is a specific need to read in
      invalid configuration from disk.

      @param xmlData: XML data representing configuration.
      @type xmlData: String data.

      @param xmlPath: Path to an XML file on disk.
      @type xmlPath: Absolute path to a file on disk.

      @param validate: Validate the document after parsing it.
      @type validate: Boolean true/false.

      @raise ValueError: If both C{xmlData} and C{xmlPath} are passed-in.
      @raise ValueError: If the XML data in C{xmlData} or C{xmlPath} cannot be parsed.
      @raise ValueError: If the parsed configuration document is not valid.
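
      Usage sketch (the path shown is illustrative)::

         local = LocalConfig(xmlPath="/etc/cback.conf")
         print local.amazons3.s3Bucket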
      """
      self._amazons3 = None
      self.amazons3 = None
      if xmlData is not None and xmlPath is not None:
         raise ValueError("Use either xmlData or xmlPath, but not both.")
      if xmlData is not None:
         self._parseXmlData(xmlData)
         if validate:
            self.validate()
      elif xmlPath is not None:
         xmlData = open(xmlPath).read()
         self._parseXmlData(xmlData)
         if validate:
            self.validate()

   def __repr__(self):
      """
      Official string representation for class instance.
      """
      return "LocalConfig(%s)" % (self.amazons3)

   def __str__(self):
      """
      Informal string representation for class instance.
      """
      return self.__repr__()

   def __cmp__(self, other):
      """
      Definition of equals operator for this class.
      Lists within this class are "unordered" for equality comparisons.
      @param other: Other object to compare to.
      @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
      """
      if other is None:
         return 1
      if self.amazons3 != other.amazons3:
         if self.amazons3 < other.amazons3:
            return -1
         else:
            return 1
      return 0

   def _setAmazonS3(self, value):
      """
      Property target used to set the amazons3 configuration value.
      If not C{None}, the value must be an C{AmazonS3Config} object.
      @raise ValueError: If the value is not an C{AmazonS3Config}
      """
      if value is None:
         self._amazons3 = None
      else:
         if not isinstance(value, AmazonS3Config):
            raise ValueError("Value must be an AmazonS3Config object.")
         self._amazons3 = value

   def _getAmazonS3(self):
      """
      Property target used to get the amazons3 configuration value.
      """
      return self._amazons3

   amazons3 = property(_getAmazonS3, _setAmazonS3, None, doc="AmazonS3 configuration in terms of a C{AmazonS3Config} object.")

   def validate(self):
      """
      Validates configuration represented by the object.

      AmazonS3 configuration must be filled in.  Within that, the s3Bucket
      target must also be filled in.

      @raise ValueError: If one of the validations fails.
      """
      if self.amazons3 is None:
         raise ValueError("AmazonS3 section is required.")
      if self.amazons3.s3Bucket is None:
         raise ValueError("AmazonS3 s3Bucket must be set.")

   def addConfig(self, xmlDom, parentNode):
      """
      Adds an <amazons3> configuration section as the next child of a parent.

      Third parties should use this function to write configuration related to
      this extension.

      We add the following fields to the document::

         warnMidnite                 //cb_config/amazons3/warn_midnite
         s3Bucket                    //cb_config/amazons3/s3_bucket
         encryptCommand              //cb_config/amazons3/encrypt
         fullBackupSizeLimit         //cb_config/amazons3/full_size_limit
         incrementalBackupSizeLimit  //cb_config/amazons3/incr_size_limit

      @param xmlDom: DOM tree as from C{impl.createDocument()}.
      @param parentNode: Parent that the section should be appended to.
      """
      if self.amazons3 is not None:
         sectionNode = addContainerNode(xmlDom, parentNode, "amazons3")
         addBooleanNode(xmlDom, sectionNode, "warn_midnite", self.amazons3.warnMidnite)
         addStringNode(xmlDom, sectionNode, "s3_bucket", self.amazons3.s3Bucket)
         addStringNode(xmlDom, sectionNode, "encrypt", self.amazons3.encryptCommand)
         addByteQuantityNode(xmlDom, sectionNode, "full_size_limit", self.amazons3.fullBackupSizeLimit)
         addByteQuantityNode(xmlDom, sectionNode, "incr_size_limit", self.amazons3.incrementalBackupSizeLimit)

   def _parseXmlData(self, xmlData):
      """
      Internal method to parse an XML string into the object.

      This method parses the XML document into a DOM tree (C{xmlDom}) and then
      calls a static method to parse the amazons3 configuration section.

      @param xmlData: XML data to be parsed
      @type xmlData: String data

      @raise ValueError: If the XML cannot be successfully parsed.
      """
      (xmlDom, parentNode) = createInputDom(xmlData)
      self._amazons3 = LocalConfig._parseAmazonS3(parentNode)

   @staticmethod
   def _parseAmazonS3(parentNode):
      """
      Parses an amazons3 configuration section.

      We read the fields listed in L{addConfig} from beneath the parent node.

      @param parentNode: Parent node to search beneath.

      @return: C{AmazonS3Config} object or C{None} if the section does not exist.
      @raise ValueError: If some filled-in value is invalid.
      """
      amazons3 = None
      section = readFirstChild(parentNode, "amazons3")
      if section is not None:
         amazons3 = AmazonS3Config()
         amazons3.warnMidnite = readBoolean(section, "warn_midnite")
         amazons3.s3Bucket = readString(section, "s3_bucket")
         amazons3.encryptCommand = readString(section, "encrypt")
         amazons3.fullBackupSizeLimit = readByteQuantity(section, "full_size_limit")
         amazons3.incrementalBackupSizeLimit = readByteQuantity(section, "incr_size_limit")
      return amazons3


def executeAction(configPath, options, config):
   """
   Executes the amazons3 backup action.

   @param configPath: Path to configuration file on disk.
   @type configPath: String representing a path on disk.

   @param options: Program command-line options.
   @type options: Options object.

   @param config: Program configuration.
   @type config: Config object.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there are I/O problems reading or writing files
   """
   logger.debug("Executing amazons3 extended action.")
   if not isRunningAsRoot():
      logger.error("Error: the amazons3 extended action must be run as root.")
      raise ValueError("The amazons3 extended action must be run as root.")
   if sys.platform == "win32":
      logger.error("Error: the amazons3 extended action is not supported on Windows.")
      raise ValueError("The amazons3 extended action is not supported on Windows.")
   if config.options is None or config.stage is None:
      raise ValueError("Cedar Backup configuration is not properly filled in.")
   local = LocalConfig(xmlPath=configPath)
   stagingDirs = _findCorrectDailyDir(options, config, local)
   _applySizeLimits(options, config, local, stagingDirs)
   _writeToAmazonS3(config, local, stagingDirs)
   _writeStoreIndicator(config, stagingDirs)
   logger.info("Executed the amazons3 extended action successfully.")


def _findCorrectDailyDir(options, config, local):
   """
   Finds the correct daily staging directory to be written to Amazon S3.

   This is substantially similar to the same function in store.py.  The
   main difference is that it doesn't rely on store configuration at all.

   @param options: Options object.
   @param config: Config object.
   @param local: Local config object.

   @return: Correct staging dir, as a dict mapping directory to date suffix.
   @raise IOError: If the staging directory cannot be found.
   """
   oneDay = datetime.timedelta(days=1)
   today = datetime.date.today()
   yesterday = today - oneDay
   tomorrow = today + oneDay
   todayDate = today.strftime(DIR_TIME_FORMAT)
   yesterdayDate = yesterday.strftime(DIR_TIME_FORMAT)
   tomorrowDate = tomorrow.strftime(DIR_TIME_FORMAT)
   todayPath = os.path.join(config.stage.targetDir, todayDate)
   yesterdayPath = os.path.join(config.stage.targetDir, yesterdayDate)
   tomorrowPath = os.path.join(config.stage.targetDir, tomorrowDate)
   todayStageInd = os.path.join(todayPath, STAGE_INDICATOR)
   yesterdayStageInd = os.path.join(yesterdayPath, STAGE_INDICATOR)
   tomorrowStageInd = os.path.join(tomorrowPath, STAGE_INDICATOR)
   todayStoreInd = os.path.join(todayPath, STORE_INDICATOR)
   yesterdayStoreInd = os.path.join(yesterdayPath, STORE_INDICATOR)
   tomorrowStoreInd = os.path.join(tomorrowPath, STORE_INDICATOR)
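   # Each candidate is a date-formatted subdirectory of the stage target
   # directory (e.g. something like /opt/stage/2005/02/10), and the stage and
   # store indicator files mark whether a day's data has been staged/stored.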
   if options.full:
      if os.path.isdir(todayPath) and os.path.exists(todayStageInd):
         logger.info("Amazon S3 process will use current day's staging directory [%s]", todayPath)
         return { todayPath:todayDate }
      raise IOError("Unable to find staging directory to process (only tried today due to full option).")
   else:
      if os.path.isdir(todayPath) and os.path.exists(todayStageInd) and not os.path.exists(todayStoreInd):
         logger.info("Amazon S3 process will use current day's staging directory [%s]", todayPath)
         return { todayPath:todayDate }
      elif os.path.isdir(yesterdayPath) and os.path.exists(yesterdayStageInd) and not os.path.exists(yesterdayStoreInd):
         logger.info("Amazon S3 process will use previous day's staging directory [%s]", yesterdayPath)
         if local.amazons3.warnMidnite:
            logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
         return { yesterdayPath:yesterdayDate }
      elif os.path.isdir(tomorrowPath) and os.path.exists(tomorrowStageInd) and not os.path.exists(tomorrowStoreInd):
         logger.info("Amazon S3 process will use next day's staging directory [%s]", tomorrowPath)
         if local.amazons3.warnMidnite:
            logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
         return { tomorrowPath:tomorrowDate }
      raise IOError("Unable to find unused staging directory to process (tried today, yesterday, tomorrow).")


def _applySizeLimits(options, config, local, stagingDirs):
   """
   Apply size limits, throwing an exception if any limits are exceeded.

   Size limits are optional.  If a limit is set to None, it does not apply.
   The full size limit applies if the full option is set or if today is the
   start of the week.  The incremental size limit applies otherwise.  Limits
   are applied to the total size of all the relevant staging directories.

   @param options: Options object.
   @param config: Config object.
   @param local: Local config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.

   @raise ValueError: Under many generic error conditions
   @raise ValueError: If a size limit has been exceeded
   """
   if options.full or isStartOfWeek(config.options.startingDay):
      logger.debug("Using Amazon S3 size limit for full backups.")
      limit = local.amazons3.fullBackupSizeLimit
   else:
      logger.debug("Using Amazon S3 size limit for incremental backups.")
      limit = local.amazons3.incrementalBackupSizeLimit
   if limit is None:
      logger.debug("No Amazon S3 size limit will be applied.")
   else:
      logger.debug("Amazon S3 size limit is: %s", limit)
      contents = BackupFileList()
      for stagingDir in stagingDirs:
         contents.addDirContents(stagingDir)
      total = contents.totalSize()
      logger.debug("Amazon S3 backup size is: %s", displayBytes(total))
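      # ByteQuantity exposes a plain byte count via its bytes attribute, so
      # the comparison below is always bytes-to-bytes.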
      if total > limit.bytes:
         logger.error("Amazon S3 size limit exceeded: %s > %s", displayBytes(total), limit)
         raise ValueError("Amazon S3 size limit exceeded: %s > %s" % (displayBytes(total), limit))
      else:
         logger.info("Total size does not exceed Amazon S3 size limit, so backup can continue.")


def _writeToAmazonS3(config, local, stagingDirs):
   """
   Writes the indicated staging directories to an Amazon S3 bucket.

   Each of the staging directories listed in C{stagingDirs} will be written to
   the configured Amazon S3 bucket from local configuration.  The directories
   will be placed into the bucket at the root by date, so staging directory
   C{/opt/stage/2005/02/10} will be placed into the S3 bucket at C{/2005/02/10}.
   If an encrypt command is provided, the files will be encrypted first.

   @param config: Config object.
   @param local: Local config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there is a problem writing to Amazon S3
   """
   for stagingDir in stagingDirs.keys():
      logger.debug("Storing stage directory to Amazon S3 [%s].", stagingDir)
      dateSuffix = stagingDirs[stagingDir]
      s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
      logger.debug("S3 bucket URL is [%s]", s3BucketUrl)
      _clearExistingBackup(config, s3BucketUrl)
      if local.amazons3.encryptCommand is None:
         logger.debug("Encryption is disabled; files will be uploaded in cleartext.")
         _uploadStagingDir(config, stagingDir, s3BucketUrl)
         _verifyUpload(config, stagingDir, s3BucketUrl)
      else:
         logger.debug("Encryption is enabled; files will be uploaded after being encrypted.")
         encryptedDir = tempfile.mkdtemp(dir=config.options.workingDir)
         changeOwnership(encryptedDir, config.options.backupUser, config.options.backupGroup)
         try:
            _encryptStagingDir(config, local, stagingDir, encryptedDir)
            _uploadStagingDir(config, encryptedDir, s3BucketUrl)
            _verifyUpload(config, encryptedDir, s3BucketUrl)
         finally:
            if os.path.exists(encryptedDir):
               shutil.rmtree(encryptedDir)


def _writeStoreIndicator(config, stagingDirs):
   """
   Writes a store indicator file into each processed staging directory.
   @param config: Config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.
   """
   for stagingDir in stagingDirs.keys():
      writeIndicatorFile(stagingDir, STORE_INDICATOR, config.options.backupUser, config.options.backupGroup)


def _clearExistingBackup(config, s3BucketUrl):
   """
   Clear any existing backup files for an S3 bucket URL.
   @param config: Config object.
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   actualCommand = "%s s3 rm --recursive %s/" % (awsCommand[0], s3BucketUrl)
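   # The command runs via su as the backup user, so the net effect is roughly
   # equivalent to: su backup -c "aws s3 rm --recursive s3://bucket/2005/02/10/"
   # (the user name and bucket URL here are illustrative).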
   result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to clear existing backup for [%s]." % (result, s3BucketUrl))
   logger.debug("Completed clearing any existing backup in S3 for [%s]", s3BucketUrl)


def _uploadStagingDir(config, stagingDir, s3BucketUrl):
   """
   Upload the contents of a staging directory out to the Amazon S3 cloud.
   @param config: Config object.
   @param stagingDir: Staging directory to upload
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   actualCommand = "%s s3 cp --recursive --exclude \"*cback.*\" %s/ %s/" % (awsCommand[0], stagingDir, s3BucketUrl)
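   # The --exclude pattern keeps Cedar Backup's cback.* indicator files out of
   # the upload; _verifyUpload applies the same exclusion when comparing.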
   result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to upload staging directory to [%s]." % (result, s3BucketUrl))
   logger.debug("Completed uploading staging dir [%s] to [%s]", stagingDir, s3BucketUrl)


def _verifyUpload(config, stagingDir, s3BucketUrl):
   """
   Verify that a staging directory was properly uploaded to the Amazon S3 cloud.
   @param config: Config object.
   @param stagingDir: Staging directory to verify
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/", 1)
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   query = "Contents[].{Key: Key, Size: Size}"
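   # The JMESPath query trims the listing to key/size pairs, so the command
   # output is JSON along these lines (values are illustrative):
   #    [ { "Key": "2005/02/10/host/file.tar.gz", "Size": 1024 }, ... ]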
   actualCommand = "%s s3api list-objects --bucket %s --prefix %s --query '%s'" % (awsCommand[0], bucket, prefix, query)
   (result, data) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to verify upload to [%s]." % (result, s3BucketUrl))
   contents = { }
   for entry in json.loads("".join(data)):
      key = entry["Key"].replace(prefix, "")
      size = long(entry["Size"])
      contents[key] = size
   files = FilesystemList()
   files.excludeBasenamePatterns = [ r"cback\..*", ]
   files.addDirContents(stagingDir)
   for entry in files:
      if os.path.isfile(entry):
         key = entry.replace(stagingDir, "")
         size = long(os.stat(entry).st_size)
         if key not in contents:
            raise IOError("File was apparently not uploaded: [%s]" % entry)
         else:
            if size != contents[key]:
               raise IOError("File size differs [%s]: expected %s bytes but found %s bytes in S3" % (entry, size, contents[key]))
   logger.debug("Completed verifying upload from [%s] to [%s].", stagingDir, s3BucketUrl)


def _encryptStagingDir(config, local, stagingDir, encryptedDir):
   """
   Encrypt a staging directory, creating a new directory in the process.
   @param config: Config object.
   @param local: Local config object.
   @param stagingDir: Staging directory to use as source
   @param encryptedDir: Target directory into which encrypted files should be written
   """
   suCommand = resolveCommand(SU_COMMAND)
   files = FilesystemList()
   files.addDirContents(stagingDir)
   for cleartext in files:
      if os.path.isfile(cleartext):
         encrypted = "%s%s" % (encryptedDir, cleartext.replace(stagingDir, ""))
         if long(os.stat(cleartext).st_size) == 0:
            open(encrypted, 'a').close()  # empty files are just touched, not encrypted
         else:
            actualCommand = local.amazons3.encryptCommand.replace("${input}", cleartext).replace("${output}", encrypted)
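            # By this point, a configured template such as
            #    /usr/bin/gpg -c ... -o ${output} ${input}
            # has had ${input} and ${output} replaced with the real paths.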
            subdir = os.path.dirname(encrypted)
            if not os.path.isdir(subdir):
               os.makedirs(subdir)
               changeOwnership(subdir, config.options.backupUser, config.options.backupGroup)
            result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
            if result != 0:
               raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
   logger.debug("Completed encrypting staging directory [%s] into [%s]", stagingDir, encryptedDir)