The following 45 code examples, extracted from open-source Python projects, illustrate how to use log.log().
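Most of the examples below (notably the Foreman provisioning ones) assume a project-local log module that exposes level constants and a log.log(level, message) function. This is a minimal sketch of such a module, with names inferred from the calls in the examples rather than from any real library API:

import sys

LOG_DEBUG, LOG_INFO, LOG_WARN, LOG_ERROR = range(4)
LOGLEVEL = LOG_INFO  # Threshold; messages below it are suppressed.

_NAMES = {LOG_DEBUG: "DEBUG", LOG_INFO: "INFO",
          LOG_WARN: "WARN", LOG_ERROR: "ERROR"}

def log(level, message):
    """Print message to stderr if level is at or above the module threshold."""
    if level >= LOGLEVEL:
        sys.stderr.write("[{0}] {1}\n".format(_NAMES[level], message))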
def process_cleanup_arch(self):
    log.log(log.LOG_INFO, "Processing Cleanup of Architectures")
    for arch in self.get_config_section('cleanup-architecture'):
        try:
            self.validator.cleanup_arch(arch)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot delete Architecture '{0}': YAML validation Error: {1}".format(arch['name'], e))
            continue
        try:
            self.fm.architectures.show(arch['name'])['id']
            log.log(log.LOG_INFO, "Delete Architecture '{0}'".format(arch['name']))
            self.fm.architectures.destroy(arch['name'])
        except:
            log.log(log.LOG_WARN, "Architecture '{0}' already absent.".format(arch['name']))
def process_cleanup_computeprfl(self):
    log.log(log.LOG_INFO, "Processing Cleanup of Compute profiles")
    for computeprfl in self.get_config_section('cleanup-compute-profile'):
        try:
            self.validator.cleanup_computeprfl(computeprfl)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot delete Compute profile '{0}': YAML validation Error: {1}".format(computeprfl['name'], e))
            continue
        try:
            self.fm.compute_profiles.show(computeprfl['name'])['id']
            log.log(log.LOG_INFO, "Delete Compute profile '{0}'".format(computeprfl['name']))
            self.fm.compute_profiles.destroy(computeprfl['name'])
        except:
            log.log(log.LOG_WARN, "Compute profile '{0}' already absent.".format(computeprfl['name']))
def process_cleanup_medium(self):
    log.log(log.LOG_INFO, "Processing Cleanup of Media")
    medialist = self.fm.media.index(per_page=99999)['results']
    for medium in self.get_config_section('cleanup-medium'):
        try:
            self.validator.cleanup_medium(medium)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot delete Medium '{0}': YAML validation Error: {1}".format(medium['name'], e))
            continue
        medium_deleted = False
        # fm.media.show(name) does not work, we need to iterate over fm.media.index()
        for mediac in medialist:
            if mediac['name'] == medium['name']:
                medium_deleted = True
                log.log(log.LOG_INFO, "Delete Medium '{0}'".format(medium['name']))
                self.fm.media.destroy(medium['name'])
                continue
        if not medium_deleted:
            log.log(log.LOG_WARN, "Medium '{0}' already absent.".format(medium['name']))
def process_cleanup_ptable(self):
    log.log(log.LOG_INFO, "Processing Cleanup of Partition Tables")
    for ptable in self.get_config_section('cleanup-partition-table'):
        try:
            self.validator.cleanup_ptable(ptable)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot delete Partition Table '{0}': YAML validation Error: {1}".format(ptable['name'], e))
            continue
        try:
            self.fm.ptables.show(ptable['name'])['id']
            log.log(log.LOG_INFO, "Delete Partition Table '{0}'".format(ptable['name']))
            self.fm.ptables.destroy(ptable['name'])
        except:
            log.log(log.LOG_WARN, "Partition Table '{0}' already absent.".format(ptable['name']))
def process_config_enviroment(self):
    log.log(log.LOG_INFO, "Processing Environments")
    envlist = self.fm.environments.index(per_page=99999)['results']
    for env in self.get_config_section('environment'):
        try:
            self.validator.enviroment(env)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Environment '{0}': YAML validation Error: {1}".format(env['name'], e))
            continue
        env_id = False
        # fm.environments.show(name) does not work, we need to iterate over fm.environments.index()
        for envc in envlist:
            if env['name'] == envc['name']:
                env_id = envc['id']
                log.log(log.LOG_DEBUG, "Environment '{0}' (id={1}) already present.".format(env['name'], env_id))
                continue
        if not env_id:
            log.log(log.LOG_INFO, "Create Environment '{0}'".format(env['name']))
            self.fm.environments.create(environment={'name': env['name']})
def process_config_model(self):
    log.log(log.LOG_INFO, "Processing Models")
    for model in self.get_config_section('model'):
        try:
            self.validator.model(model)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Model '{0}': YAML validation Error: {1}".format(model['name'], e))
            continue
        try:
            model_id = self.fm.models.show(model['name'])['id']
            log.log(log.LOG_DEBUG, "Model '{0}' (id={1}) already present.".format(model['name'], model_id))
        except:
            log.log(log.LOG_INFO, "Create Model '{0}'".format(model['name']))
            model_tpl = {
                'name': model['name'],
                'info': model['info'],
                'vendor_class': model['vendor-class'],
                'hardware_model': model['hardware-model']
            }
            self.fm.models.create(model=model_tpl)
def process_config_medium(self):
    log.log(log.LOG_INFO, "Processing Media")
    medialist = self.fm.media.index(per_page=99999)['results']
    for medium in self.get_config_section('medium'):
        try:
            self.validator.medium(medium)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Media '{0}': YAML validation Error: {1}".format(medium['name'], e))
            continue
        medium_id = False
        # fm.media.show(name) does not work, we need to iterate over fm.media.index()
        for mediac in medialist:
            if mediac['name'] == medium['name']:
                medium_id = mediac['id']
                log.log(log.LOG_DEBUG, "Medium '{0}' (id={1}) already present.".format(medium['name'], medium_id))
        if not medium_id:
            log.log(log.LOG_INFO, "Create Medium '{0}'".format(medium['name']))
            medium_tpl = {
                'name': medium['name'],
                'path': medium['path'],
                'os_family': medium['os-family']
            }
            self.fm.media.create(medium=medium_tpl)
def process_config_smartproxy(self):
    log.log(log.LOG_INFO, "Processing Smart Proxies")
    for proxy in self.get_config_section('smart-proxy'):
        try:
            proxy_id = self.fm.smart_proxies.show(proxy['name'])['id']
            log.log(log.LOG_DEBUG, "Proxy '{0}' (id={1}) already present.".format(proxy['name'], proxy_id))
        except:
            log.log(log.LOG_INFO, "Create Smart Proxy '{0}'".format(proxy['name']))
            proxy_tpl = {
                'name': proxy['name'],
                'url': proxy['url'],
            }
            try:
                self.fm.smart_proxies.create(smart_proxy=proxy_tpl)
            except:
                log.log(log.LOG_WARN, "Cannot create Smart Proxy '{0}'. Is the Proxy online?".format(proxy['name']))
def process_config_ptable(self):
    log.log(log.LOG_INFO, "Processing Partition Tables")
    for ptable in self.get_config_section('partition-table'):
        try:
            self.validator.ptable(ptable)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Partition Table '{0}': YAML validation Error: {1}".format(ptable['name'], e))
            continue
        try:
            ptable_id = self.fm.ptables.show(ptable['name'])['id']
            log.log(log.LOG_DEBUG, "Partition Table '{0}' (id={1}) already present.".format(ptable['name'], ptable_id))
        except:
            log.log(log.LOG_INFO, "Create Partition Table '{0}'".format(ptable['name']))
            ptable_tpl = {
                'name': ptable['name'],
                'layout': ptable['layout'],
                'snippet': ptable['snippet'],
                'audit_comment': ptable['audit-comment'],
                'locked': ptable['locked'],
                'os_family': ptable['os-family']
            }
            self.fm.ptables.create(ptable=ptable_tpl)
def add_details(file_name, title, artist, album, lyrics=""):
    '''
    Adds the details to song
    '''
    tags = EasyMP3(file_name)
    tags["title"] = title
    tags["artist"] = artist
    tags["album"] = album
    tags.save()

    tags = ID3(file_name)
    uslt_output = USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics)
    tags["USLT::'eng'"] = uslt_output
    tags.save(file_name)

    log.log("> Adding properties")
    log.log_indented("[*] Title: %s" % title)
    log.log_indented("[*] Artist: %s" % artist)
    log.log_indented("[*] Album: %s" % album)
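The music-tagging examples (add_details, get_details_spotify, add_albumart, get_details_letssingit, fix_music) assume a different, single-argument log helper with indented, error, and success variants. A hypothetical sketch, inferred only from the call sites and not from any published API:

def log(message):
    """Print a top-level status message."""
    print(message)

def log_indented(message):
    """Print a message nested under the current step."""
    print("    " + message)

def log_error(message, indented=False):
    """Print an error message, optionally indented."""
    print(("    " if indented else "") + message)

def log_success():
    """Print a final success marker."""
    print("Done.")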
def do_quit(self, arg):
    """Quit fuzzbunch"""
    try:
        opencontracts = [item.name
                         for item in self.session.get_itemlist()
                         if item.value.has_opencontract()]
        if opencontracts:
            self.io.print_opensessions({'sessions': opencontracts})
            line = self.io.get_input("Really quit [n] ? ")
            if line.lower() not in ("yes", "y", "q", "quit"):
                return
        if self.log:
            self.log.close()
        return True
    except:
        pass
    return True
def update_dist(self, indexcomp, comp, norm, last_comp=False, classifier=None):
    """
    Calculate the distance from the supplied pattern to the stored pattern
    """
    # Called from CM1KEmulator.update_all_neuron_dists(), i.e., whenever COMP or LCOMP is updated
    log.trace("Neuron.update_dist()")

    if norm == cm1k.CM1KDistNorm.l1:
        self.dist += abs(comp - self.pattern[indexcomp])
    elif norm == cm1k.CM1KDistNorm.lsup:
        self.dist = max(abs(comp - self.pattern[indexcomp]), self.dist)
    elif norm == cm1k.CM1KDistNorm.euc:
        self.dist += (comp - self.pattern[indexcomp]) * (comp - self.pattern[indexcomp])

    if last_comp:
        if norm == cm1k.CM1KDistNorm.euc:
            self.dist = int(round(math.sqrt(self.dist)))
        if (classifier == cm1k.CM1KClassifier.rbf and self.dist < self.aif) or classifier == cm1k.CM1KClassifier.knn:
            # The neuron has fired
            self.chip.store_firing_neuron(self)
def store_firing_neuron(self, neuron):
    # Called from Neuron.update_dist(), i.e., whenever COMP or LCOMP is updated
    # Called from Neuron.broadcast()
    # Called by individual neurons whenever they fire
    log.trace("CM1KEmulator.store_firing_neuron()")

    # NOTE: firing_neurons won't be sorted until all neurons are added (see update_all_neuron_dists())
    # Only store a firing neuron if its dist-and-cat combination is unique (CM1K Hardware Manual, p. 17)
    unique = True
    for neuron2 in self.firing_neurons:
        if neuron2.dist == neuron.dist and neuron2.cat == neuron.cat:
            unique = False
            break
    if unique:
        insert_pos = len(self.firing_neurons)
        for i, neuron2 in enumerate(self.firing_neurons):
            # This must be <=, not <, so that earlier neurons win, a la the CM1K spec
            if neuron2.dist <= neuron.dist:
                insert_pos = i
                break
        self.firing_neurons.insert(insert_pos, neuron)
def get(db, table, field, matchfield, matchvalue):
    init(db)
    matchvalue = matchvalue.encode('utf-8').lower()
    try:
        result = db.execute("SELECT {} FROM {} WHERE {}='{}';".format(field, table, matchfield, matchvalue)).fetchone()
        if result:
            return result[0].encode('utf-8')
        else:
            return False
    except:
        log.log("***ERROR: SELECT {} FROM {} WHERE {}='{}';".format(field, table, matchfield, matchvalue))
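Note that the string-formatted SQL above is vulnerable to injection through matchvalue. A safer variant binds the value as a parameter (sketch assuming a sqlite3-style DB-API connection; the name get_safe is hypothetical). Identifiers cannot be bound, so field, table, and matchfield must still come from trusted code:

def get_safe(db, table, field, matchfield, matchvalue):
    # Only the value is user-controlled, so bind it with a placeholder.
    query = "SELECT {0} FROM {1} WHERE {2}=?;".format(field, table, matchfield)
    row = db.execute(query, (matchvalue.lower(),)).fetchone()
    return row[0] if row else False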
def process_config_arch(self):
    log.log(log.LOG_INFO, "Processing Architectures")
    for arch in self.get_config_section('architecture'):
        try:
            self.validator.arch(arch)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Architecture '{0}': YAML validation Error: {1}".format(arch['name'], e))
            continue
        try:
            arch_id = self.fm.architectures.show(arch['name'])['id']
            log.log(log.LOG_DEBUG, "Architecture '{0}' (id={1}) already present.".format(arch['name'], arch_id))
        except:
            log.log(log.LOG_INFO, "Create Architecture '{0}'".format(arch['name']))
            self.fm.architectures.create(architecture={'name': arch['name']})
def process_config_domain(self):
    log.log(log.LOG_INFO, "Processing Domains")
    for domain in self.get_config_section('domain'):
        try:
            self.validator.domain(domain)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Domain '{0}': YAML validation Error: {1}".format(domain['name'], e))
            continue
        try:
            dom_id = self.fm.domains.show(domain['name'])['id']
            log.log(log.LOG_DEBUG, "Domain '{0}' (id={1}) already present.".format(domain['name'], dom_id))
        except:
            dns_proxy_id = False
            try:
                dns_proxy_id = self.fm.smart_proxies.show(domain['dns-proxy'])['id']
            except:
                log.log(log.LOG_WARN, "Cannot get ID of DNS Smart Proxy '{0}', skipping".format(domain['dns-proxy']))
            log.log(log.LOG_INFO, "Create Domain '{0}'".format(domain['name']))
            dom_params = []
            if domain['parameters']:
                for name, value in domain['parameters'].iteritems():
                    p = {'name': name, 'value': value}
                    dom_params.append(p)
            dom_tpl = {
                'name': domain['name'],
                'fullname': domain['fullname'],
            }
            fixdom = {'domain_parameters_attributes': dom_params}
            if dns_proxy_id:
                dom_tpl['dns_id'] = dns_proxy_id
            domo = self.fm.domains.create(domain=dom_tpl)
            if dom_params:
                self.fm.domains.update(fixdom, domo['id'])
def process_config_os(self):
    log.log(log.LOG_INFO, "Processing Operating Systems")
    for operatingsystem in self.get_config_section('os'):
        try:
            self.validator.os(operatingsystem)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Operating System '{0}': YAML validation Error: {1}".format(operatingsystem['name'], e))
            continue
        try:
            os_id = self.fm.operatingsystems.show(operatingsystem['description'])['id']
            log.log(log.LOG_DEBUG, "Operating System '{0}' (id={1}) already present.".format(operatingsystem['name'], os_id))
        except:
            log.log(log.LOG_INFO, "Create Operating System '{0}'".format(operatingsystem['name']))
            os_tpl = {
                'name': operatingsystem['name'],
                'description': operatingsystem['description'],
                'major': operatingsystem['major'],
                'minor': operatingsystem['minor'],
                'family': operatingsystem['family'],
                'release_name': operatingsystem['release-name'],
                'password_hash': operatingsystem['password-hash']
            }
            os_obj = self.fm.operatingsystems.create(operatingsystem=os_tpl)
            # host params
            if operatingsystem['parameters'] is not None:
                for name, value in operatingsystem['parameters'].iteritems():
                    p = {'name': name, 'value': value}
                    try:
                        self.fm.operatingsystems.parameters_create(os_obj['id'], p)
                    except:
                        log.log(log.LOG_WARN, "Error adding host parameter '{0}'".format(name))
def process_config_user(self):
    log.log(log.LOG_INFO, "Processing users")
    for user in self.get_config_section('users'):
        # validate yaml
        try:
            self.validator.user(user)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create User '{0}': YAML validation Error: {1}".format(user['login'], e))
            continue
        try:
            as_id = self.fm.users.show(user['login'])['id']
            log.log(log.LOG_WARN, "User {0} already exists".format(user['login']))
            continue
        except TypeError:
            pass
        # resolve auth source
        if user['auth-source'] != 'INTERNAL':
            try:
                as_id = self.fm.auth_source_ldaps.show(user['auth-source'])['id']
            except TypeError:
                log.log(log.LOG_ERROR, "Cannot resolve auth source '{0}' for user '{1}', skipping creation".format(user['auth-source'], user['login']))
                continue
            del user['auth-source']
            user['auth_source_id'] = as_id
        else:
            del user['auth-source']
            user['auth_source_id'] = 1
        try:
            self.fm.users.create(user=user)
        except ForemanException as e:
            msg = self.get_api_error_msg(e)
            log.log(log.LOG_ERROR, "Cannot create user '{0}', api says: '{1}'".format(user['login'], msg))
            continue
def __init__(self, config, loglevel=logging.INFO):
    logging.basicConfig(level=loglevel)
    log.LOGLEVEL = loglevel
    self.config = config['foreman']
    self.loglevel = loglevel
    self.validator = Validator()
def connect(self):
    try:
        logging.disable(logging.WARNING)
        self.fm = Foreman(self.config['auth']['url'],
                          (self.config['auth']['user'], self.config['auth']['pass']),
                          api_version=2,
                          use_cache=False,
                          strict_cache=False)
        # this is necessary for detecting faulty credentials in yaml
        self.fm.architectures.index()
        logging.disable(self.loglevel - 1)
    except:
        log.log(log.LOG_ERROR, "Cannot connect to Foreman-API")
        sys.exit(1)
def get_details_spotify(song_name):
    '''
    Tries finding metadata through Spotify
    '''
    song_name = improvename.songname(song_name)
    spotify = spotipy.Spotify()
    results = spotify.search(song_name, limit=1)  # Find top result
    log.log_indented('* Finding metadata from Spotify.')
    try:
        # Parse json dictionary
        album = results['tracks']['items'][0]['album']['name']
        artist = results['tracks']['items'][0]['album']['artists'][0]['name']
        song_title = results['tracks']['items'][0]['name']
        try:
            log.log_indented("* Finding lyrics from Genius.com")
            lyrics = get_lyrics_genius(song_title)
        except:
            log.log_error("* Could not find lyrics from Genius.com, trying something else")
            lyrics = get_lyrics_letssingit(song_title)
        match_bool, score = matching_details(song_name, song_title, artist)
        if match_bool:
            return artist, album, song_title, lyrics, match_bool, score
        else:
            return None
    except IndexError:
        log.log_error('* Could not find metadata from Spotify, trying something else.', indented=True)
        return None
def add_albumart(albumart, song_title):
    '''
    Adds the album art to the song
    '''
    try:
        img = urlopen(albumart)  # Gets album art from url
    except Exception:
        log.log_error("* Could not add album art", indented=True)
        return None

    audio = EasyMP3(song_title, ID3=ID3)
    try:
        audio.add_tags()
    except _util.error:
        pass

    audio.tags.add(
        APIC(
            encoding=3,       # UTF-8
            mime='image/png',
            type=3,           # 3 is for album art
            desc='Cover',
            data=img.read()   # Reads and adds album art
        )
    )
    audio.save()
    log.log("> Added album art")
def get_logdir(self):
    """Retrieve the current log directory"""
    (base_dir, log_dir) = self.session.get_dirs()
    return log_dir
def set_logdir(self, log_dir=None):
    """Set the current log directory and create a new log file"""
    if not log_dir or not os.path.exists(log_dir):
        log_dir = os.path.normpath(self.default_logdir)
    base_dir = self.get_basedir()
    self.session.set_dirs(base_dir, log_dir)
    logname = "ISF-%s.log" % util.formattime()
    self.io.setlogfile(os.path.join(log_dir, logname))
def _prompt_for_logging(self, target, oldproject):
    try:
        if oldproject is None:
            oldproject = ''
        base_logdir = self.get_logdir()
        base_logdir = ''.join(base_logdir[:base_logdir.find(oldproject)])
        if len(base_logdir) == 0:
            base_logdir = self.get_logdir()
        # Request #1699: Change to include compatible logging structure
        self.io.newline()
        log_dir = self.io.prompt_user("Base Log directory", base_logdir, gvars=self.fbglobalvars)
        log_dir = os.path.abspath(log_dir)

        # Get the list of projects
        self.io.print_msg("Checking %s for projects" % (log_dir))
        projects = self._get_projectlist(log_dir)

        # Give the user the choice to use an existing project or create a new one
        project = None
        while project is None:
            project = self._prompt_for_project(projects)

        # To support IPv6 address in log files
        log_dir = os.path.join(log_dir, project, 'z' + target.replace(":", "_"))
        if not self.io.prompt_yn("Set target log directory to '%s'?" % (log_dir)):
            log_dir = self.io.prompt_user("Target log directory?", log_dir, gvars=self.fbglobalvars)
        try:
            os.makedirs(log_dir)  # Fix from 3.2.0 - Don't reinvent the wheel
        except:
            if not os.path.exists(log_dir):
                raise
        self.set_logdir(log_dir)
        return (project, log_dir)
    except OSError:
        self.io.print_warning("Access Denied to '%s'! Choose a different log directory." % (log_dir))
        return (None, None)
def __init__(self, id_, chip):
    """
    cxt: The CM1K offers 127 contexts in the range 1-127. Context 0 is used during training
        to train all neurons against an input regardless of their contexts.
    cat: The CM1K offers 32767 categories in the range 1-32767. Category 0 is used during
        training to present counterexamples (to shrink the neurons' AIFs). If the neuron
        degenerates, bit 15 of the category is set to 1 (i.e., 32768 will be added to the category).
    aif: This should be indicated in the same range as dist, below, as determined by norm.
    dist: If the norm is L1, then distances will be in the range 0-65280 (255 x 256).
        If the norm is Lsup (i.e., max), then distances will be in the range 0-255.
    pattern: A byte array which will be compared on a byte-by-byte basis
        (not bit-by-bit, so not hamming distance).
    """
    log.trace("Neuron.init()")

    self.id_ = id_
    self.chip = chip
    self.state = NeuronState.idle
    self.cxt = 0             # Context
    self.cat = 0             # Category
    self.aif = 0             # Active influence field
    self.degenerate = False  # True when aif shrinks to minif
    self.dist = 0
    self.pattern = []        # Components (the pattern or rbf "center" stored in this neuron)
def reset_dist(self):
    """
    Reset the distance to 0
    """
    # Called from CM1KEmulator.reset_indexcomp(), i.e., whenever LCOMP is updated
    log.trace("Neuron.reset_dist()")
    self.dist = 0
def broadcast(self, input_, norm, classifier=None, aif_scale=1):
    """
    Used for high level broadcast, in which the input is processed in bulk instead of
    per-component, i.e., per byte.
    input_: of len 1-256 (for proper CM1K emulation, otherwise unlimited)
    norm: A DistNorm enum
    classifier: A Classifier enum
    aif_scale: Modify the aif when determining whether to fire. The aif can also be
        permanently scaled via CM1KEmulator.scale_all_aifs(), but this parameter enables
        the same behavior without altering the neuron.
    """
    # Called from CM1KEmulator.broadcast()
    log.trace("Neuron.broadcast()")

    # This shouldn't be necessary. This function should only be called on committed and rtl neurons.
    if self.state == NeuronState.idle:
        log.error("Neuron.broadcast() called on idle neuron")
        return

    self.dist = 0  # NOTE: Not sure this is necessary. Also, undecided whether this should simply call reset_dist().
    if norm == cm1k.CM1KDistNorm.l1:
        for i, comp in enumerate(input_):
            self.dist += abs(comp - self.pattern[i])
    elif norm == cm1k.CM1KDistNorm.lsup:
        for i, comp in enumerate(input_):
            self.dist = max(abs(comp - self.pattern[i]), self.dist)
    elif norm == cm1k.CM1KDistNorm.euc:
        for i, comp in enumerate(input_):
            self.dist += (comp - self.pattern[i]) * (comp - self.pattern[i])
        self.dist = int(round(math.sqrt(self.dist)))

    log.log("Single neuron cat{} dist: {:>5} < {:>5} ?".format(self.cat, self.dist, self.aif))

    # TODO: Use the minimum and maximum AIFs of each neuron (i.e., of each context)
    aif = self.aif if aif_scale == 1 else min(max(int(round(self.aif * aif_scale)), 0), 0xFFFF)
    if (classifier == cm1k.CM1KClassifier.rbf and self.dist < aif) or classifier == cm1k.CM1KClassifier.knn:
        # The neuron has fired
        log.log("Fire with dist{} aif{} cat{}".format(self.dist, aif, self.cat))
        self.chip.store_firing_neuron(self)
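The three distance norms used by update_dist() and broadcast() reduce to simple functions of two byte vectors. A minimal standalone illustration of the same arithmetic (names here are illustrative, not part of the emulator's API):

import math

def l1(a, b):
    # Sum of per-component absolute differences, as update_dist() accumulates incrementally.
    return sum(abs(x - y) for x, y in zip(a, b))

def lsup(a, b):
    # Maximum per-component absolute difference.
    return max(abs(x - y) for x, y in zip(a, b))

def euclidean(a, b):
    # Sum of squares, rounded square root taken once at the end, as in broadcast().
    return int(round(math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))))

assert l1([10, 20], [13, 18]) == 5
assert lsup([10, 20], [13, 18]) == 3
assert euclidean([3, 0], [0, 4]) == 5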
def commit(self, cxt, cat, aif, pattern):
    """
    Commit this neuron to the network. It will already have received a new pattern in the
    immediately preceding broadcast.
    """
    log.trace("Neuron.commit() cxt{} cat{} aif{}".format(cxt, cat, aif))

    self.state = NeuronState.com
    self.cxt = cxt
    self.cat = cat
    self.aif = aif
    self.dist = 0
    # We shouldn't need to assign the pattern. It should already be assigned, but no harm done.
    self.pattern = pattern
def shrink_if_necessary(self, cat, new_aif, minif):
    """
    Shrink the AIF if the categories don't match and the error-compensating AIF is smaller
    than the currently held AIF.
    """
    log.trace("Neuron.shrink_if_necessary()")
    # TODO: create unit test where misfiring neuron has exactly the same distance as the best neuron.
    if cat != self.cat and new_aif < self.aif:
        self.shrink_aif(new_aif, minif)
def dump_registers(self):
    """
    Log the register values
    """
    for key, val in self.register_legend.iteritems():
        log.log("{:12} {:>2}: {:>10} {:>10}".format(
            key, val[0], self.registers[val[0]],
            "0x{:X}".format(self.registers[val[0]])))
def update_firing_dist_and_cat(self):
    # Called from update_all_neuron_dists() when LCOMP is updated to seed DIST with best neuron's distance
    # Called whenever DIST is read
    log.trace("CM1KEmulator.update_firing_dist_and_cat()")

    if self.firing_neurons:
        # firing_neurons is sorted by descending distance, so the best match sits at the end
        self.write_dist_non_ui(self.firing_neurons[-1].dist)
        self.write_cat(self.firing_neurons[-1].cat)
        if self.firing_neurons[-1].degenerate:
            self.write_cat_degenerate(True)
        self.firing_neurons.pop()
    else:
        self.write_dist_non_ui(0xFFFF)
        self.write_cat(0xFFFF)
def update_all_neuron_dists(self, last_comp=False):
    # Called whenever COMP or LCOMP is updated
    log.trace("CM1KEmulator.update_all_neuron_dists()")

    gcr = self.read_gcr_context()
    comp = self.input_[self.indexcomp]
    for neuron in self.neurons:
        if (neuron.state == nrn.NeuronState.com and neuron.cxt == gcr) or neuron.state == nrn.NeuronState.rtl:
            neuron.update_dist(
                self.indexcomp, comp, self.read_ncr_norm(), last_comp, self.read_nsr_classifier_enum())

    if last_comp:
        # After writing the last component, sort the firing neurons by distance
        self.firing_neurons.sort(key=lambda x: x.dist, reverse=True)
def listen(self):
    self.socket.listen()
    log.log('Server is listening at %s:%d' % (self.config['addr'], self.config['port']))
    while True:
        clientSocket, _ = self.socket.accept()
        if self.isTLS:
            try:
                clientSocket = self.context.wrap_socket(clientSocket, server_side=True)
            except:
                clientSocket.close()
                continue
        tunnel.Tunnel(clientSocket).start()
def set_logdir(self, log_dir=None):
    """Set the current log directory and create a new log file"""
    if not log_dir:
        log_dir = os.path.normpath(self.default_logdir)
    base_dir = self.get_basedir()
    self.session.set_dirs(base_dir, log_dir)
    logname = "fuzzbunch-%s.log" % util.formattime()
    self.io.setlogfile(os.path.join(log_dir, logname))
def main():
    try:
        function = sys.argv[1]
    except:
        log.log(log.LOG_ERROR, "No action defined (Valid: dump, import, cleanup)")
        sys.exit(1)

    if os.path.isfile(sys.argv[1]):
        config_file = sys.argv[1]
        function = "legacy"
    else:
        try:
            config_file = sys.argv[2]
        except IndexError:
            log.log(log.LOG_ERROR, "No YAML provided")
            sys.exit(1)

    try:
        config_file = open(config_file, 'r')
        config = yaml.load(config_file)
        config_file.close()
    except:
        log.log(log.LOG_ERROR, "Failed to load/parse config")
        sys.exit(1)

    if function == "import":
        fm = ForemanImport(config)
        fm.connect()
        fm_import(fm)
    if function == "dump":
        fm = ForemanDump(config)
        fm.connect()
        fm_dump(fm)
    if function == "cleanup":
        fm = ForemanCleanup(config)
        fm_cleanup(fm)
    if function == "legacy":
        fm_cls = ForemanCleanup(config)
        fm_cls.connect()
        fm_cleanup(fm_cls)
        fm_imp = ForemanImport(config)
        fm_imp.connect()
        fm_import(fm_imp)
def process_config_provisioningtpl(self):
    log.log(log.LOG_INFO, "Processing Provisioning Templates")
    # fm.provisioning_templates.show(name) does not work as expected, we need to iterate over fm.provisioning_templates.index()
    ptlist = self.fm.provisioning_templates.index(per_page=99999)['results']
    for pt in self.get_config_section('provisioning-template'):
        try:
            self.validator.provt(pt)
        except MultipleInvalid as e:
            log.log(log.LOG_WARN, "Cannot create Provisioning Template '{0}': YAML validation Error: {1}".format(pt['name'], e))
            continue
        pt_id = False
        for ptc in ptlist:
            if ptc['name'] == pt['name']:
                pt_id = ptc['id']
                log.log(log.LOG_DEBUG, "Provisioning Template '{0}' (id={1}) already present.".format(pt['name'], pt_id))
        if not pt_id:
            log.log(log.LOG_INFO, "Create Provisioning Template '{0}'".format(pt['name']))
            os_ids = []
            for osc in pt['os']:
                try:
                    os_id = self.fm.operatingsystems.show(osc['name'])['id']
                    os_ids.append(os_id)
                except:
                    log.log(log.LOG_WARN, "Cannot link OS '{0}' to Provisioning Template '{1}'".format(osc['name'], pt['name']))
            pt_tpl = {
                'name': pt['name'],
                'template': pt['template'],
                'snippet': pt['snippet'],
                'audit_comment': pt['audit-comment'],
                'template_kind_id': pt['template-kind-id'],
                'locked': pt['locked'],
                'operatingsystem_ids': os_ids
            }
            prtes = self.fm.provisioning_templates.create(provisioning_template=pt_tpl)
def process_template_combination_attribute(self):
    ptlist = self.fm.provisioning_templates.index(per_page=99999)['results']
    envlist = self.fm.environments.index(per_page=99999)['results']
    for pt in self.get_config_section('provisioning-template'):
        msg = ""
        pt_id = False
        for ptc in ptlist:
            if ptc['name'] == pt['name']:
                pt_id = ptc['id']
        if not pt_id:
            log.log(log.LOG_WARN, "Cannot resolve Provisioning template '{0}'".format(pt['name']))
            continue
        if 'template-combination-attribute' not in pt or pt['template-combination-attribute'] is None:
            continue
        linklist = pt['template-combination-attribute']
        for item in linklist:
            env_id = False
            hg_id = False
            for envc in envlist:
                try:
                    if item['enviroment'] == envc['name']:
                        env_id = envc['id']
                except KeyError:
                    env_id = False
            try:
                hg_id = self.fm.hostgroups.show(item['hostgroup'])['id']
            except:
                hg_id = False
            if hg_id is not False or env_id is not False:
                pt_api_arr = {"template_combinations_attributes": [{}]}
                if hg_id is not False:
                    pt_api_arr["template_combinations_attributes"][0]["hostgroup_id"] = hg_id
                if env_id is not False:
                    pt_api_arr["template_combinations_attributes"][0]["environment_id"] = env_id
                try:
                    self.fm.provisioning_templates.update(pt_api_arr, pt_id)
                except ForemanException as e:
                    msg = self.get_api_error_msg(e)
                    log.log(log.LOG_WARN, "Cannot link provisioning template '{0}' api says: '{1}'".format(pt['name'], msg))
                    continue
            else:
                log.log(log.LOG_WARN, "Cannot link provisioning template '{0}', at least hostgroup needs to be valid".format(pt['name']))
def get_details_letssingit(song_name):
    '''
    Gets the song details if song details not found through spotify
    '''
    song_name = improvename.songname(song_name)
    url = "http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=" + \
        quote(song_name.encode('utf-8'))
    html = urlopen(url).read()
    soup = BeautifulSoup(html, "html.parser")
    link = soup.find('a', {'class': 'high_profile'})
    try:
        link = link.get('href')
        link = urlopen(link).read()
        soup = BeautifulSoup(link, "html.parser")

        album_div = soup.find('div', {'id': 'albums'})
        title_div = soup.find('div', {'id': 'content_artist'}).find('h1')

        try:
            lyrics = soup.find('div', {'id': 'lyrics'}).text
            lyrics = lyrics[3:]
        except AttributeError:
            lyrics = ""
            log.log_error("* Couldn't find lyrics", indented=True)

        try:
            song_title = title_div.contents[0]
            song_title = song_title[1:-8]
        except AttributeError:
            log.log_error("* Couldn't reset song title", indented=True)
            song_title = song_name

        try:
            artist = title_div.contents[1].getText()
        except AttributeError:
            log.log_error("* Couldn't find artist name", indented=True)
            artist = "Unknown"

        try:
            album = album_div.find('a').contents[0]
            album = album[:-7]
        except AttributeError:
            log.log_error("* Couldn't find the album name", indented=True)
            album = artist

    except AttributeError:
        log.log_error("* Couldn't find song details", indented=True)
        album = song_name
        song_title = song_name
        artist = "Unknown"
        lyrics = ""

    match_bool, score = matching_details(song_name, song_title, artist)
    return artist, album, song_title, lyrics, match_bool, score
def fix_music(file_name):
    '''
    Searches for '.mp3' files in directory (optionally recursive)
    and checks whether they already contain album art and album name tags or not.
    '''
    setup()

    if not Py3:
        file_name = file_name.encode('utf-8')

    tags = File(file_name)
    log.log(file_name)
    log.log('> Adding metadata')

    try:
        artist, album, song_name, lyrics, match_bool, score = get_details_spotify(
            file_name)  # Try finding details through spotify
    except Exception:
        artist, album, song_name, lyrics, match_bool, score = get_details_letssingit(
            file_name)  # Use bad scraping method as last resort

    try:
        log.log_indented('* Trying to extract album art from Google.com')
        albumart = albumsearch.img_search_google(artist + ' ' + album)
    except Exception:
        log.log_indented('* Trying to extract album art from Bing.com')
        albumart = albumsearch.img_search_bing(artist + ' ' + album)

    if match_bool:
        add_albumart(albumart, file_name)
        add_details(file_name, song_name, artist, album, lyrics)
        try:
            rename(file_name, artist + ' - ' + song_name + '.mp3')
        except Exception:
            log.log_error("Couldn't rename file")
            pass
    else:
        log.log_error("* Couldn't find appropriate details of your song", indented=True)

    log.log("Match score: %s/10.0" % round(score * 10, 1))
    log.log(LOG_LINE_SEPERATOR)
    log.log_success()
def __init__(self, configfile, base_dir, log_dir, stdin=None, stdout=None, stderr=None):
    """@brief Initialize the Fuzzbunch object

    @param configfile  The main Fuzzbunch configuration file (an XML file)
    @param base_dir
    @param log_dir     Location for Fuzzbunch log files
    @param stdin
    @param stdout
    @param stderr
    """
    # Initialize the command interpreter, which creates a CmdCtx
    self.configvars = {}         # Stores global config info (not setg globals)
    self.readconfig(configfile)  # Read in variables set for Fuzzbunch

    # Fix bug #2910 - Color breaks in some terminals that don't support ansi encoding.
    # Added option to disable color
    enablecolor = eval(self.configvars['globals']['Color'])
    FbCmd.__init__(self, stdin=stdin, stdout=stdout, stderr=stderr, enablecolor=enablecolor)

    # Set the info function to Fuzzbunch's print_info function
    self.defaultcontext.print_info = self.print_info
    self.preconfig()

    self.fbglobalvars = util.iDict()    # Our Fuzzbunch global variables
    self.pluginmanagers = util.iDict()  # A list of PluginManagers, each of which
                                        # contains a list of Plugins.

    # Create our Session manager, which has a list of the plugins we've run
    self.session = session.Session(self.name)
    self.session.set_dirs(base_dir, log_dir)

    # Set the logdir from the Fuzzbunch.xml file, which will be overridden
    # later when retarget is executed
    self.default_logdir = os.path.normpath(log_dir)
    self.set_logdir(log_dir)

    # Create a Redirection object to keep track of the status of redirection, and to
    # perform transforms on the parameters prior to and after executing plugins
    self.redirection = redirection.RedirectionManager(self.io)

    self.fontdir = os.path.join(base_dir, "fonts")
    self.storage = os.path.join(base_dir, "storage")
    self.setbanner()
    self.postconfig()
    self.pwnies = False
    self.conv_tools = util.iDict([('MultiLine', self.toolpaste_ep),
                                  ('MD5', self.toolpaste_md5),
                                  ('SHA1', self.toolpaste_sha1),
                                  ('Base64', self.toolpaste_base64)])
    self.log = log(self.name, self.version, dict(debugging=True, enabled=True, verbose=False))