From e48b09cd7dc6cbcfa9fd44971417874a05535b7a Mon Sep 17 00:00:00 2001 From: Bogdan Popescu <68062990+bopopescu@users.noreply.github.com> Date: Fri, 24 Jul 2020 09:24:04 +0300 Subject: [PATCH] Removing the master-slave lingo --- .../contrib/extended_snapshot_attributes.py | 8 +- cinder/cinder/api/openstack/wsgi.py | 2 +- cinder/cinder/api/xmlutil.py | 122 +++++----- cinder/cinder/compute/aggregate_states.py | 2 +- .../cinder/openstack/common/gettextutils.py | 4 +- cinder/cinder/tests/api/test_xmlutil.py | 174 +++++++-------- cinder/cinder/tests/test_storwize_svc.py | 12 +- cinder/doc/source/conf.py | 4 +- cliff/doc/source/conf.py | 4 +- django_openstack_auth/doc/source/conf.py | 4 +- glance/doc/source/conf.py | 4 +- glance/glance/cmd/replicator.py | 124 +++++------ .../common/db/sqlalchemy/migration.py | 2 +- .../glance/openstack/common/gettextutils.py | 4 +- .../tests/unit/test_glance_replicator.py | 16 +- heat/doc/source/conf.py | 4 +- .../common/db/sqlalchemy/migration.py | 2 +- .../openstack/common/db/sqlalchemy/session.py | 30 +-- heat/heat/openstack/common/gettextutils.py | 4 +- horizon/doc/source/conf.py | 4 +- keystone/doc/source/conf.py | 4 +- .../common/db/sqlalchemy/migration.py | 2 +- .../keystone/openstack/common/gettextutils.py | 4 +- keystone/keystone/tests/core.py | 2 +- .../keystone/tests/test_keystoneclient.py | 6 +- neutron/doc/source/conf.py | 4 +- .../openstack/common/db/sqlalchemy/session.py | 30 +-- .../neutron/openstack/common/gettextutils.py | 4 +- .../plugins/bigswitch/servermanager.py | 4 +- .../agent/linuxbridge_neutron_agent.py | 2 +- .../neutron/tests/unit/test_linux_ip_lib.py | 4 +- nova/doc/source/conf.py | 4 +- .../openstack/compute/contrib/extended_ips.py | 8 +- .../compute/contrib/extended_ips_mac.py | 8 +- .../contrib/extended_server_attributes.py | 8 +- .../compute/contrib/extended_status.py | 8 +- .../extended_virtual_interfaces_net.py | 4 +- .../compute/contrib/extended_volumes.py | 8 +- .../compute/contrib/flavor_access.py | 
12 +- .../openstack/compute/contrib/image_size.py | 8 +- .../openstack/compute/contrib/server_usage.py | 8 +- nova/nova/api/openstack/compute/servers.py | 16 +- nova/nova/api/openstack/wsgi.py | 2 +- nova/nova/api/openstack/xmlutil.py | 120 +++++----- nova/nova/compute/manager.py | 54 ++--- nova/nova/compute/rpcapi.py | 10 +- nova/nova/conductor/api.py | 4 +- nova/nova/conductor/manager.py | 8 +- nova/nova/conductor/rpcapi.py | 6 +- nova/nova/console/xvp.py | 2 +- nova/nova/db/api.py | 26 +-- nova/nova/db/sqlalchemy/api.py | 98 ++++----- nova/nova/db/sqlalchemy/utils.py | 2 +- nova/nova/network/ldapdns.py | 8 +- nova/nova/network/linux_net.py | 2 +- nova/nova/network/manager.py | 4 +- nova/nova/network/neutronv2/api.py | 8 +- nova/nova/objects/block_device.py | 6 +- nova/nova/objects/instance.py | 22 +- nova/nova/objects/migration.py | 6 +- nova/nova/objects/virtual_interface.py | 4 +- .../common/db/sqlalchemy/migration.py | 2 +- nova/nova/openstack/common/gettextutils.py | 4 +- nova/nova/tests/api/ec2/test_cloud.py | 4 +- .../compute/contrib/test_disk_config.py | 2 +- .../compute/contrib/test_instance_actions.py | 6 +- .../compute/contrib/test_security_groups.py | 10 +- .../compute/contrib/test_server_start_stop.py | 2 +- .../openstack/compute/contrib/test_shelve.py | 2 +- .../openstack/compute/contrib/test_volumes.py | 2 +- .../plugins/v3/test_instance_actions.py | 6 +- .../compute/plugins/v3/test_server_actions.py | 6 +- .../plugins/v3/test_server_metadata.py | 6 +- .../compute/plugins/v3/test_servers.py | 20 +- .../compute/plugins/v3/test_shelve.py | 2 +- .../openstack/compute/test_server_actions.py | 6 +- .../openstack/compute/test_server_metadata.py | 6 +- .../api/openstack/compute/test_servers.py | 14 +- nova/nova/tests/api/openstack/fakes.py | 8 +- nova/nova/tests/api/openstack/test_xmlutil.py | 208 +++++++++--------- nova/nova/tests/compute/test_compute.py | 48 ++-- nova/nova/tests/compute/test_compute_api.py | 6 +- 
nova/nova/tests/compute/test_compute_mgr.py | 14 +- nova/nova/tests/compute/test_compute_utils.py | 2 +- nova/nova/tests/compute/test_compute_xen.py | 2 +- nova/nova/tests/compute/test_rpcapi.py | 8 +- nova/nova/tests/conductor/test_conductor.py | 16 +- nova/nova/tests/db/test_db_api.py | 4 +- .../nova/tests/integrated/test_api_samples.py | 2 +- .../integrated/v3/test_extended_volumes.py | 2 +- nova/nova/tests/network/test_linux_net.py | 2 +- nova/nova/tests/network/test_manager.py | 14 +- nova/nova/tests/objects/test_instance.py | 56 ++--- nova/nova/tests/objects/test_migration.py | 6 +- nova/nova/tests/test_metadata.py | 2 +- nova/nova/tests/virt/libvirt/test_libvirt.py | 4 +- nova/nova/tests/virt/xenapi/test_xenapi.py | 88 ++++---- nova/nova/virt/xenapi/client/session.py | 12 +- nova/nova/virt/xenapi/fake.py | 4 +- nova/nova/virt/xenapi/pool.py | 86 ++++---- nova/nova/virt/xenapi/pool_states.py | 2 +- nova/tools/db/schema_diff.py | 6 +- oslo.config/doc/source/conf.py | 4 +- oslo.messaging/doc/source/conf.py | 4 +- .../tests/drivers/test_matchmaker_redis.py | 4 +- oslo.rootwrap/doc/source/conf.py | 4 +- oslo.vmware/doc/source/conf.py | 4 +- pbr/doc/source/conf.py | 4 +- pbr/pbr/packaging.py | 2 +- pycadf/doc/source/conf.py | 4 +- .../openstack/common/gettextutils.py | 4 +- python-cinderclient/doc/source/conf.py | 4 +- python-glanceclient/doc/source/conf.py | 4 +- .../openstack/common/gettextutils.py | 4 +- python-heatclient/doc/source/conf.py | 4 +- .../openstack/common/gettextutils.py | 4 +- python-keystoneclient/doc/source/conf.py | 4 +- .../openstack/common/gettextutils.py | 4 +- python-neutronclient/doc/source/conf.py | 4 +- python-novaclient/doc/source/conf.py | 4 +- python-openstackclient/doc/source/conf.py | 4 +- .../tests/identity/v3/fakes.py | 2 +- python-swiftclient/doc/source/conf.py | 4 +- stevedore/doc/source/conf.py | 4 +- taskflow/doc/source/conf.py | 4 +- taskflow/taskflow/test.py | 16 +- .../tests/unit/worker_based/test_engine.py | 4 +- 
.../tests/unit/worker_based/test_executor.py | 34 +-- .../tests/unit/worker_based/test_proxy.py | 46 ++-- .../tests/unit/worker_based/test_server.py | 82 +++---- .../tests/unit/worker_based/test_worker.py | 48 ++-- taskflow/tools/schema_generator.py | 2 +- tempest/doc/source/conf.py | 4 +- .../compute/admin/test_fixed_ips_negative.py | 2 +- .../data_processing/test_cluster_templates.py | 2 +- .../tempest/openstack/common/gettextutils.py | 4 +- 136 files changed, 1057 insertions(+), 1057 deletions(-) diff --git a/cinder/cinder/api/contrib/extended_snapshot_attributes.py b/cinder/cinder/api/contrib/extended_snapshot_attributes.py index 56cc243..0ea17f9 100644 --- a/cinder/cinder/api/contrib/extended_snapshot_attributes.py +++ b/cinder/cinder/api/contrib/extended_snapshot_attributes.py @@ -49,7 +49,7 @@ def _extend_snapshot(self, req, resp_snap): def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedSnapshotAttributeTemplate()) snapshot = resp_obj.obj['snapshot'] self._extend_snapshot(req, snapshot) @@ -58,7 +58,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['cinder.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedSnapshotAttributesTemplate()) for snapshot in list(resp_obj.obj['snapshots']): self._extend_snapshot(req, snapshot) @@ -93,7 +93,7 @@ def construct(self): make_snapshot(root) alias = Extended_snapshot_attributes.alias namespace = Extended_snapshot_attributes.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class ExtendedSnapshotAttributesTemplate(xmlutil.TemplateBuilder): @@ -104,4 +104,4 @@ def construct(self): 
make_snapshot(elem) alias = Extended_snapshot_attributes.alias namespace = Extended_snapshot_attributes.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/cinder/api/openstack/wsgi.py b/cinder/cinder/api/openstack/wsgi.py index c93a088..f08b2ae 100644 --- a/cinder/cinder/api/openstack/wsgi.py +++ b/cinder/cinder/api/openstack/wsgi.py @@ -577,7 +577,7 @@ def preserialize(self, content_type, default_serializers=None): self.serializer = serializer() def attach(self, **kwargs): - """Attach slave templates to serializers.""" + """Attach subordinate templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) diff --git a/cinder/cinder/api/xmlutil.py b/cinder/cinder/api/xmlutil.py index cf96968..eb0155b 100644 --- a/cinder/cinder/api/xmlutil.py +++ b/cinder/cinder/api/xmlutil.py @@ -690,13 +690,13 @@ def wrap(self): # We are a template return self - def apply(self, master): - """Hook method for determining slave applicability. + def apply(self, main): + """Hook method for determining subordinate applicability. An overridable hook method used to determine if this template - is applicable as a slave to a given master template. + is applicable as a subordinate to a given main template. - :param master: The master template to test. + :param main: The main template to test. """ return True @@ -711,17 +711,17 @@ def tree(self): return "%r: %s" % (self, self.root.tree()) -class MasterTemplate(Template): - """Represent a master template. +class MainTemplate(Template): + """Represent a main template. - Master templates are versioned derivatives of templates that - additionally allow slave templates to be attached. Slave + Main templates are versioned derivatives of templates that + additionally allow subordinate templates to be attached. 
Subordinate templates allow modification of the serialized result without - directly changing the master. + directly changing the main. """ def __init__(self, root, version, nsmap=None): - """Initialize a master template. + """Initialize a main template. :param root: The root element of the template. :param version: The version number of the template. @@ -730,9 +730,9 @@ def __init__(self, root, version, nsmap=None): template. """ - super(MasterTemplate, self).__init__(root, nsmap) + super(MainTemplate, self).__init__(root, nsmap) self.version = version - self.slaves = [] + self.subordinates = [] def __repr__(self): """Return string representation of the template.""" @@ -746,89 +746,89 @@ def _siblings(self): An overridable hook method to return the siblings of the root element. This is the root element plus the root elements of - all the slave templates. + all the subordinate templates. """ - return [self.root] + [slave.root for slave in self.slaves] + return [self.root] + [subordinate.root for subordinate in self.subordinates] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. - The namespace dictionary is computed by taking the master + The namespace dictionary is computed by taking the main template's namespace dictionary and updating it from all the - slave templates. + subordinate templates. """ nsmap = self.nsmap.copy() - for slave in self.slaves: - nsmap.update(slave._nsmap()) + for subordinate in self.subordinates: + nsmap.update(subordinate._nsmap()) return nsmap - def attach(self, *slaves): - """Attach one or more slave templates. + def attach(self, *subordinates): + """Attach one or more subordinate templates. - Attaches one or more slave templates to the master template. - Slave templates must have a root element with the same tag as - the master template. 
The slave template's apply() method will - be called to determine if the slave should be applied to this - master; if it returns False, that slave will be skipped. - (This allows filtering of slaves based on the version of the - master template.) + Attaches one or more subordinate templates to the main template. + Subordinate templates must have a root element with the same tag as + the main template. The subordinate template's apply() method will + be called to determine if the subordinate should be applied to this + main; if it returns False, that subordinate will be skipped. + (This allows filtering of subordinates based on the version of the + main template.) """ - slave_list = [] - for slave in slaves: - slave = slave.wrap() + subordinate_list = [] + for subordinate in subordinates: + subordinate = subordinate.wrap() # Make sure we have a tree match - if slave.root.tag != self.root.tag: - msg = (_("Template tree mismatch; adding slave %(slavetag)s " - "to master %(mastertag)s") % - {'slavetag': slave.root.tag, - 'mastertag': self.root.tag}) + if subordinate.root.tag != self.root.tag: + msg = (_("Template tree mismatch; adding subordinate %(subordinatetag)s " + "to main %(maintag)s") % + {'subordinatetag': subordinate.root.tag, + 'maintag': self.root.tag}) raise ValueError(msg) - # Make sure slave applies to this template - if not slave.apply(self): + # Make sure subordinate applies to this template + if not subordinate.apply(self): continue - slave_list.append(slave) + subordinate_list.append(subordinate) - # Add the slaves - self.slaves.extend(slave_list) + # Add the subordinates + self.subordinates.extend(subordinate_list) def copy(self): - """Return a copy of this master template.""" + """Return a copy of this main template.""" - # Return a copy of the MasterTemplate + # Return a copy of the MainTemplate tmp = self.__class__(self.root, self.version, self.nsmap) - tmp.slaves = self.slaves[:] + tmp.subordinates = self.subordinates[:] return tmp -class 
SlaveTemplate(Template): - """Represent a slave template. +class SubordinateTemplate(Template): + """Represent a subordinate template. - Slave templates are versioned derivatives of templates. Each - slave has a minimum version and optional maximum version of the - master template to which they can be attached. + Subordinate templates are versioned derivatives of templates. Each + subordinate has a minimum version and optional maximum version of the + main template to which they can be attached. """ def __init__(self, root, min_vers, max_vers=None, nsmap=None): - """Initialize a slave template. + """Initialize a subordinate template. :param root: The root element of the template. - :param min_vers: The minimum permissible version of the master - template for this slave template to apply. - :param max_vers: An optional upper bound for the master + :param min_vers: The minimum permissible version of the main + template for this subordinate template to apply. + :param max_vers: An optional upper bound for the main template version. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ - super(SlaveTemplate, self).__init__(root, nsmap) + super(SubordinateTemplate, self).__init__(root, nsmap) self.min_vers = min_vers self.max_vers = max_vers @@ -839,23 +839,23 @@ def __repr__(self): (self.__class__.__module__, self.__class__.__name__, self.min_vers, self.max_vers, id(self))) - def apply(self, master): - """Hook method for determining slave applicability. + def apply(self, main): + """Hook method for determining subordinate applicability. An overridable hook method used to determine if this template - is applicable as a slave to a given master template. This - version requires the master template to have a version number + is applicable as a subordinate to a given main template. This + version requires the main template to have a version number between min_vers and max_vers. 
- :param master: The master template to test. + :param main: The main template to test. """ - # Does the master meet our minimum version requirement? - if master.version < self.min_vers: + # Does the main meet our minimum version requirement? + if main.version < self.min_vers: return False # How about our maximum version requirement? - if self.max_vers is not None and master.version > self.max_vers: + if self.max_vers is not None and main.version > self.max_vers: return False return True diff --git a/cinder/cinder/compute/aggregate_states.py b/cinder/cinder/compute/aggregate_states.py index 149f403..242b7c1 100644 --- a/cinder/cinder/compute/aggregate_states.py +++ b/cinder/cinder/compute/aggregate_states.py @@ -25,7 +25,7 @@ A 'created' aggregate becomes 'changing' during the first request of adding a host. During a 'changing' status no other requests will be accepted; this is to allow the hypervisor layer to instantiate the underlying pool -without any potential race condition that may incur in master/slave-based +without any potential race condition that may incur in main/subordinate-based configurations. The aggregate goes into the 'active' state when the underlying pool has been correctly instantiated. All other operations (e.g. add/remove hosts) that succeed will keep the diff --git a/cinder/cinder/openstack/common/gettextutils.py b/cinder/cinder/openstack/common/gettextutils.py index 4957e37..8f78d4a 100644 --- a/cinder/cinder/openstack/common/gettextutils.py +++ b/cinder/cinder/openstack/common/gettextutils.py @@ -300,9 +300,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/cinder/cinder/tests/api/test_xmlutil.py b/cinder/cinder/tests/api/test_xmlutil.py index 4c111e7..24836d5 100644 --- a/cinder/cinder/tests/api/test_xmlutil.py +++ b/cinder/cinder/tests/api/test_xmlutil.py @@ -364,15 +364,15 @@ def test__render(self): attr2=xmlutil.ConstantSelector(2), attr3=xmlutil.ConstantSelector(3)) - # Create a master template element - master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) + # Create a main template element + main_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) - # Create a couple of slave template element - slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']), + # Create a couple of subordinate template element + subordinate_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']), xmlutil.TemplateElement('test', attr3=attrs['attr3']), ] # Try the render - elem = master_elem._render(None, None, slave_elems, None) + elem = main_elem._render(None, None, subordinate_elems, None) # Verify the particulars of the render self.assertEqual(elem.tag, 'test') @@ -384,7 +384,7 @@ def test__render(self): parent = etree.Element('parent') # Try the render again... 
- elem = master_elem._render(parent, None, slave_elems, dict(a='foo')) + elem = main_elem._render(parent, None, subordinate_elems, dict(a='foo')) # Verify the particulars of the render self.assertEqual(len(parent), 1) @@ -483,46 +483,46 @@ def test__nsmap(self): self.assertEqual(len(nsmap), 1) self.assertEqual(nsmap['a'], 'foo') - def test_master_attach(self): - # Set up a master template + def test_main_attach(self): + # Set up a main template elem = xmlutil.TemplateElement('test') - tmpl = xmlutil.MasterTemplate(elem, 1) + tmpl = xmlutil.MainTemplate(elem, 1) - # Make sure it has a root but no slaves + # Make sure it has a root but no subordinates self.assertEqual(tmpl.root, elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) - # Try to attach an invalid slave + # Try to attach an invalid subordinate bad_elem = xmlutil.TemplateElement('test2') self.assertRaises(ValueError, tmpl.attach, bad_elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) - # Try to attach an invalid and a valid slave + # Try to attach an invalid and a valid subordinate good_elem = xmlutil.TemplateElement('test') self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) # Try to attach an inapplicable template class InapplicableTemplate(xmlutil.Template): - def apply(self, master): + def apply(self, main): return False inapp_tmpl = InapplicableTemplate(good_elem) tmpl.attach(inapp_tmpl) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) # Now try attaching an applicable template tmpl.attach(good_elem) - self.assertEqual(len(tmpl.slaves), 1) - self.assertEqual(tmpl.slaves[0].root, good_elem) + self.assertEqual(len(tmpl.subordinates), 1) + self.assertEqual(tmpl.subordinates[0].root, good_elem) - def test_master_copy(self): - # Construct a master template + def test_main_copy(self): + # 
Construct a main template elem = xmlutil.TemplateElement('test') - tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo')) + tmpl = xmlutil.MainTemplate(elem, 1, nsmap=dict(a='foo')) - # Give it a slave - slave = xmlutil.TemplateElement('test') - tmpl.attach(slave) + # Give it a subordinate + subordinate = xmlutil.TemplateElement('test') + tmpl.attach(subordinate) # Construct a copy copy = tmpl.copy() @@ -532,42 +532,42 @@ def test_master_copy(self): self.assertEqual(tmpl.root, copy.root) self.assertEqual(tmpl.version, copy.version) self.assertEqual(id(tmpl.nsmap), id(copy.nsmap)) - self.assertNotEqual(id(tmpl.slaves), id(copy.slaves)) - self.assertEqual(len(tmpl.slaves), len(copy.slaves)) - self.assertEqual(tmpl.slaves[0], copy.slaves[0]) + self.assertNotEqual(id(tmpl.subordinates), id(copy.subordinates)) + self.assertEqual(len(tmpl.subordinates), len(copy.subordinates)) + self.assertEqual(tmpl.subordinates[0], copy.subordinates[0]) - def test_slave_apply(self): - # Construct a master template + def test_subordinate_apply(self): + # Construct a main template elem = xmlutil.TemplateElement('test') - master = xmlutil.MasterTemplate(elem, 3) + main = xmlutil.MainTemplate(elem, 3) - # Construct a slave template with applicable minimum version - slave = xmlutil.SlaveTemplate(elem, 2) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with applicable minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 2) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with equal minimum version - slave = xmlutil.SlaveTemplate(elem, 3) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with equal minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 3) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with inapplicable minimum version - slave = xmlutil.SlaveTemplate(elem, 4) - self.assertEqual(slave.apply(master), False) + # 
Construct a subordinate template with inapplicable minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 4) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with applicable version range - slave = xmlutil.SlaveTemplate(elem, 2, 4) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with applicable version range + subordinate = xmlutil.SubordinateTemplate(elem, 2, 4) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with low version range - slave = xmlutil.SlaveTemplate(elem, 1, 2) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with low version range + subordinate = xmlutil.SubordinateTemplate(elem, 1, 2) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with high version range - slave = xmlutil.SlaveTemplate(elem, 4, 5) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with high version range + subordinate = xmlutil.SubordinateTemplate(elem, 4, 5) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with matching version range - slave = xmlutil.SlaveTemplate(elem, 3, 3) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with matching version range + subordinate = xmlutil.SubordinateTemplate(elem, 3, 3) + self.assertEqual(subordinate.apply(main), True) def test__serialize(self): # Our test object to serialize @@ -579,7 +579,7 @@ def test__serialize(self): 'd': 4, }, 'image': {'name': 'image_foobar', 'id': 42, }, }, } - # Set up our master template + # Set up our main template root = xmlutil.TemplateElement('test', selector='test', name='name') value = xmlutil.SubTemplateElement(root, 'value', selector='values') @@ -587,22 +587,22 @@ def test__serialize(self): attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs') xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items, 
key=0, value=1) - master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo')) + main = xmlutil.MainTemplate(root, 1, nsmap=dict(f='foo')) - # Set up our slave template - root_slave = xmlutil.TemplateElement('test', selector='test') - image = xmlutil.SubTemplateElement(root_slave, 'image', + # Set up our subordinate template + root_subordinate = xmlutil.TemplateElement('test', selector='test') + image = xmlutil.SubTemplateElement(root_subordinate, 'image', selector='image', id='id') image.text = xmlutil.Selector('name') - slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar')) + subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1, nsmap=dict(b='bar')) - # Attach the slave to the master... - master.attach(slave) + # Attach the subordinate to the main... + main.attach(subordinate) # Try serializing our object - siblings = master._siblings() - nsmap = master._nsmap() - result = master._serialize(None, obj, siblings, nsmap) + siblings = main._siblings() + nsmap = main._nsmap() + result = main._serialize(None, obj, siblings, nsmap) # Now we get to manually walk the element tree... 
self.assertEqual(result.tag, 'test') @@ -632,7 +632,7 @@ def test_serialize_with_delimiter(self): 'scope0:scope1:scope2:key3': 'Value3' }} - # Set up our master template + # Set up our main template root = xmlutil.TemplateElement('test', selector='test') key1 = xmlutil.SubTemplateElement(root, 'scope0:key1', selector='scope0:key1') @@ -643,7 +643,7 @@ def test_serialize_with_delimiter(self): key3 = xmlutil.SubTemplateElement(root, 'scope0:scope1:scope2:key3', selector='scope0:scope1:scope2:key3') key3.text = xmlutil.Selector() - serializer = xmlutil.MasterTemplate(root, 1) + serializer = xmlutil.MainTemplate(root, 1) xml_list = [] xml_list.append("") xml_list.append("Value1") @@ -655,60 +655,60 @@ def test_serialize_with_delimiter(self): self.assertEqual(result, expected_xml) -class MasterTemplateBuilder(xmlutil.TemplateBuilder): +class MainTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') - return xmlutil.MasterTemplate(elem, 1) + return xmlutil.MainTemplate(elem, 1) -class SlaveTemplateBuilder(xmlutil.TemplateBuilder): +class SubordinateTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') - return xmlutil.SlaveTemplate(elem, 1) + return xmlutil.SubordinateTemplate(elem, 1) class TemplateBuilderTest(test.TestCase): - def test_master_template_builder(self): + def test_main_template_builder(self): # Make sure the template hasn't been built yet - self.assertIsNone(MasterTemplateBuilder._tmpl) + self.assertIsNone(MainTemplateBuilder._tmpl) # Now, construct the template - tmpl1 = MasterTemplateBuilder() + tmpl1 = MainTemplateBuilder() # Make sure that there is a template cached... - self.assertIsNotNone(MasterTemplateBuilder._tmpl) + self.assertIsNotNone(MainTemplateBuilder._tmpl) # Make sure it wasn't what was returned... 
- self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) + self.assertNotEqual(MainTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt - cached = MasterTemplateBuilder._tmpl - tmpl2 = MasterTemplateBuilder() - self.assertEqual(MasterTemplateBuilder._tmpl, cached) + cached = MainTemplateBuilder._tmpl + tmpl2 = MainTemplateBuilder() + self.assertEqual(MainTemplateBuilder._tmpl, cached) # Make sure we're always getting fresh copies self.assertNotEqual(tmpl1, tmpl2) # Make sure we can override the copying behavior - tmpl3 = MasterTemplateBuilder(False) - self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3) + tmpl3 = MainTemplateBuilder(False) + self.assertEqual(MainTemplateBuilder._tmpl, tmpl3) - def test_slave_template_builder(self): + def test_subordinate_template_builder(self): # Make sure the template hasn't been built yet - self.assertIsNone(SlaveTemplateBuilder._tmpl) + self.assertIsNone(SubordinateTemplateBuilder._tmpl) # Now, construct the template - tmpl1 = SlaveTemplateBuilder() + tmpl1 = SubordinateTemplateBuilder() # Make sure there is a template cached... - self.assertIsNotNone(SlaveTemplateBuilder._tmpl) + self.assertIsNotNone(SubordinateTemplateBuilder._tmpl) # Make sure it was what was returned... 
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) + self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt - tmpl2 = SlaveTemplateBuilder() - self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) + tmpl2 = SubordinateTemplateBuilder() + self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1) # Make sure we're always getting the cached copy self.assertEqual(tmpl1, tmpl2) @@ -719,6 +719,6 @@ def test_make_flat_dict(self): expected_xml = ("\n" 'foobar') root = xmlutil.make_flat_dict('wrapper') - tmpl = xmlutil.MasterTemplate(root, 1) + tmpl = xmlutil.MainTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) diff --git a/cinder/cinder/tests/test_storwize_svc.py b/cinder/cinder/tests/test_storwize_svc.py index 95a2ffe..fd61818 100644 --- a/cinder/cinder/tests/test_storwize_svc.py +++ b/cinder/cinder/tests/test_storwize_svc.py @@ -2135,29 +2135,29 @@ def test_storwize_svc_multi_host_maps(self): def test_storwize_svc_delete_volume_snapshots(self): # Create a volume with two snapshots - master = self._create_volume() + main = self._create_volume() # Fail creating a snapshot - will force delete the snapshot if self.USESIM and False: - snap = self._generate_vol_info(master['name'], master['id']) + snap = self._generate_vol_info(main['name'], main['id']) self.sim.error_injection('startfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap) self._assert_vol_exists(snap['name'], False) # Delete a snapshot - snap = self._generate_vol_info(master['name'], master['id']) + snap = self._generate_vol_info(main['name'], main['id']) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) # Delete a volume with snapshots (regular) - snap = self._generate_vol_info(master['name'], master['id']) + snap = 
self._generate_vol_info(main['name'], main['id']) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) - self.driver.delete_volume(master) - self._assert_vol_exists(master['name'], False) + self.driver.delete_volume(main) + self._assert_vol_exists(main['name'], False) # Fail create volume from snapshot - will force delete the volume if self.USESIM: diff --git a/cinder/doc/source/conf.py b/cinder/doc/source/conf.py index 42a4340..afa7e3d 100644 --- a/cinder/doc/source/conf.py +++ b/cinder/doc/source/conf.py @@ -62,8 +62,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'cinder' diff --git a/cliff/doc/source/conf.py b/cliff/doc/source/conf.py index ff997ae..e039ea0 100644 --- a/cliff/doc/source/conf.py +++ b/cliff/doc/source/conf.py @@ -39,8 +39,8 @@ # The encoding of source files. source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'cliff' diff --git a/django_openstack_auth/doc/source/conf.py b/django_openstack_auth/doc/source/conf.py index 694f137..b1ccc40 100644 --- a/django_openstack_auth/doc/source/conf.py +++ b/django_openstack_auth/doc/source/conf.py @@ -38,8 +38,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'Django OpenStack Auth' diff --git a/glance/doc/source/conf.py b/glance/doc/source/conf.py index 2253323..43ac951 100644 --- a/glance/doc/source/conf.py +++ b/glance/doc/source/conf.py @@ -59,8 +59,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. 
+main_doc = 'index' # General information about the project. project = u'Glance' diff --git a/glance/glance/cmd/replicator.py b/glance/glance/cmd/replicator.py index 4ea3771..dcc13e0 100755 --- a/glance/glance/cmd/replicator.py +++ b/glance/glance/cmd/replicator.py @@ -44,7 +44,7 @@ help Output help for one of the commands below - compare What is missing from the slave glance? + compare What is missing from the subordinate glance? dump Dump the contents of a glance instance to local disk. livecopy Load the contents of one glance instance into another. load Load the contents of a local directory into glance. @@ -53,10 +53,10 @@ IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on ' - 'the slave, but our check for it did ' + 'the subordinate, but our check for it did ' 'not find it. This indicates that we ' 'do not have permissions to see all ' - 'the images on the slave server.') + 'the images on the subordinate server.') SERVER_PORT_REGEX = '\w+:\w+' @@ -293,7 +293,7 @@ def replication_size(options, args): imageservice = get_image_service() client = imageservice(httplib.HTTPConnection(server, port), - options.slavetoken) + options.subordinatetoken) for image in client.get_images(): logging.debug(_('Considering image: %(image)s') % {'image': image}) if image['status'] == 'active': @@ -328,7 +328,7 @@ def replication_dump(options, args): imageservice = get_image_service() client = imageservice(httplib.HTTPConnection(server, port), - options.mastertoken) + options.maintoken) for image in client.get_images(): logging.info(_('Considering: %s') % image['id']) @@ -363,19 +363,19 @@ def _dict_diff(a, b): Returns: True if the dictionaries are different """ - # Only things the master has which the slave lacks matter + # Only things the main has which the subordinate lacks matter if set(a.keys()) - set(b.keys()): - logging.debug(_('metadata diff -- master has extra keys: %(keys)s') + logging.debug(_('metadata diff -- main has extra keys: %(keys)s') % {'keys': 
' '.join(set(a.keys()) - set(b.keys()))}) return True for key in a: if str(a[key]) != str(b[key]): logging.debug(_('metadata diff -- value differs for key ' - '%(key)s: master "%(master_value)s" vs ' - 'slave "%(slave_value)s"') % - {'key': key, 'master_value': a[key], - 'slave_value': b[key]}) + '%(key)s: main "%(main_value)s" vs ' + 'subordinate "%(subordinate_value)s"') % + {'key': key, 'main_value': a[key], + 'subordinate_value': b[key]}) return True return False @@ -413,7 +413,7 @@ def replication_load(options, args): imageservice = get_image_service() client = imageservice(httplib.HTTPConnection(server, port), - options.slavetoken) + options.subordinatetoken) updated = [] @@ -441,7 +441,7 @@ def replication_load(options, args): headers = client.get_image_meta(image_uuid) for key in options.dontreplicate.split(' '): if key in headers: - logging.debug(_('Stripping %(header)s from slave ' + logging.debug(_('Stripping %(header)s from subordinate ' 'metadata'), {'header': key}) del headers[key] @@ -474,69 +474,69 @@ def replication_livecopy(options, args): Load the contents of one glance instance into another. - fromserver:port: the location of the master glance instance. - toserver:port: the location of the slave glance instance. + fromserver:port: the location of the main glance instance. + toserver:port: the location of the subordinate glance instance. 
""" # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) - slave_server_port = args.pop() - master_server_port = args.pop() + subordinate_server_port = args.pop() + main_server_port = args.pop() - if not re.match(SERVER_PORT_REGEX, slave_server_port) or \ - not re.match(SERVER_PORT_REGEX, master_server_port): + if not re.match(SERVER_PORT_REGEX, subordinate_server_port) or \ + not re.match(SERVER_PORT_REGEX, main_server_port): raise ValueError(_("Bad format of the given arguments.")) imageservice = get_image_service() - slave_server, slave_port = slave_server_port.split(':') - slave_conn = httplib.HTTPConnection(slave_server, slave_port) - slave_client = imageservice(slave_conn, options.slavetoken) + subordinate_server, subordinate_port = subordinate_server_port.split(':') + subordinate_conn = httplib.HTTPConnection(subordinate_server, subordinate_port) + subordinate_client = imageservice(subordinate_conn, options.subordinatetoken) - master_server, master_port = master_server_port.split(':') - master_conn = httplib.HTTPConnection(master_server, master_port) - master_client = imageservice(master_conn, options.mastertoken) + main_server, main_port = main_server_port.split(':') + main_conn = httplib.HTTPConnection(main_server, main_port) + main_client = imageservice(main_conn, options.maintoken) updated = [] - for image in master_client.get_images(): + for image in main_client.get_images(): logging.info(_('Considering %(id)s') % {'id': image['id']}) for key in options.dontreplicate.split(' '): if key in image: - logging.debug(_('Stripping %(header)s from master metadata'), + logging.debug(_('Stripping %(header)s from main metadata'), {'header': key}) del image[key] - if _image_present(slave_client, image['id']): + if _image_present(subordinate_client, image['id']): # NOTE(mikal): Perhaps we just need to update the metadata? # Note that we don't attempt to change an image file once it # has been uploaded. 
- headers = slave_client.get_image_meta(image['id']) + headers = subordinate_client.get_image_meta(image['id']) if headers['status'] == 'active': for key in options.dontreplicate.split(' '): if key in image: - logging.debug(_('Stripping %(header)s from master ' + logging.debug(_('Stripping %(header)s from main ' 'metadata'), {'header': key}) del image[key] if key in headers: - logging.debug(_('Stripping %(header)s from slave ' + logging.debug(_('Stripping %(header)s from subordinate ' 'metadata'), {'header': key}) del headers[key] if _dict_diff(image, headers): logging.info(_('... metadata has changed')) - headers, body = slave_client.add_image_meta(image) + headers, body = subordinate_client.add_image_meta(image) _check_upload_response_headers(headers, body) updated.append(image['id']) elif image['status'] == 'active': logging.info(_('%s is being synced') % image['id']) if not options.metaonly: - image_response = master_client.get_image(image['id']) + image_response = main_client.get_image(image['id']) try: - headers, body = slave_client.add_image(image, + headers, body = subordinate_client.add_image(image, image_response) _check_upload_response_headers(headers, body) updated.append(image['id']) @@ -551,55 +551,55 @@ def replication_compare(options, args): Compare the contents of fromserver with those of toserver. - fromserver:port: the location of the master glance instance. - toserver:port: the location of the slave glance instance. + fromserver:port: the location of the main glance instance. + toserver:port: the location of the subordinate glance instance. 
""" # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) - slave_server_port = args.pop() - master_server_port = args.pop() + subordinate_server_port = args.pop() + main_server_port = args.pop() - if not re.match(SERVER_PORT_REGEX, slave_server_port) or \ - not re.match(SERVER_PORT_REGEX, master_server_port): + if not re.match(SERVER_PORT_REGEX, subordinate_server_port) or \ + not re.match(SERVER_PORT_REGEX, main_server_port): raise ValueError(_("Bad format of the given arguments.")) imageservice = get_image_service() - slave_server, slave_port = slave_server_port.split(':') - slave_conn = httplib.HTTPConnection(slave_server, slave_port) - slave_client = imageservice(slave_conn, options.slavetoken) + subordinate_server, subordinate_port = subordinate_server_port.split(':') + subordinate_conn = httplib.HTTPConnection(subordinate_server, subordinate_port) + subordinate_client = imageservice(subordinate_conn, options.subordinatetoken) - master_server, master_port = master_server_port.split(':') - master_conn = httplib.HTTPConnection(master_server, master_port) - master_client = imageservice(master_conn, options.mastertoken) + main_server, main_port = main_server_port.split(':') + main_conn = httplib.HTTPConnection(main_server, main_port) + main_client = imageservice(main_conn, options.maintoken) differences = {} - for image in master_client.get_images(): - if _image_present(slave_client, image['id']): - headers = slave_client.get_image_meta(image['id']) + for image in main_client.get_images(): + if _image_present(subordinate_client, image['id']): + headers = subordinate_client.get_image_meta(image['id']) for key in options.dontreplicate.split(' '): if key in image: - logging.debug(_('Stripping %(header)s from master ' + logging.debug(_('Stripping %(header)s from main ' 'metadata'), {'header': key}) del image[key] if key in headers: - logging.debug(_('Stripping %(header)s from slave ' + logging.debug(_('Stripping 
%(header)s from subordinate ' 'metadata'), {'header': key}) del headers[key] for key in image: if image[key] != headers.get(key, None): logging.info(_('%(image_id)s: field %(key)s differs ' - '(source is %(master_value)s, destination ' - 'is %(slave_value)s)') + '(source is %(main_value)s, destination ' + 'is %(subordinate_value)s)') % {'image_id': image['id'], 'key': key, - 'master_value': image[key], - 'slave_value': headers.get(key, + 'main_value': image[key], + 'subordinate_value': headers.get(key, 'undefined')}) differences[image['id']] = 'diff' else: @@ -754,13 +754,13 @@ def main(): oparser.add_option('-t', '--token', action="store", default='', help=("Pass in your authentication token if you have " "one. If you use this option the same token is " - "used for both the master and the slave.")) - oparser.add_option('-M', '--mastertoken', action="store", default='', + "used for both the main and the subordinate.")) + oparser.add_option('-M', '--maintoken', action="store", default='', help=("Pass in your authentication token if you have " - "one. This is the token used for the master.")) - oparser.add_option('-S', '--slavetoken', action="store", default='', + "one. This is the token used for the main.")) + oparser.add_option('-S', '--subordinatetoken', action="store", default='', help=("Pass in your authentication token if you have " - "one. This is the token used for the slave.")) + "one. 
This is the token used for the subordinate.")) oparser.add_option('-v', '--verbose', action="store_true", default=False, help="Print more verbose output.") @@ -787,8 +787,8 @@ def main(): sys.excepthook = logging_excepthook if options.token: - options.slavetoken = options.token - options.mastertoken = options.token + options.subordinatetoken = options.token + options.maintoken = options.token handler.setFormatter(formatter) root_logger.addHandler(handler) diff --git a/glance/glance/openstack/common/db/sqlalchemy/migration.py b/glance/glance/openstack/common/db/sqlalchemy/migration.py index 182cc28..eb05f95 100644 --- a/glance/glance/openstack/common/db/sqlalchemy/migration.py +++ b/glance/glance/openstack/common/db/sqlalchemy/migration.py @@ -64,7 +64,7 @@ def _get_unique_constraints(self, table): data = table.metadata.bind.execute( """SELECT sql - FROM sqlite_master + FROM sqlite_master WHERE type='table' AND name=:table_name""", diff --git a/glance/glance/openstack/common/gettextutils.py b/glance/glance/openstack/common/gettextutils.py index 6f39d28..3b36734 100644 --- a/glance/glance/openstack/common/gettextutils.py +++ b/glance/glance/openstack/common/gettextutils.py @@ -326,9 +326,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both.
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/glance/glance/tests/unit/test_glance_replicator.py b/glance/glance/tests/unit/test_glance_replicator.py index d7dde3b..42947c1 100644 --- a/glance/glance/tests/unit/test_glance_replicator.py +++ b/glance/glance/tests/unit/test_glance_replicator.py @@ -276,7 +276,7 @@ def __init__(self, http_conn, authtoken): self.authtoken = authtoken def get_images(self): - if self.authtoken == 'livemastertoken': + if self.authtoken == 'livemaintoken': return FAKEIMAGES_LIVEMASTER return FAKEIMAGES @@ -335,7 +335,7 @@ def check_bad_args(command, args): class ReplicationCommandsTestCase(test_utils.BaseTestCase): def test_replication_size(self): options = UserDict.UserDict() - options.slavetoken = 'slavetoken' + options.subordinatetoken = 'subordinatetoken' args = ['localhost:9292'] stdout = sys.stdout @@ -368,7 +368,7 @@ def test_replication_dump(self): options = UserDict.UserDict() options.chunksize = 4096 - options.mastertoken = 'mastertoken' + options.maintoken = 'maintoken' options.metaonly = False args = ['localhost:9292', tempdir] @@ -455,7 +455,7 @@ def write_image(img, data): # Finally, we're ready to test options = UserDict.UserDict() options.dontreplicate = 'dontrepl dontreplabsent' - options.slavetoken = 'slavetoken' + options.subordinatetoken = 'subordinatetoken' args = ['localhost:9292', tempdir] orig_img_service = glance_replicator.get_image_service @@ -484,8 +484,8 @@ def test_replication_livecopy(self): options = UserDict.UserDict() options.chunksize = 4096 options.dontreplicate = 'dontrepl dontreplabsent' - options.mastertoken = 'livemastertoken' - options.slavetoken = 'liveslavetoken' + options.maintoken = 'livemaintoken' + options.subordinatetoken 
= 'livesubordinatetoken' options.metaonly = False args = ['localhost:9292', 'localhost:9393'] @@ -512,8 +512,8 @@ def test_replication_compare(self): options = UserDict.UserDict() options.chunksize = 4096 options.dontreplicate = 'dontrepl dontreplabsent' - options.mastertoken = 'livemastertoken' - options.slavetoken = 'liveslavetoken' + options.maintoken = 'livemaintoken' + options.subordinatetoken = 'livesubordinatetoken' options.metaonly = False args = ['localhost:9292', 'localhost:9393'] diff --git a/heat/doc/source/conf.py b/heat/doc/source/conf.py index 7b436c8..f6476f1 100644 --- a/heat/doc/source/conf.py +++ b/heat/doc/source/conf.py @@ -222,8 +222,8 @@ def find_autodoc_modules(module_name, sourcedir): # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'Heat' diff --git a/heat/heat/openstack/common/db/sqlalchemy/migration.py b/heat/heat/openstack/common/db/sqlalchemy/migration.py index c0297ea..b3ea78d 100644 --- a/heat/heat/openstack/common/db/sqlalchemy/migration.py +++ b/heat/heat/openstack/common/db/sqlalchemy/migration.py @@ -96,7 +96,7 @@ def _get_unique_constraints(self, table): data = table.metadata.bind.execute( """SELECT sql - FROM sqlite_master + FROM sqlite_master WHERE type='table' AND name=:table_name""", diff --git a/heat/heat/openstack/common/db/sqlalchemy/session.py b/heat/heat/openstack/common/db/sqlalchemy/session.py index 83a00a3..d948607 100644 --- a/heat/heat/openstack/common/db/sqlalchemy/session.py +++ b/heat/heat/openstack/common/db/sqlalchemy/session.py @@ -311,11 +311,11 @@ def soft_delete_multi_models(): group='DATABASE'), cfg.DeprecatedOpt('connection', group='sql'), ]), - cfg.StrOpt('slave_connection', + cfg.StrOpt('subordinate_connection', default='', secret=True, help='The SQLAlchemy connection string used to connect to the ' - 'slave database'), + 'subordinate
database'), cfg.IntOpt('idle_timeout', default=3600, deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', @@ -442,21 +442,21 @@ def connect(self, dbapi_con, con_record): def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False, - slave_session=False, mysql_traditional_mode=False): + subordinate_session=False, mysql_traditional_mode=False): """Return a SQLAlchemy session.""" global _MAKER global _SLAVE_MAKER maker = _MAKER - if slave_session: + if subordinate_session: maker = _SLAVE_MAKER if maker is None: - engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session, + engine = get_engine(sqlite_fk=sqlite_fk, subordinate_engine=subordinate_session, mysql_traditional_mode=mysql_traditional_mode) maker = get_maker(engine, autocommit, expire_on_commit) - if slave_session: + if subordinate_session: _SLAVE_MAKER = maker else: _MAKER = maker @@ -598,7 +598,7 @@ def _wrap(*args, **kwargs): return _wrap -def get_engine(sqlite_fk=False, slave_engine=False, +def get_engine(sqlite_fk=False, subordinate_engine=False, mysql_traditional_mode=False): """Return a SQLAlchemy engine.""" global _ENGINE @@ -606,14 +606,14 @@ def get_engine(sqlite_fk=False, slave_engine=False, engine = _ENGINE db_uri = CONF.database.connection - if slave_engine: + if subordinate_engine: engine = _SLAVE_ENGINE - db_uri = CONF.database.slave_connection + db_uri = CONF.database.subordinate_connection if engine is None: engine = create_engine(db_uri, sqlite_fk=sqlite_fk, mysql_traditional_mode=mysql_traditional_mode) - if slave_engine: + if subordinate_engine: _SLAVE_ENGINE = engine else: _ENGINE = engine @@ -696,7 +696,7 @@ def create_engine(sql_connection, sqlite_fk=False, mysql_traditional_mode=False): """Return a new SQLAlchemy engine.""" # NOTE(geekinutah): At this point we could be connecting to the normal - # db handle or the slave db handle. Things like + # db handle or the subordinate db handle. 
Things like # _wrap_db_error aren't going to work well if their # backends don't match. Let's check. _assert_matching_drivers() @@ -856,12 +856,12 @@ def _do_query(self, q): def _assert_matching_drivers(): - """Make sure slave handle and normal handle have the same driver.""" + """Make sure subordinate handle and normal handle have the same driver.""" # NOTE(geekinutah): There's no use case for writing to one backend and # reading from another. Who knows what the future holds? - if CONF.database.slave_connection == '': + if CONF.database.subordinate_connection == '': return normal = sqlalchemy.engine.url.make_url(CONF.database.connection) - slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) - assert normal.drivername == slave.drivername + subordinate = sqlalchemy.engine.url.make_url(CONF.database.subordinate_connection) + assert normal.drivername == subordinate.drivername diff --git a/heat/heat/openstack/common/gettextutils.py b/heat/heat/openstack/common/gettextutils.py index 20fe89b..6d895ec 100644 --- a/heat/heat/openstack/common/gettextutils.py +++ b/heat/heat/openstack/common/gettextutils.py @@ -292,9 +292,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/horizon/doc/source/conf.py b/horizon/doc/source/conf.py index 2e00cc0..648c3c7 100644 --- a/horizon/doc/source/conf.py +++ b/horizon/doc/source/conf.py @@ -156,8 +156,8 @@ def find_autodoc_modules(module_name, sourcedir): # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'Horizon' diff --git a/keystone/doc/source/conf.py b/keystone/doc/source/conf.py index ab53410..8b1926b 100644 --- a/keystone/doc/source/conf.py +++ b/keystone/doc/source/conf.py @@ -71,8 +71,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project.
project = u'keystone' diff --git a/keystone/keystone/openstack/common/db/sqlalchemy/migration.py b/keystone/keystone/openstack/common/db/sqlalchemy/migration.py index fd09f19..e0cfe5c 100644 --- a/keystone/keystone/openstack/common/db/sqlalchemy/migration.py +++ b/keystone/keystone/openstack/common/db/sqlalchemy/migration.py @@ -64,7 +64,7 @@ def _get_unique_constraints(self, table): data = table.metadata.bind.execute( """SELECT sql - FROM sqlite_master + FROM sqlite_master WHERE type='table' AND name=:table_name""", diff --git a/keystone/keystone/openstack/common/gettextutils.py b/keystone/keystone/openstack/common/gettextutils.py index 3ab62d0..e161fb1 100644 --- a/keystone/keystone/openstack/common/gettextutils.py +++ b/keystone/keystone/openstack/common/gettextutils.py @@ -326,9 +326,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both.
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/keystone/keystone/tests/core.py b/keystone/keystone/tests/core.py index c5f9f38..79baa00 100644 --- a/keystone/keystone/tests/core.py +++ b/keystone/keystone/tests/core.py @@ -151,7 +151,7 @@ def checkout_vendor(repo, rev): common_utils.git('clone', repo, revdir) os.chdir(revdir) - common_utils.git('checkout', '-q', 'master') + common_utils.git('checkout', '-q', 'master') common_utils.git('pull', '-q') common_utils.git('checkout', '-q', rev) diff --git a/keystone/keystone/tests/test_keystoneclient.py b/keystone/keystone/tests/test_keystoneclient.py index aaa5d9f..139dcc4 100644 --- a/keystone/keystone/tests/test_keystoneclient.py +++ b/keystone/keystone/tests/test_keystoneclient.py @@ -1072,11 +1072,11 @@ def start_fake_response(self, status, headers): client.tenants.list() -class KcMasterTestCase(CompatTestCase, KeystoneClientTests): - checkout_info = (KEYSTONECLIENT_REPO, 'master') +class KcMainTestCase(CompatTestCase, KeystoneClientTests): + checkout_info = (KEYSTONECLIENT_REPO, 'master') -class KcOptTestCase(KcMasterTestCase): +class KcOptTestCase(KcMainTestCase): # Set KSCTEST_PATH to the keystoneclient directory, then run this test. # # For example, to test your local keystoneclient, diff --git a/neutron/doc/source/conf.py b/neutron/doc/source/conf.py index 51801f5..b377da0 100644 --- a/neutron/doc/source/conf.py +++ b/neutron/doc/source/conf.py @@ -64,8 +64,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project.
project = u'Neutron' diff --git a/neutron/neutron/openstack/common/db/sqlalchemy/session.py b/neutron/neutron/openstack/common/db/sqlalchemy/session.py index 9e095f0..7289031 100644 --- a/neutron/neutron/openstack/common/db/sqlalchemy/session.py +++ b/neutron/neutron/openstack/common/db/sqlalchemy/session.py @@ -332,11 +332,11 @@ def soft_delete_multi_models(): group='DATABASE'), cfg.DeprecatedOpt('connection', group='sql'), ]), - cfg.StrOpt('slave_connection', + cfg.StrOpt('subordinate_connection', default='', secret=True, help='The SQLAlchemy connection string used to connect to the ' - 'slave database'), + 'subordinate database'), cfg.IntOpt('idle_timeout', default=3600, deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', @@ -463,21 +463,21 @@ def connect(self, dbapi_con, con_record): def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False, - slave_session=False, mysql_traditional_mode=False): + subordinate_session=False, mysql_traditional_mode=False): """Return a SQLAlchemy session.""" global _MAKER global _SLAVE_MAKER maker = _MAKER - if slave_session: + if subordinate_session: maker = _SLAVE_MAKER if maker is None: - engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session, + engine = get_engine(sqlite_fk=sqlite_fk, subordinate_engine=subordinate_session, mysql_traditional_mode=mysql_traditional_mode) maker = get_maker(engine, autocommit, expire_on_commit) - if slave_session: + if subordinate_session: _SLAVE_MAKER = maker else: _MAKER = maker @@ -620,7 +620,7 @@ def _wrap(*args, **kwargs): return _wrap -def get_engine(sqlite_fk=False, slave_engine=False, +def get_engine(sqlite_fk=False, subordinate_engine=False, mysql_traditional_mode=False): """Return a SQLAlchemy engine.""" global _ENGINE @@ -628,14 +628,14 @@ def get_engine(sqlite_fk=False, slave_engine=False, engine = _ENGINE db_uri = CONF.database.connection - if slave_engine: + if subordinate_engine: engine = _SLAVE_ENGINE - db_uri = CONF.database.slave_connection + db_uri 
= CONF.database.subordinate_connection if engine is None: engine = create_engine(db_uri, sqlite_fk=sqlite_fk, mysql_traditional_mode=mysql_traditional_mode) - if slave_engine: + if subordinate_engine: _SLAVE_ENGINE = engine else: _ENGINE = engine @@ -730,7 +730,7 @@ def create_engine(sql_connection, sqlite_fk=False, mysql_traditional_mode=False): """Return a new SQLAlchemy engine.""" # NOTE(geekinutah): At this point we could be connecting to the normal - # db handle or the slave db handle. Things like + # db handle or the subordinate db handle. Things like # _wrap_db_error aren't going to work well if their # backends don't match. Let's check. _assert_matching_drivers() @@ -893,12 +893,12 @@ def _do_query(self, q): def _assert_matching_drivers(): - """Make sure slave handle and normal handle have the same driver.""" + """Make sure subordinate handle and normal handle have the same driver.""" # NOTE(geekinutah): There's no use case for writing to one backend and # reading from another. Who knows what the future holds? 
- if CONF.database.slave_connection == '': + if CONF.database.subordinate_connection == '': return normal = sqlalchemy.engine.url.make_url(CONF.database.connection) - slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) - assert normal.drivername == slave.drivername + subordinate = sqlalchemy.engine.url.make_url(CONF.database.subordinate_connection) + assert normal.drivername == subordinate.drivername diff --git a/neutron/neutron/openstack/common/gettextutils.py b/neutron/neutron/openstack/common/gettextutils.py index 1c33bfb..b2f984a 100644 --- a/neutron/neutron/openstack/common/gettextutils.py +++ b/neutron/neutron/openstack/common/gettextutils.py @@ -300,9 +300,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/neutron/neutron/plugins/bigswitch/servermanager.py b/neutron/neutron/plugins/bigswitch/servermanager.py index c261836..18f349b 100644 --- a/neutron/neutron/plugins/bigswitch/servermanager.py +++ b/neutron/neutron/plugins/bigswitch/servermanager.py @@ -444,8 +444,8 @@ def rest_call(self, action, resource, data, headers, ignore_codes, timeout=None) # Store the first response as the error to be bubbled up to the # user since it was a good server. Subsequent servers will most - # likely be cluster slaves and won't have a useful error for the - # user (e.g. 
302 redirect to master) + # likely be cluster subordinates and won't have a useful error for the + # user (e.g. 302 redirect to main) if not first_response: first_response = ret if not self.server_failure(ret, ignore_codes): diff --git a/neutron/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py index 66b4427..29b4a99 100755 --- a/neutron/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -318,7 +318,7 @@ def ensure_bridge(self, bridge_name, interface=None, ips=None, # Check if the interface is part of the bridge if not self.interface_exists_on_bridge(bridge_name, interface): try: - # Check if the interface is not enslaved in another bridge + # Check if the interface is not ensubordinated in another bridge if self.is_device_on_bridge(interface): bridge = self.get_bridge_for_tap_device(interface) utils.execute(['brctl', 'delif', bridge, interface], diff --git a/neutron/neutron/tests/unit/test_linux_ip_lib.py b/neutron/neutron/tests/unit/test_linux_ip_lib.py index 7eb58ad..5748b96 100644 --- a/neutron/neutron/tests/unit/test_linux_ip_lib.py +++ b/neutron/neutron/tests/unit/test_linux_ip_lib.py @@ -50,11 +50,11 @@ 'state UP qlen 1000' '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0', '9: bar.9@eth0: mtu 1500 qdisc ' - ' noqueue master brq0b24798c-07 state UP mode DEFAULT' + ' noqueue main brq0b24798c-07 state UP mode DEFAULT' '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan protocol 802.1q id 9 ', '10: bar@eth0: mtu 1500 qdisc ' - ' noqueue master brq0b24798c-07 state UP mode DEFAULT' + ' noqueue main brq0b24798c-07 state UP mode DEFAULT' '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0' '\ vlan protocol 802.1Q id 10 ', '11: bar:bar@eth0: mtu 1500 qdisc mq ' diff --git a/nova/doc/source/conf.py b/nova/doc/source/conf.py index 
0ae450f..a7a920d 100644 --- a/nova/doc/source/conf.py +++ b/nova/doc/source/conf.py @@ -55,8 +55,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'nova' diff --git a/nova/nova/api/openstack/compute/contrib/extended_ips.py b/nova/nova/api/openstack/compute/contrib/extended_ips.py index 20356c0..19c80ee 100644 --- a/nova/nova/api/openstack/compute/contrib/extended_ips.py +++ b/nova/nova/api/openstack/compute/contrib/extended_ips.py @@ -47,7 +47,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsServerTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -59,7 +59,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsServersTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -96,7 +96,7 @@ def construct(self): root = xmlutil.TemplateElement('server', selector='server') xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips.alias: Extended_ips.namespace}) @@ -105,5 +105,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ 
Extended_ips.alias: Extended_ips.namespace}) diff --git a/nova/nova/api/openstack/compute/contrib/extended_ips_mac.py b/nova/nova/api/openstack/compute/contrib/extended_ips_mac.py index 5975b44..ab86ac7 100644 --- a/nova/nova/api/openstack/compute/contrib/extended_ips_mac.py +++ b/nova/nova/api/openstack/compute/contrib/extended_ips_mac.py @@ -45,7 +45,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsMacServerTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -57,7 +57,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsMacServersTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -93,7 +93,7 @@ class ExtendedIpsMacServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips_mac.alias: Extended_ips_mac.namespace}) @@ -102,5 +102,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips_mac.alias: Extended_ips_mac.namespace}) diff --git a/nova/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/nova/api/openstack/compute/contrib/extended_server_attributes.py index a8b441f..87c8232 100644 --- 
a/nova/nova/api/openstack/compute/contrib/extended_server_attributes.py +++ b/nova/nova/api/openstack/compute/contrib/extended_server_attributes.py @@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedServerAttributeTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -51,7 +51,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedServerAttributesTemplate()) servers = list(resp_obj.obj['servers']) @@ -92,7 +92,7 @@ def construct(self): make_server(root) alias = Extended_server_attributes.alias namespace = Extended_server_attributes.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class ExtendedServerAttributesTemplate(xmlutil.TemplateBuilder): @@ -102,4 +102,4 @@ def construct(self): make_server(elem) alias = Extended_server_attributes.alias namespace = Extended_server_attributes.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) diff --git a/nova/nova/api/openstack/compute/contrib/extended_status.py b/nova/nova/api/openstack/compute/contrib/extended_status.py index 7508630..65a3b61 100644 --- a/nova/nova/api/openstack/compute/contrib/extended_status.py +++ b/nova/nova/api/openstack/compute/contrib/extended_status.py @@ -36,7 +36,7 @@ def _extend_server(self, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if 
authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedStatusTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -48,7 +48,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedStatusesTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -86,7 +86,7 @@ class ExtendedStatusTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_status.alias: Extended_status.namespace}) @@ -95,5 +95,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_status.alias: Extended_status.namespace}) diff --git a/nova/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py b/nova/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py index 3c67746..ac93a5e 100644 --- a/nova/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py +++ b/nova/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py @@ -33,7 +33,7 @@ def construct(self): elem = xmlutil.SubTemplateElement(root, 'virtual_interface', selector='virtual_interfaces') make_vif(elem) - return xmlutil.SlaveTemplate(root, 1, + return xmlutil.SubordinateTemplate(root, 1, nsmap={Extended_virtual_interfaces_net.alias: Extended_virtual_interfaces_net.namespace}) @@ -48,7 +48,7 @@ def index(self, req, 
resp_obj, server_id): key = "%s:net_id" % Extended_virtual_interfaces_net.alias context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedVirtualInterfaceNetTemplate()) for vif in resp_obj.obj['virtual_interfaces']: vif1 = self.network_api.get_vif_by_mac_address(context, diff --git a/nova/nova/api/openstack/compute/contrib/extended_volumes.py b/nova/nova/api/openstack/compute/contrib/extended_volumes.py index 930ff12..49b8129 100644 --- a/nova/nova/api/openstack/compute/contrib/extended_volumes.py +++ b/nova/nova/api/openstack/compute/contrib/extended_volumes.py @@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedVolumesServerTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -51,7 +51,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedVolumesServersTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -90,7 +90,7 @@ class ExtendedVolumesServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_volumes.alias: Extended_volumes.namespace}) @@ -99,5 +99,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') 
make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_volumes.alias: Extended_volumes.namespace}) diff --git a/nova/nova/api/openstack/compute/contrib/flavor_access.py b/nova/nova/api/openstack/compute/contrib/flavor_access.py index c97e7ca..e5d7094 100644 --- a/nova/nova/api/openstack/compute/contrib/flavor_access.py +++ b/nova/nova/api/openstack/compute/contrib/flavor_access.py @@ -46,7 +46,7 @@ def construct(self): make_flavor(root) alias = Flavor_access.alias namespace = Flavor_access.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class FlavorsTemplate(xmlutil.TemplateBuilder): @@ -56,7 +56,7 @@ def construct(self): make_flavor(elem) alias = Flavor_access.alias namespace = Flavor_access.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class FlavorAccessTemplate(xmlutil.TemplateBuilder): @@ -65,7 +65,7 @@ def construct(self): elem = xmlutil.SubTemplateElement(root, 'access', selector='flavor_access') make_flavor_access(elem) - return xmlutil.MasterTemplate(root, 1) + return xmlutil.MainTemplate(root, 1) def _marshall_flavor_access(flavor): @@ -127,7 +127,7 @@ def _extend_flavor(self, flavor_rval, flavor_ref): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if soft_authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=FlavorTemplate()) db_flavor = req.get_db_flavor(id) @@ -137,7 +137,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if soft_authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=FlavorsTemplate()) 
flavors = list(resp_obj.obj['flavors']) @@ -149,7 +149,7 @@ def detail(self, req, resp_obj): def create(self, req, body, resp_obj): context = req.environ['nova.context'] if soft_authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=FlavorTemplate()) db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id']) diff --git a/nova/nova/api/openstack/compute/contrib/image_size.py b/nova/nova/api/openstack/compute/contrib/image_size.py index 2199873..f4cd98c 100644 --- a/nova/nova/api/openstack/compute/contrib/image_size.py +++ b/nova/nova/api/openstack/compute/contrib/image_size.py @@ -29,7 +29,7 @@ def construct(self): root = xmlutil.TemplateElement('images') elem = xmlutil.SubTemplateElement(root, 'image', selector='images') make_image(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Image_size.alias: Image_size.namespace}) @@ -37,7 +37,7 @@ class ImageSizeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('image', selector='image') make_image(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Image_size.alias: Image_size.namespace}) @@ -51,7 +51,7 @@ def _extend_image(self, image, image_cache): def show(self, req, resp_obj, id): context = req.environ["nova.context"] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ImageSizeTemplate()) image_resp = resp_obj.obj['image'] # image guaranteed to be in the cache due to the core API adding @@ -63,7 +63,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object 
resp_obj.attach(xml=ImagesSizeTemplate()) images_resp = list(resp_obj.obj['images']) # images guaranteed to be in the cache due to the core API adding diff --git a/nova/nova/api/openstack/compute/contrib/server_usage.py b/nova/nova/api/openstack/compute/contrib/server_usage.py index e57b611..bb2e6bf 100644 --- a/nova/nova/api/openstack/compute/contrib/server_usage.py +++ b/nova/nova/api/openstack/compute/contrib/server_usage.py @@ -41,7 +41,7 @@ def _extend_server(self, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ServerUsageTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -53,7 +53,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ServerUsagesTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -89,7 +89,7 @@ class ServerUsageTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Server_usage.alias: Server_usage.namespace}) @@ -98,5 +98,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Server_usage.alias: Server_usage.namespace}) diff --git a/nova/nova/api/openstack/compute/servers.py b/nova/nova/api/openstack/compute/servers.py index 7fd1082..49918b9 100644 --- 
a/nova/nova/api/openstack/compute/servers.py +++ b/nova/nova/api/openstack/compute/servers.py @@ -124,7 +124,7 @@ class ServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root, detailed=True) - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) class MinimalServersTemplate(xmlutil.TemplateBuilder): @@ -133,7 +133,7 @@ def construct(self): elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) xmlutil.make_links(root, 'servers_links') - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) class ServersTemplate(xmlutil.TemplateBuilder): @@ -141,27 +141,27 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem, detailed=True) - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) class ServerAdminPassTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server') root.set('adminPass') - return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.SubordinateTemplate(root, 1, nsmap=server_nsmap) class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server') root.set('reservation_id') - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) def FullServerTemplate(): - master = ServerTemplate() - master.attach(ServerAdminPassTemplate()) - return master + main = ServerTemplate() + main.attach(ServerAdminPassTemplate()) + return main class CommonDeserializer(wsgi.MetadataXMLDeserializer): diff --git a/nova/nova/api/openstack/wsgi.py b/nova/nova/api/openstack/wsgi.py index 06d27e6..134c455 100644 --- 
a/nova/nova/api/openstack/wsgi.py +++ b/nova/nova/api/openstack/wsgi.py @@ -594,7 +594,7 @@ def preserialize(self, content_type, default_serializers=None): self.serializer = serializer() def attach(self, **kwargs): - """Attach slave templates to serializers.""" + """Attach subordinate templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) diff --git a/nova/nova/api/openstack/xmlutil.py b/nova/nova/api/openstack/xmlutil.py index 0a692ff..ff7d9f7 100644 --- a/nova/nova/api/openstack/xmlutil.py +++ b/nova/nova/api/openstack/xmlutil.py @@ -671,13 +671,13 @@ def wrap(self): # We are a template return self - def apply(self, master): - """Hook method for determining slave applicability. + def apply(self, main): + """Hook method for determining subordinate applicability. An overridable hook method used to determine if this template - is applicable as a slave to a given master template. + is applicable as a subordinate to a given main template. - :param master: The master template to test. + :param main: The main template to test. """ return True @@ -692,17 +692,17 @@ def tree(self): return "%r: %s" % (self, self.root.tree()) -class MasterTemplate(Template): - """Represent a master template. +class MainTemplate(Template): + """Represent a main template. - Master templates are versioned derivatives of templates that - additionally allow slave templates to be attached. Slave + Main templates are versioned derivatives of templates that + additionally allow subordinate templates to be attached. Subordinate templates allow modification of the serialized result without - directly changing the master. + directly changing the main. """ def __init__(self, root, version, nsmap=None): - """Initialize a master template. + """Initialize a main template. :param root: The root element of the template. :param version: The version number of the template. @@ -711,9 +711,9 @@ def __init__(self, root, version, nsmap=None): template. 
""" - super(MasterTemplate, self).__init__(root, nsmap) + super(MainTemplate, self).__init__(root, nsmap) self.version = version - self.slaves = [] + self.subordinates = [] def __repr__(self): """Return string representation of the template.""" @@ -727,88 +727,88 @@ def _siblings(self): An overridable hook method to return the siblings of the root element. This is the root element plus the root elements of - all the slave templates. + all the subordinate templates. """ - return [self.root] + [slave.root for slave in self.slaves] + return [self.root] + [subordinate.root for subordinate in self.subordinates] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. - The namespace dictionary is computed by taking the master + The namespace dictionary is computed by taking the main template's namespace dictionary and updating it from all the - slave templates. + subordinate templates. """ nsmap = self.nsmap.copy() - for slave in self.slaves: - nsmap.update(slave._nsmap()) + for subordinate in self.subordinates: + nsmap.update(subordinate._nsmap()) return nsmap - def attach(self, *slaves): - """Attach one or more slave templates. + def attach(self, *subordinates): + """Attach one or more subordinate templates. - Attaches one or more slave templates to the master template. - Slave templates must have a root element with the same tag as - the master template. The slave template's apply() method will - be called to determine if the slave should be applied to this - master; if it returns False, that slave will be skipped. - (This allows filtering of slaves based on the version of the - master template.) + Attaches one or more subordinate templates to the main template. + Subordinate templates must have a root element with the same tag as + the main template. 
The subordinate template's apply() method will + be called to determine if the subordinate should be applied to this + main; if it returns False, that subordinate will be skipped. + (This allows filtering of subordinates based on the version of the + main template.) """ - slave_list = [] - for slave in slaves: - slave = slave.wrap() + subordinate_list = [] + for subordinate in subordinates: + subordinate = subordinate.wrap() # Make sure we have a tree match - if slave.root.tag != self.root.tag: - msg = _("Template tree mismatch; adding slave %(slavetag)s to " - "master %(mastertag)s") % {'slavetag': slave.root.tag, - 'mastertag': self.root.tag} + if subordinate.root.tag != self.root.tag: + msg = _("Template tree mismatch; adding subordinate %(subordinatetag)s to " + "main %(maintag)s") % {'subordinatetag': subordinate.root.tag, + 'maintag': self.root.tag} raise ValueError(msg) - # Make sure slave applies to this template - if not slave.apply(self): + # Make sure subordinate applies to this template + if not subordinate.apply(self): continue - slave_list.append(slave) + subordinate_list.append(subordinate) - # Add the slaves - self.slaves.extend(slave_list) + # Add the subordinates + self.subordinates.extend(subordinate_list) def copy(self): - """Return a copy of this master template.""" + """Return a copy of this main template.""" - # Return a copy of the MasterTemplate + # Return a copy of the MainTemplate tmp = self.__class__(self.root, self.version, self.nsmap) - tmp.slaves = self.slaves[:] + tmp.subordinates = self.subordinates[:] return tmp -class SlaveTemplate(Template): - """Represent a slave template. +class SubordinateTemplate(Template): + """Represent a subordinate template. - Slave templates are versioned derivatives of templates. Each - slave has a minimum version and optional maximum version of the - master template to which they can be attached. + Subordinate templates are versioned derivatives of templates. 
Each + subordinate has a minimum version and optional maximum version of the + main template to which they can be attached. """ def __init__(self, root, min_vers, max_vers=None, nsmap=None): - """Initialize a slave template. + """Initialize a subordinate template. :param root: The root element of the template. - :param min_vers: The minimum permissible version of the master - template for this slave template to apply. - :param max_vers: An optional upper bound for the master + :param min_vers: The minimum permissible version of the main + template for this subordinate template to apply. + :param max_vers: An optional upper bound for the main template version. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ - super(SlaveTemplate, self).__init__(root, nsmap) + super(SubordinateTemplate, self).__init__(root, nsmap) self.min_vers = min_vers self.max_vers = max_vers @@ -819,23 +819,23 @@ def __repr__(self): (self.__class__.__module__, self.__class__.__name__, self.min_vers, self.max_vers, id(self))) - def apply(self, master): - """Hook method for determining slave applicability. + def apply(self, main): + """Hook method for determining subordinate applicability. An overridable hook method used to determine if this template - is applicable as a slave to a given master template. This - version requires the master template to have a version number + is applicable as a subordinate to a given main template. This + version requires the main template to have a version number between min_vers and max_vers. - :param master: The master template to test. + :param main: The main template to test. """ - # Does the master meet our minimum version requirement? - if master.version < self.min_vers: + # Does the main meet our minimum version requirement? + if main.version < self.min_vers: return False # How about our maximum version requirement? 
- if self.max_vers is not None and master.version > self.max_vers: + if self.max_vers is not None and main.version > self.max_vers: return False return True diff --git a/nova/nova/compute/manager.py b/nova/nova/compute/manager.py index 45e143c..e1eed1a 100644 --- a/nova/nova/compute/manager.py +++ b/nova/nova/compute/manager.py @@ -662,7 +662,7 @@ def _get_instances_on_driver(self, context, filters=None): driver_uuids = self.driver.list_instance_uuids() filters['uuid'] = driver_uuids local_instances = instance_obj.InstanceList.get_by_filters( - context, filters, use_slave=True) + context, filters, use_subordinate=True) return local_instances except NotImplementedError: pass @@ -671,7 +671,7 @@ def _get_instances_on_driver(self, context, filters=None): # to brute force. driver_instances = self.driver.list_instances() instances = instance_obj.InstanceList.get_by_filters(context, filters, - use_slave=True) + use_subordinate=True) name_map = dict((instance.name, instance) for instance in instances) local_instances = [] for driver_instance in driver_instances: @@ -1126,7 +1126,7 @@ def refresh_provider_fw_rules(self, context): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() - def _get_instance_nw_info(self, context, instance, use_slave=False): + def _get_instance_nw_info(self, context, instance, use_subordinate=False): """Get a list of dictionaries of network data of an instance.""" if (not hasattr(instance, 'system_metadata') or len(instance['system_metadata']) == 0): @@ -1137,7 +1137,7 @@ def _get_instance_nw_info(self, context, instance, use_slave=False): # succeed. 
instance = instance_obj.Instance.get_by_uuid(context, instance['uuid'], - use_slave=use_slave) + use_subordinate=use_subordinate) network_info = self.network_api.get_instance_nw_info(context, instance) @@ -1484,7 +1484,7 @@ def _check_instance_build_time(self, context): 'host': self.host} building_insts = instance_obj.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) for instance in building_insts: if timeutils.is_older_than(instance['created_at'], timeout): @@ -4850,7 +4850,7 @@ def _heal_instance_info_cache(self, context): # The list of instances to heal is empty so rebuild it LOG.debug(_('Rebuilding the list of instances to heal')) db_instances = instance_obj.InstanceList.get_by_host( - context, self.host, expected_attrs=[], use_slave=True) + context, self.host, expected_attrs=[], use_subordinate=True) for inst in db_instances: # We don't want to refersh the cache for instances # which are building or deleting so don't put them @@ -4880,7 +4880,7 @@ def _heal_instance_info_cache(self, context): inst = instance_obj.Instance.get_by_uuid( context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache'], - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another. continue @@ -4903,7 +4903,7 @@ def _heal_instance_info_cache(self, context): try: # Call to network API to get instance info.. 
this will # force an update to the instance's info_cache - self._get_instance_nw_info(context, instance, use_slave=True) + self._get_instance_nw_info(context, instance, use_subordinate=True) LOG.debug(_('Updated the network info_cache for instance'), instance=instance) except Exception: @@ -4919,7 +4919,7 @@ def _poll_rebooting_instances(self, context): filters = {'task_state': task_states.REBOOTING, 'host': self.host} rebooting = instance_obj.InstanceList.get_by_filters( - context, filters, expected_attrs=[], use_slave=True) + context, filters, expected_attrs=[], use_subordinate=True) to_poll = [] for instance in rebooting: @@ -4936,7 +4936,7 @@ def _poll_rescued_instances(self, context): 'host': self.host} rescued_instances = self.conductor_api.instance_get_all_by_filters( context, filters, columns_to_join=["system_metadata"], - use_slave=True) + use_subordinate=True) to_unrescue = [] for instance in rescued_instances: @@ -4955,7 +4955,7 @@ def _poll_unconfirmed_resizes(self, context): mig_list_cls = migration_obj.MigrationList migrations = mig_list_cls.get_unconfirmed_by_dest_compute( context, CONF.resize_confirm_window, self.host, - use_slave=True) + use_subordinate=True) migrations_info = dict(migration_count=len(migrations), confirm_window=CONF.resize_confirm_window) @@ -4983,7 +4983,7 @@ def _set_migration_to_error(migration, reason, **kwargs): try: instance = instance_obj.Instance.get_by_uuid(context, instance_uuid, expected_attrs=expected_attrs, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: reason = (_("Instance %s not found") % instance_uuid) @@ -5020,7 +5020,7 @@ def _poll_shelved_instances(self, context): 'host': self.host} shelved_instances = instance_obj.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], - use_slave=True) + use_subordinate=True) to_gc = [] for instance in shelved_instances: @@ -5115,7 +5115,7 @@ def _poll_bandwidth_usage(self, context): instances = 
instance_obj.InstanceList.get_by_host(context, self.host, - use_slave=True) + use_subordinate=True) try: bw_counters = self.driver.get_all_bw_counters(instances) except NotImplementedError: @@ -5138,7 +5138,7 @@ def _poll_bandwidth_usage(self, context): last_ctr_in = None last_ctr_out = None # TODO(geekinutah): Once bw_usage_cache object is created - # need to revisit this and slaveify. + # need to revisit this and subordinateify. usage = self.conductor_api.bw_usage_get(context, bw_ctr['uuid'], start_time, @@ -5149,7 +5149,7 @@ def _poll_bandwidth_usage(self, context): last_ctr_in = usage['last_ctr_in'] last_ctr_out = usage['last_ctr_out'] else: - # TODO(geekinutah): Same here, pls slaveify + # TODO(geekinutah): Same here, pls subordinateify usage = self.conductor_api.bw_usage_get( context, bw_ctr['uuid'], prev_time, bw_ctr['mac_address']) @@ -5242,7 +5242,7 @@ def _sync_power_states(self, context): """ db_instances = instance_obj.InstanceList.get_by_host(context, self.host, - use_slave=True) + use_subordinate=True) num_vm_instances = self.driver.get_num_instances() num_db_instances = len(db_instances) @@ -5273,7 +5273,7 @@ def _sync_power_states(self, context): self._sync_instance_power_state(context, db_instance, vm_power_state, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore and move on to next instance. @@ -5284,7 +5284,7 @@ def _sync_power_states(self, context): instance=db_instance) def _sync_instance_power_state(self, context, db_instance, vm_power_state, - use_slave=False): + use_subordinate=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, @@ -5293,7 +5293,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. 
- db_instance.refresh(use_slave=use_slave) + db_instance.refresh(use_subordinate=use_subordinate) db_power_state = db_instance.power_state vm_state = db_instance.vm_state @@ -5439,7 +5439,7 @@ def _reclaim_queued_deletes(self, context): instances = instance_obj.InstanceList.get_by_filters( context, filters, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, - use_slave=True) + use_subordinate=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = (block_device_obj.BlockDeviceMappingList. @@ -5522,7 +5522,7 @@ def _cleanup_running_deleted_instances(self, context): for instance in self._running_deleted_instances(context): bdms = (block_device_obj.BlockDeviceMappingList. get_by_instance_uuid(context, instance.uuid, - use_slave=True)) + use_subordinate=True)) if action == "log": LOG.warning(_("Detected instance with name label " @@ -5620,11 +5620,11 @@ def _error_out_instance_on_exception(self, context, instance_uuid, @aggregate_object_compat @wrap_exception() - def add_aggregate_host(self, context, aggregate, host, slave_info): + def add_aggregate_host(self, context, aggregate, host, subordinate_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug(_('Hypervisor driver does not support ' 'add_aggregate_host')) @@ -5637,11 +5637,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info): @aggregate_object_compat @wrap_exception() - def remove_aggregate_host(self, context, host, slave_info, aggregate): + def remove_aggregate_host(self, context, host, subordinate_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug(_('Hypervisor driver does not support ' 
'remove_aggregate_host')) @@ -5698,7 +5698,7 @@ def _run_image_cache_manager_pass(self, context): 'soft_deleted': True, 'host': nodes} filtered_instances = instance_obj.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) self.driver.manage_image_cache(context, filtered_instances) diff --git a/nova/nova/compute/rpcapi.py b/nova/nova/compute/rpcapi.py index a1adfbf..9ca3006 100644 --- a/nova/nova/compute/rpcapi.py +++ b/nova/nova/compute/rpcapi.py @@ -141,7 +141,7 @@ class ComputeAPI(object): 2.0 - Remove 1.x backwards compat 2.1 - Adds orig_sys_metadata to rebuild_instance() - 2.2 - Adds slave_info parameter to add_aggregate_host() and + 2.2 - Adds subordinate_info parameter to add_aggregate_host() and remove_aggregate_host() 2.3 - Adds volume_id to reserve_block_device_name() 2.4 - Add bdms to terminate_instance @@ -272,7 +272,7 @@ def _get_compat_version(self, current, havana_compat): return current def add_aggregate_host(self, ctxt, aggregate, host_param, host, - slave_info=None): + subordinate_info=None): '''Add aggregate host. :param ctxt: request context @@ -293,7 +293,7 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host, cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'add_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): if self.client.can_send_version('3.12'): @@ -643,7 +643,7 @@ def refresh_provider_fw_rules(self, ctxt, host): cctxt.cast(ctxt, 'refresh_provider_fw_rules') def remove_aggregate_host(self, ctxt, aggregate, host_param, host, - slave_info=None): + subordinate_info=None): '''Remove aggregate host. 
:param ctxt: request context @@ -664,7 +664,7 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host, cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'remove_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def remove_fixed_ip_from_instance(self, ctxt, instance, address): if self.client.can_send_version('3.13'): diff --git a/nova/nova/conductor/api.py b/nova/nova/conductor/api.py index 5a38ec8..e9119eb 100644 --- a/nova/nova/conductor/api.py +++ b/nova/nova/conductor/api.py @@ -84,13 +84,13 @@ def instance_get_all_by_host_and_node(self, context, host, node): def instance_get_all_by_filters(self, context, filters, sort_key='created_at', sort_dir='desc', - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return self._manager.instance_get_all_by_filters(context, filters, sort_key, sort_dir, columns_to_join, - use_slave) + use_subordinate) def instance_get_active_by_window_joined(self, context, begin, end=None, project_id=None, host=None): diff --git a/nova/nova/conductor/manager.py b/nova/nova/conductor/manager.py index 8023e04..bb8d08d 100644 --- a/nova/nova/conductor/manager.py +++ b/nova/nova/conductor/manager.py @@ -354,10 +354,10 @@ def block_device_mapping_destroy(self, context, bdms=None, def instance_get_all_by_filters(self, context, filters, sort_key, sort_dir, columns_to_join=None, - use_slave=False): + use_subordinate=False): result = self.db.instance_get_all_by_filters( context, filters, sort_key, sort_dir, - columns_to_join=columns_to_join, use_slave=use_slave) + columns_to_join=columns_to_join, use_subordinate=use_subordinate) return jsonutils.to_primitive(result) # NOTE(hanlind): This method can be removed in v2.0 of the RPC API. 
@@ -929,9 +929,9 @@ def block_device_mapping_get_all_by_instance(self, context, instance, instance, legacy) def instance_get_all_by_filters(self, context, filters, sort_key, - sort_dir, columns_to_join, use_slave): + sort_dir, columns_to_join, use_subordinate): return self.manager.instance_get_all_by_filters(context, filters, - sort_key, sort_dir, columns_to_join, use_slave) + sort_key, sort_dir, columns_to_join, use_subordinate) def instance_get_active_by_window_joined(self, context, begin, end, project_id, host): diff --git a/nova/nova/conductor/rpcapi.py b/nova/nova/conductor/rpcapi.py index 821596e..52991ef 100644 --- a/nova/nova/conductor/rpcapi.py +++ b/nova/nova/conductor/rpcapi.py @@ -124,7 +124,7 @@ class ConductorAPI(object): 1.62 - Added object_backport() 1.63 - Changed the format of values['stats'] from a dict to a JSON string in compute_node_update() - 1.64 - Added use_slave to instance_get_all_filters() + 1.64 - Added use_subordinate to instance_get_all_filters() ... - Remove instance_type_get() ... - Remove aggregate_get() ... 
- Remove aggregate_get_by_host() @@ -232,10 +232,10 @@ def block_device_mapping_get_all_by_instance(self, context, instance, def instance_get_all_by_filters(self, context, filters, sort_key, sort_dir, columns_to_join=None, - use_slave=False): + use_subordinate=False): msg_kwargs = dict(filters=filters, sort_key=sort_key, sort_dir=sort_dir, columns_to_join=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) cctxt = self.client.prepare() return cctxt.call(context, 'instance_get_all_by_filters', **msg_kwargs) diff --git a/nova/nova/console/xvp.py b/nova/nova/console/xvp.py index 81418e4..10bada3 100644 --- a/nova/nova/console/xvp.py +++ b/nova/nova/console/xvp.py @@ -40,7 +40,7 @@ help='Generated XVP conf file'), cfg.StrOpt('console_xvp_pid', default='/var/run/xvp.pid', - help='XVP master process pid file'), + help='XVP main process pid file'), cfg.StrOpt('console_xvp_log', default='/var/log/xvp.log', help='XVP log file'), diff --git a/nova/nova/db/api.py b/nova/nova/db/api.py index b3b76b8..15b0334 100644 --- a/nova/nova/db/api.py +++ b/nova/nova/db/api.py @@ -446,12 +446,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): def migration_get_unconfirmed_by_dest_compute(context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): """Finds all unconfirmed migrations within the confirmation window for a specific destination compute host. 
""" return IMPL.migration_get_unconfirmed_by_dest_compute(context, - confirm_window, dest_compute, use_slave=use_slave) + confirm_window, dest_compute, use_subordinate=use_subordinate) def migration_get_in_progress_by_host_and_node(context, host, node): @@ -588,10 +588,10 @@ def virtual_interface_get_by_uuid(context, vif_uuid): return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) -def virtual_interface_get_by_instance(context, instance_id, use_slave=False): +def virtual_interface_get_by_instance(context, instance_id, use_subordinate=False): """Gets all virtual_interfaces for instance.""" return IMPL.virtual_interface_get_by_instance(context, instance_id, - use_slave=use_slave) + use_subordinate=use_subordinate) def virtual_interface_get_by_instance_and_network(context, instance_id, @@ -632,10 +632,10 @@ def instance_destroy(context, instance_uuid, constraint=None, return rv -def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): +def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False): """Get an instance or raise if it does not exist.""" return IMPL.instance_get_by_uuid(context, uuid, - columns_to_join, use_slave=use_slave) + columns_to_join, use_subordinate=use_subordinate) def instance_get(context, instance_id, columns_to_join=None): @@ -651,13 +651,13 @@ def instance_get_all(context, columns_to_join=None): def instance_get_all_by_filters(context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): """Get all instances that match all filters.""" return IMPL.instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit=limit, marker=marker, columns_to_join=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) def instance_get_active_by_window_joined(context, begin, end=None, @@ -672,11 +672,11 @@ def instance_get_active_by_window_joined(context, begin, end=None, 
def instance_get_all_by_host(context, host, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): """Get all instances belonging to a host.""" return IMPL.instance_get_all_by_host(context, host, columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) def instance_get_all_by_host_and_node(context, host, node): @@ -1185,11 +1185,11 @@ def block_device_mapping_update_or_create(context, values, legacy=True): def block_device_mapping_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): """Get all block device mapping belonging to an instance.""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_uuid, - use_slave) + use_subordinate) def block_device_mapping_get_by_volume_id(context, volume_id, @@ -1621,7 +1621,7 @@ def agent_build_update(context, agent_build_id, values): #################### -def bw_usage_get(context, uuid, start_period, mac, use_slave=False): +def bw_usage_get(context, uuid, start_period, mac, use_subordinate=False): """Return bw usage for instance and mac in a given audit period.""" return IMPL.bw_usage_get(context, uuid, start_period, mac) diff --git a/nova/nova/db/sqlalchemy/api.py b/nova/nova/db/sqlalchemy/api.py index 5a135d4..ef67990 100644 --- a/nova/nova/db/sqlalchemy/api.py +++ b/nova/nova/db/sqlalchemy/api.py @@ -71,10 +71,10 @@ ] connection_opts = [ - cfg.StrOpt('slave_connection', + cfg.StrOpt('subordinate_connection', secret=True, help='The SQLAlchemy connection string used to connect to the ' - 'slave database'), + 'subordinate database'), ] CONF = cfg.CONF @@ -92,12 +92,12 @@ _SLAVE_FACADE = None -def _create_facade_lazily(use_slave=False): +def _create_facade_lazily(use_subordinate=False): global _MASTER_FACADE global _SLAVE_FACADE - return_slave = use_slave and CONF.database.slave_connection - if not return_slave: + return_subordinate = use_subordinate and CONF.database.subordinate_connection + if not return_subordinate: if 
_MASTER_FACADE is None: _MASTER_FACADE = db_session.EngineFacade( CONF.database.connection, @@ -107,19 +107,19 @@ def _create_facade_lazily(use_slave=False): else: if _SLAVE_FACADE is None: _SLAVE_FACADE = db_session.EngineFacade( - CONF.database.slave_connection, + CONF.database.subordinate_connection, **dict(CONF.database.iteritems()) ) return _SLAVE_FACADE -def get_engine(use_slave=False): - facade = _create_facade_lazily(use_slave) +def get_engine(use_subordinate=False): + facade = _create_facade_lazily(use_subordinate) return facade.get_engine() -def get_session(use_slave=False, **kwargs): - facade = _create_facade_lazily(use_slave) +def get_session(use_subordinate=False, **kwargs): + facade = _create_facade_lazily(use_subordinate) return facade.get_session(**kwargs) @@ -215,7 +215,7 @@ def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under - :param use_slave: If true, use slave_connection + :param use_subordinate: If true, use subordinate_connection :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict @@ -227,11 +227,11 @@ def model_query(context, model, *args, **kwargs): model parameter. 
""" - use_slave = kwargs.get('use_slave') or False - if CONF.database.slave_connection == '': - use_slave = False + use_subordinate = kwargs.get('use_subordinate') or False + if CONF.database.subordinate_connection == '': + use_subordinate = False - session = kwargs.get('session') or get_session(use_slave=use_slave) + session = kwargs.get('session') or get_session(use_subordinate=use_subordinate) read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only', False) @@ -1436,9 +1436,9 @@ def virtual_interface_create(context, values): return vif_ref -def _virtual_interface_query(context, session=None, use_slave=False): +def _virtual_interface_query(context, session=None, use_subordinate=False): return model_query(context, models.VirtualInterface, session=session, - read_deleted="no", use_slave=use_slave) + read_deleted="no", use_subordinate=use_subordinate) @require_context @@ -1484,12 +1484,12 @@ def virtual_interface_get_by_uuid(context, vif_uuid): @require_context @require_instance_exists_using_uuid -def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False): +def virtual_interface_get_by_instance(context, instance_uuid, use_subordinate=False): """Gets all virtual interfaces for instance. 
:param instance_uuid: = uuid of the instance to retrieve vifs for """ - vif_refs = _virtual_interface_query(context, use_slave=use_slave).\ + vif_refs = _virtual_interface_query(context, use_subordinate=use_subordinate).\ filter_by(instance_uuid=instance_uuid).\ all() return vif_refs @@ -1684,16 +1684,16 @@ def instance_destroy(context, instance_uuid, constraint=None): @require_context -def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): +def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False): return _instance_get_by_uuid(context, uuid, - columns_to_join=columns_to_join, use_slave=use_slave) + columns_to_join=columns_to_join, use_subordinate=use_subordinate) def _instance_get_by_uuid(context, uuid, session=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): result = _build_instance_get(context, session=session, columns_to_join=columns_to_join, - use_slave=use_slave).\ + use_subordinate=use_subordinate).\ filter_by(uuid=uuid).\ first() @@ -1722,9 +1722,9 @@ def instance_get(context, instance_id, columns_to_join=None): def _build_instance_get(context, session=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): query = model_query(context, models.Instance, session=session, - project_only=True, use_slave=use_slave).\ + project_only=True, use_subordinate=use_subordinate).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')) if columns_to_join is None: @@ -1742,7 +1742,7 @@ def _build_instance_get(context, session=None, def _instances_fill_metadata(context, instances, - manual_joins=None, use_slave=False): + manual_joins=None, use_subordinate=False): """Selectively fill instances with manually-joined metadata. Note that instance will be converted to a dict. 
@@ -1760,13 +1760,13 @@ def _instances_fill_metadata(context, instances, meta = collections.defaultdict(list) if 'metadata' in manual_joins: for row in _instance_metadata_get_multi(context, uuids, - use_slave=use_slave): + use_subordinate=use_subordinate): meta[row['instance_uuid']].append(row) sys_meta = collections.defaultdict(list) if 'system_metadata' in manual_joins: for row in _instance_system_metadata_get_multi(context, uuids, - use_slave=use_slave): + use_subordinate=use_subordinate): sys_meta[row['instance_uuid']].append(row) pcidevs = collections.defaultdict(list) @@ -1818,7 +1818,7 @@ def instance_get_all(context, columns_to_join=None): @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit=None, marker=None, columns_to_join=None, - use_slave=False): + use_subordinate=False): """Return instances that match all filters. Deleted instances will be returned by default, unless there's a filter that says otherwise. @@ -1855,10 +1855,10 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, sort_fn = {'desc': desc, 'asc': asc} - if CONF.database.slave_connection == '': - use_slave = False + if CONF.database.subordinate_connection == '': + use_subordinate = False - session = get_session(use_slave=use_slave) + session = get_session(use_subordinate=use_subordinate) if columns_to_join is None: columns_to_join = ['info_cache', 'security_groups'] @@ -2066,14 +2066,14 @@ def instance_get_active_by_window_joined(context, begin, end=None, def _instance_get_all_query(context, project_only=False, - joins=None, use_slave=False): + joins=None, use_subordinate=False): if joins is None: joins = ['info_cache', 'security_groups'] query = model_query(context, models.Instance, project_only=project_only, - use_slave=use_slave) + use_subordinate=use_subordinate) for join in joins: query = query.options(joinedload(join)) return query @@ -2082,12 +2082,12 @@ def _instance_get_all_query(context, project_only=False, 
@require_admin_context def instance_get_all_by_host(context, host, columns_to_join=None, - use_slave=False): + use_subordinate=False): return _instances_fill_metadata(context, _instance_get_all_query(context, - use_slave=use_slave).filter_by(host=host).all(), + use_subordinate=use_subordinate).filter_by(host=host).all(), manual_joins=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) def _instance_get_all_uuids_by_host(context, host, session=None): @@ -3459,12 +3459,12 @@ def get_snapshot_uuid_by_ec2_id(context, ec2_id): def _block_device_mapping_get_query(context, session=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): if columns_to_join is None: columns_to_join = [] query = model_query(context, models.BlockDeviceMapping, - session=session, use_slave=use_slave) + session=session, use_subordinate=use_subordinate) for column in columns_to_join: query = query.options(joinedload(column)) @@ -3546,8 +3546,8 @@ def block_device_mapping_update_or_create(context, values, legacy=True): @require_context def block_device_mapping_get_all_by_instance(context, instance_uuid, - use_slave=False): - return _block_device_mapping_get_query(context, use_slave=use_slave).\ + use_subordinate=False): + return _block_device_mapping_get_query(context, use_subordinate=use_subordinate).\ filter_by(instance_uuid=instance_uuid).\ all() @@ -4052,12 +4052,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): @require_admin_context def migration_get_unconfirmed_by_dest_compute(context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): confirm_window = (timeutils.utcnow() - datetime.timedelta(seconds=confirm_window)) return model_query(context, models.Migration, read_deleted="yes", - use_slave=use_slave).\ + use_subordinate=use_subordinate).\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ 
filter_by(dest_compute=dest_compute).\ @@ -4577,11 +4577,11 @@ def cell_get_all(context): # User-provided metadata def _instance_metadata_get_multi(context, instance_uuids, - session=None, use_slave=False): + session=None, use_subordinate=False): if not instance_uuids: return [] return model_query(context, models.InstanceMetadata, - session=session, use_slave=use_slave).\ + session=session, use_subordinate=use_subordinate).\ filter( models.InstanceMetadata.instance_uuid.in_(instance_uuids)) @@ -4643,11 +4643,11 @@ def instance_metadata_update(context, instance_uuid, metadata, delete): def _instance_system_metadata_get_multi(context, instance_uuids, - session=None, use_slave=False): + session=None, use_subordinate=False): if not instance_uuids: return [] return model_query(context, models.InstanceSystemMetadata, - session=session, use_slave=use_slave).\ + session=session, use_subordinate=use_subordinate).\ filter( models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids)) @@ -4750,9 +4750,9 @@ def agent_build_update(context, agent_build_id, values): #################### @require_context -def bw_usage_get(context, uuid, start_period, mac, use_slave=False): +def bw_usage_get(context, uuid, start_period, mac, use_subordinate=False): return model_query(context, models.BandwidthUsage, read_deleted="yes", - use_slave=use_slave).\ + use_subordinate=use_subordinate).\ filter_by(start_period=start_period).\ filter_by(uuid=uuid).\ filter_by(mac=mac).\ diff --git a/nova/nova/db/sqlalchemy/utils.py b/nova/nova/db/sqlalchemy/utils.py index 0b0f2c9..82eb2e8 100644 --- a/nova/nova/db/sqlalchemy/utils.py +++ b/nova/nova/db/sqlalchemy/utils.py @@ -114,7 +114,7 @@ def _get_unique_constraints_in_sqlite(migrate_engine, table_name): """ SELECT sql FROM - sqlite_master + sqlite_master WHERE type = 'table' AND name = :table_name; diff --git a/nova/nova/network/ldapdns.py b/nova/nova/network/ldapdns.py index 4d5bb0c..665c47a 100644 --- a/nova/nova/network/ldapdns.py +++ 
b/nova/nova/network/ldapdns.py @@ -42,9 +42,9 @@ default='password', help='Password for LDAP DNS', secret=True), - cfg.StrOpt('ldap_dns_soa_hostmaster', - default='hostmaster@example.org', - help='Hostmaster for LDAP DNS driver Statement of Authority'), + cfg.StrOpt('ldap_dns_soa_hostmaster', + default='hostmaster@example.org', + help='Hostmaster for LDAP DNS driver Statement of Authority'), cfg.MultiStrOpt('ldap_dns_servers', default=['dns.example.org'], help='DNS Servers for LDAP DNS driver'), @@ -156,7 +156,7 @@ def _soa(cls): date = time.strftime('%Y%m%d%H%M%S') soa = '%s %s %s %s %s %s %s' % ( CONF.ldap_dns_servers[0], - CONF.ldap_dns_soa_hostmaster, + CONF.ldap_dns_soa_hostmaster, date, CONF.ldap_dns_soa_refresh, CONF.ldap_dns_soa_retry, diff --git a/nova/nova/network/linux_net.py b/nova/nova/network/linux_net.py index 9440bfc..bbe8944 100644 --- a/nova/nova/network/linux_net.py +++ b/nova/nova/network/linux_net.py @@ -1538,7 +1538,7 @@ def ensure_bridge(bridge, interface, net_attrs=None, gateway=True, out, err = _execute('brctl', 'addif', bridge, interface, check_exit_code=False, run_as_root=True) if (err and err != "device %s is already a member of a bridge; " - "can't enslave it to bridge %s.\n" % (interface, bridge)): + "can't enslave it to bridge %s.\n" % (interface, bridge)): msg = _('Failed to add interface: %s') % err raise exception.NovaException(msg) diff --git a/nova/nova/network/manager.py b/nova/nova/network/manager.py index 7e74a34..7e5b17b 100644 --- a/nova/nova/network/manager.py +++ b/nova/nova/network/manager.py @@ -581,14 +581,14 @@ def get_instance_nw_info(self, context, instance_id, rxtx_factor, where network = dict containing pertinent data from a network db object and info = dict containing pertinent networking data """ - use_slave = kwargs.get('use_slave') or False + use_subordinate = kwargs.get('use_subordinate') or False if not uuidutils.is_uuid_like(instance_id): instance_id = instance_uuid instance_uuid = instance_id vifs = 
vif_obj.VirtualInterfaceList.get_by_instance_uuid(context, - instance_uuid, use_slave=use_slave) + instance_uuid, use_subordinate=use_subordinate) networks = {} for vif in vifs: diff --git a/nova/nova/network/neutronv2/api.py b/nova/nova/network/neutronv2/api.py index 763503b..ea3e98d 100644 --- a/nova/nova/network/neutronv2/api.py +++ b/nova/nova/network/neutronv2/api.py @@ -470,13 +470,13 @@ def show_port(self, context, port_id): return neutronv2.get_client(context).show_port(port_id) def get_instance_nw_info(self, context, instance, networks=None, - port_ids=None, use_slave=False): + port_ids=None, use_subordinate=False): """Return network information for specified instance and update cache. """ - # NOTE(geekinutah): It would be nice if use_slave had us call - # special APIs that pummeled slaves instead of - # the master. For now we just ignore this arg. + # NOTE(geekinutah): It would be nice if use_subordinate had us call + # special APIs that pummeled subordinates instead of + # the main. For now we just ignore this arg. 
with lockutils.lock('refresh_cache-%s' % instance['uuid']): result = self._get_instance_nw_info(context, instance, networks, port_ids) diff --git a/nova/nova/objects/block_device.py b/nova/nova/objects/block_device.py index 87dd730..7472e1b 100644 --- a/nova/nova/objects/block_device.py +++ b/nova/nova/objects/block_device.py @@ -175,7 +175,7 @@ def obj_load_attr(self, attrname): class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: BlockDeviceMapping <= version 1.1 - # Version 1.2: Added use_slave to get_by_instance_uuid + # Version 1.2: Added use_subordinate to get_by_instance_uuid VERSION = '1.2' fields = { @@ -188,9 +188,9 @@ class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): } @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_bdms = db.block_device_mapping_get_all_by_instance( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) return base.obj_make_list( context, cls(), BlockDeviceMapping, db_bdms or []) diff --git a/nova/nova/objects/instance.py b/nova/nova/objects/instance.py index d9b8a2b..c5cfb4f 100644 --- a/nova/nova/objects/instance.py +++ b/nova/nova/objects/instance.py @@ -72,7 +72,7 @@ class Instance(base.NovaPersistentObject, base.NovaObject): # Version 1.7: String attributes updated to support unicode # Version 1.8: 'security_groups' and 'pci_devices' cannot be None # Version 1.9: Make uuid a non-None real string - # Version 1.10: Added use_slave to refresh and get_by_uuid + # Version 1.10: Added use_subordinate to refresh and get_by_uuid # Version 1.11: Update instance from database during destroy # Version 1.12: Added ephemeral_key_uuid # Version 1.13: Added delete_metadata_key() @@ -304,13 +304,13 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None): return instance 
@base.remotable_classmethod - def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): + def get_by_uuid(cls, context, uuid, expected_attrs=None, use_subordinate=False): if expected_attrs is None: expected_attrs = ['info_cache', 'security_groups'] columns_to_join = _expected_cols(expected_attrs) db_inst = db.instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @@ -483,12 +483,12 @@ def _handle_cell_update_from_api(): self.obj_reset_changes() @base.remotable - def refresh(self, context, use_slave=False): + def refresh(self, context, use_subordinate=False): extra = [field for field in INSTANCE_OPTIONAL_ATTRS if self.obj_attr_is_set(field)] current = self.__class__.get_by_uuid(context, uuid=self.uuid, expected_attrs=extra, - use_slave=use_slave) + use_subordinate=use_subordinate) # NOTE(danms): We orphan the instance copy so we do not unexpectedly # trigger a lazy-load (which would mean we failed to calculate the # expected_attrs properly) @@ -597,10 +597,10 @@ def _make_instance_list(context, inst_list, db_inst_list, expected_attrs): class InstanceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version - # Version 1.1: Added use_slave to get_by_host + # Version 1.1: Added use_subordinate to get_by_host # Instance <= version 1.9 # Version 1.2: Instance <= version 1.11 - # Version 1.3: Added use_slave to get_by_filters + # Version 1.3: Added use_subordinate to get_by_filters # Version 1.4: Instance <= version 1.12 # Version 1.5: Added method get_active_by_window_joined. 
# Version 1.6: Instance <= version 1.13 @@ -622,19 +622,19 @@ class InstanceList(base.ObjectListBase, base.NovaObject): @base.remotable_classmethod def get_by_filters(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, - marker=None, expected_attrs=None, use_slave=False): + marker=None, expected_attrs=None, use_subordinate=False): db_inst_list = db.instance_get_all_by_filters( context, filters, sort_key, sort_dir, limit=limit, marker=marker, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @base.remotable_classmethod - def get_by_host(cls, context, host, expected_attrs=None, use_slave=False): + def get_by_host(cls, context, host, expected_attrs=None, use_subordinate=False): db_inst_list = db.instance_get_all_by_host( context, host, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) diff --git a/nova/nova/objects/migration.py b/nova/nova/objects/migration.py index 8630ebe..b52c134 100644 --- a/nova/nova/objects/migration.py +++ b/nova/nova/objects/migration.py @@ -83,7 +83,7 @@ def instance(self): class MigrationList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Migration <= 1.1 - # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute + # Version 1.1: Added use_subordinate to get_unconfirmed_by_dest_compute VERSION = '1.1' fields = { @@ -97,9 +97,9 @@ class MigrationList(base.ObjectListBase, base.NovaObject): @base.remotable_classmethod def get_unconfirmed_by_dest_compute(cls, context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): db_migrations = db.migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute, use_slave=use_slave) + context, confirm_window, dest_compute, 
use_subordinate=use_subordinate) return base.obj_make_list(context, MigrationList(), Migration, db_migrations) diff --git a/nova/nova/objects/virtual_interface.py b/nova/nova/objects/virtual_interface.py index cd62961..03db681 100644 --- a/nova/nova/objects/virtual_interface.py +++ b/nova/nova/objects/virtual_interface.py @@ -93,7 +93,7 @@ def get_all(cls, context): return base.obj_make_list(context, cls(), VirtualInterface, db_vifs) @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid, - use_slave=use_slave) + use_subordinate=use_subordinate) return base.obj_make_list(context, cls(), VirtualInterface, db_vifs) diff --git a/nova/nova/openstack/common/db/sqlalchemy/migration.py b/nova/nova/openstack/common/db/sqlalchemy/migration.py index f48c14a..2d83390 100644 --- a/nova/nova/openstack/common/db/sqlalchemy/migration.py +++ b/nova/nova/openstack/common/db/sqlalchemy/migration.py @@ -64,7 +64,7 @@ def _get_unique_constraints(self, table): data = table.metadata.bind.execute( """SELECT sql - FROM sqlite_master + FROM sqlite_master WHERE type='table' AND name=:table_name""", diff --git a/nova/nova/openstack/common/gettextutils.py b/nova/nova/openstack/common/gettextutils.py index 1b95c57..205dc24 100644 --- a/nova/nova/openstack/common/gettextutils.py +++ b/nova/nova/openstack/common/gettextutils.py @@ -326,9 +326,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/nova/nova/tests/api/ec2/test_cloud.py b/nova/nova/tests/api/ec2/test_cloud.py index 00ea03e..48d505b 100644 --- a/nova/nova/tests/api/ec2/test_cloud.py +++ b/nova/nova/tests/api/ec2/test_cloud.py @@ -2425,7 +2425,7 @@ def fake_show(meh, context, id): self.stubs.Set(fake._FakeImageService, 'show', fake_show) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': volumes[0], 'snapshot_id': snapshots[0], @@ -2500,7 +2500,7 @@ def test_create_image_instance_store(self): ec2_instance_id = self._run_instance(**kwargs) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': volumes[0], 'snapshot_id': snapshots[0], diff --git a/nova/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/nova/tests/api/openstack/compute/contrib/test_disk_config.py index 9f60a03..d40be0d 100644 --- a/nova/nova/tests/api/openstack/compute/contrib/test_disk_config.py +++ b/nova/nova/tests/api/openstack/compute/contrib/test_disk_config.py @@ -65,7 +65,7 @@ def fake_instance_get(context, id_): self.stubs.Set(db, 'instance_get', fake_instance_get) def fake_instance_get_by_uuid(context, uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): for instance in FAKE_INSTANCES: if uuid == instance['uuid']: return instance diff --git a/nova/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/nova/tests/api/openstack/compute/contrib/test_instance_actions.py index 
0e931a2..3f98c61 100644 --- a/nova/nova/tests/api/openstack/compute/contrib/test_instance_actions.py +++ b/nova/nova/tests/api/openstack/compute/contrib/test_instance_actions.py @@ -78,7 +78,7 @@ def test_list_actions_restricted_by_project(self): def fake_instance_get_by_uuid(context, instance_id, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -96,7 +96,7 @@ def test_get_action_restricted_by_project(self): def fake_instance_get_by_uuid(context, instance_id, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -118,7 +118,7 @@ def setUp(self): def fake_get(self, context, instance_uuid): return {'uuid': instance_uuid} - def fake_instance_get_by_uuid(context, instance_id, use_slave=False): + def fake_instance_get_by_uuid(context, instance_id, use_subordinate=False): return {'name': 'fake', 'project_id': context.project_id} self.stubs.Set(compute_api.API, 'get', fake_get) diff --git a/nova/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/nova/tests/api/openstack/compute/contrib/test_security_groups.py index 7029de5..003e268 100644 --- a/nova/nova/tests/api/openstack/compute/contrib/test_security_groups.py +++ b/nova/nova/tests/api/openstack/compute/contrib/test_security_groups.py @@ -81,7 +81,7 @@ def security_group_rule_db(rule, id=None): def return_server(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': int(server_id), 'power_state': 0x01, @@ -92,7 +92,7 @@ def return_server(context, server_id, def return_server_by_uuid(context, server_uuid, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'power_state': 0x01, @@ 
-402,7 +402,7 @@ def test_get_security_group_by_instance(self): expected = {'security_groups': groups} def return_instance(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertEqual(server_id, FAKE_UUID1) return return_server_by_uuid(context, server_id) @@ -429,7 +429,7 @@ def test_get_security_group_empty_for_instance(self, mock_sec_group, expected = {'security_groups': []} def return_instance(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertEqual(server_id, FAKE_UUID1) return return_server_by_uuid(context, server_id) mock_db_get_ins.side_effect = return_instance @@ -1699,7 +1699,7 @@ def construct(self): root.set('id') root.set('imageRef') root.set('flavorRef') - return xmlutil.MasterTemplate(root, 1, + return xmlutil.MainTemplate(root, 1, nsmap={None: xmlutil.XMLNS_V11}) def _encode_body(self, body): diff --git a/nova/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py index 1d6177d..0bad7fb 100644 --- a/nova/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py +++ b/nova/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py @@ -25,7 +25,7 @@ def fake_instance_get(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): result = fakes.stub_instance(id=1, uuid=instance_id) result['created_at'] = None result['deleted_at'] = None diff --git a/nova/nova/tests/api/openstack/compute/contrib/test_shelve.py b/nova/nova/tests/api/openstack/compute/contrib/test_shelve.py index 4cd3939..bfcc58c 100644 --- a/nova/nova/tests/api/openstack/compute/contrib/test_shelve.py +++ b/nova/nova/tests/api/openstack/compute/contrib/test_shelve.py @@ -26,7 +26,7 @@ def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, 
use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/nova/tests/api/openstack/compute/contrib/test_volumes.py index 9c1448d..e77a6cd 100644 --- a/nova/nova/tests/api/openstack/compute/contrib/test_volumes.py +++ b/nova/nova/tests/api/openstack/compute/contrib/test_volumes.py @@ -96,7 +96,7 @@ def fake_compute_volume_snapshot_create(self, context, volume_id, pass -def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False): +def fake_bdms_get_all_by_instance(context, instance_uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': instance_uuid, diff --git a/nova/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py b/nova/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py index e053b70..a4a0e16 100644 --- a/nova/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py +++ b/nova/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py @@ -79,7 +79,7 @@ def test_list_actions_restricted_by_project(self): policy.set_rules(rules) def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -96,7 +96,7 @@ def test_get_action_restricted_by_project(self): policy.set_rules(rules) def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -120,7 +120,7 @@ def fake_get(self, context, instance_uuid, expected_attrs=None, return {'uuid': instance_uuid} def fake_instance_get_by_uuid(context, instance_id, - 
columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index 6313007..ebb9686 100644 --- a/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -466,7 +466,7 @@ def test_rebuild_admin_password_pass_disabled(self): def test_rebuild_server_not_found(self): def server_not_found(self, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=instance_id) self.stubs.Set(db, 'instance_get_by_uuid', server_not_found) @@ -856,7 +856,7 @@ def _fake_id(x): image_service.create(None, original_image) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', @@ -931,7 +931,7 @@ def _fake_id(x): image_service = glance.get_default_image_service() def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', diff --git a/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py b/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py index 996d604..fc03b1f 100644 --- a/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py +++ b/nova/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py @@ -89,7 +89,7 @@ def return_server(context, server_id, columns_to_join=None): def return_server_by_uuid(context, server_uuid, - 
columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', @@ -101,7 +101,7 @@ def return_server_by_uuid(context, server_uuid, def return_server_nonexistent(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=server_id) @@ -575,7 +575,7 @@ def _return_server_in_build(self, context, server_id, 'vm_state': vm_states.BUILDING}) def _return_server_in_build_by_uuid(self, context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', diff --git a/nova/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index a491532..5219b09 100644 --- a/nova/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -107,7 +107,7 @@ def fake_start_stop_invalid_state(self, context, instance): def fake_instance_get_by_uuid_not_found(context, uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): raise exception.InstanceNotFound(instance_id=uuid) @@ -689,7 +689,7 @@ def fake_get_all(compute_self, context, search_opts=None, def test_tenant_id_filter_converts_to_project_id_for_admin(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'newfake') @@ -709,7 +709,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_tenant_id_filter_no_admin_context(self): def fake_get_all(context, filters=None, 
sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertNotEqual(filters, None) self.assertEqual(filters['project_id'], 'fake') @@ -725,7 +725,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_tenant_id_filter_implies_all_tenants(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertNotEqual(filters, None) # The project_id assertion checks that the project_id @@ -747,7 +747,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_normal(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -764,7 +764,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_one(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -781,7 +781,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_zero(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] @@ -798,7 +798,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_false(self): def fake_get_all(context, 
filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] @@ -831,7 +831,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_admin_restricted_tenant(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'fake') @@ -849,7 +849,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_pass_policy(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=[]): self.assertIsNotNone(filters) self.assertNotIn('project_id', filters) diff --git a/nova/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py b/nova/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py index 59f6450..40ac16c 100644 --- a/nova/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py +++ b/nova/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py @@ -26,7 +26,7 @@ def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova/nova/tests/api/openstack/compute/test_server_actions.py b/nova/nova/tests/api/openstack/compute/test_server_actions.py index f26739b..647d961 100644 --- a/nova/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/nova/tests/api/openstack/compute/test_server_actions.py @@ -631,7 +631,7 @@ def test_rebuild_admin_pass_pass_disabled(self): def 
test_rebuild_server_not_found(self): def server_not_found(self, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=instance_id) self.stubs.Set(db, 'instance_get_by_uuid', server_not_found) @@ -1022,7 +1022,7 @@ def _fake_id(x): image_service.create(None, original_image) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', @@ -1097,7 +1097,7 @@ def _fake_id(x): image_service = glance.get_default_image_service() def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', diff --git a/nova/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/nova/tests/api/openstack/compute/test_server_metadata.py index 08937f5..71c2519 100644 --- a/nova/nova/tests/api/openstack/compute/test_server_metadata.py +++ b/nova/nova/tests/api/openstack/compute/test_server_metadata.py @@ -90,7 +90,7 @@ def return_server(context, server_id, columns_to_join=None): def return_server_by_uuid(context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', @@ -102,7 +102,7 @@ def return_server_by_uuid(context, server_uuid, def return_server_nonexistent(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=server_id) @@ -568,7 +568,7 @@ def _return_server_in_build(self, context, server_id, 'vm_state': vm_states.BUILDING}) def _return_server_in_build_by_uuid(self, context, server_uuid, - columns_to_join=None, use_slave=False): + 
columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', diff --git a/nova/nova/tests/api/openstack/compute/test_servers.py b/nova/nova/tests/api/openstack/compute/test_servers.py index 7b06f25..5e4a600 100644 --- a/nova/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/nova/tests/api/openstack/compute/test_servers.py @@ -659,7 +659,7 @@ def fake_get_all(compute_self, context, search_opts=None, def test_tenant_id_filter_converts_to_project_id_for_admin(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'newfake') self.assertFalse(filters.get('tenant_id')) @@ -678,7 +678,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_normal(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -694,7 +694,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_one(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -710,7 +710,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_zero(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] 
@@ -726,7 +726,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_false(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] @@ -757,7 +757,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_admin_restricted_tenant(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'fake') return [fakes.stub_instance(100)] @@ -774,7 +774,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_pass_policy(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIsNotNone(filters) self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] diff --git a/nova/nova/tests/api/openstack/fakes.py b/nova/nova/tests/api/openstack/fakes.py index e4a8006..1231c29 100644 --- a/nova/nova/tests/api/openstack/fakes.py +++ b/nova/nova/tests/api/openstack/fakes.py @@ -440,7 +440,7 @@ def get_fake_uuid(token=0): def fake_instance_get(**kwargs): - def _return_server(context, uuid, columns_to_join=None, use_slave=False): + def _return_server(context, uuid, columns_to_join=None, use_subordinate=False): return stub_instance(1, **kwargs) return _return_server @@ -463,8 +463,8 @@ def _return_servers(context, *args, **kwargs): if 'columns_to_join' in kwargs: kwargs.pop('columns_to_join') - if 'use_slave' in kwargs: - kwargs.pop('use_slave') + if 'use_subordinate' in kwargs: + kwargs.pop('use_subordinate') for i in xrange(num_servers): uuid = 
get_fake_uuid(i) @@ -711,7 +711,7 @@ def stub_snapshot_get_all(self, context): stub_snapshot(102, project_id='superduperfake')] -def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False): +def stub_bdm_get_all_by_instance(context, instance_uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}), diff --git a/nova/nova/tests/api/openstack/test_xmlutil.py b/nova/nova/tests/api/openstack/test_xmlutil.py index 75822a5..774b5da 100644 --- a/nova/nova/tests/api/openstack/test_xmlutil.py +++ b/nova/nova/tests/api/openstack/test_xmlutil.py @@ -391,17 +391,17 @@ def test__render(self): attr2=xmlutil.ConstantSelector(2), attr3=xmlutil.ConstantSelector(3)) - # Create a master template element - master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) + # Create a main template element + main_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) - # Create a couple of slave template element - slave_elems = [ + # Create a couple of subordinate template element + subordinate_elems = [ xmlutil.TemplateElement('test', attr2=attrs['attr2']), xmlutil.TemplateElement('test', attr3=attrs['attr3']), ] # Try the render - elem = master_elem._render(None, None, slave_elems, None) + elem = main_elem._render(None, None, subordinate_elems, None) # Verify the particulars of the render self.assertEqual(elem.tag, 'test') @@ -413,7 +413,7 @@ def test__render(self): parent = etree.Element('parent') # Try the render again... 
- elem = master_elem._render(parent, None, slave_elems, dict(a='foo')) + elem = main_elem._render(parent, None, subordinate_elems, dict(a='foo')) # Verify the particulars of the render self.assertEqual(len(parent), 1) @@ -550,47 +550,47 @@ def test__nsmap(self): self.assertEqual(len(nsmap), 1) self.assertEqual(nsmap['a'], 'foo') - def test_master_attach(self): - # Set up a master template + def test_main_attach(self): + # Set up a main template elem = xmlutil.TemplateElement('test') - tmpl = xmlutil.MasterTemplate(elem, 1) + tmpl = xmlutil.MainTemplate(elem, 1) - # Make sure it has a root but no slaves + # Make sure it has a root but no subordinates self.assertEqual(tmpl.root, elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) self.assertTrue(repr(tmpl)) - # Try to attach an invalid slave + # Try to attach an invalid subordinate bad_elem = xmlutil.TemplateElement('test2') self.assertRaises(ValueError, tmpl.attach, bad_elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) - # Try to attach an invalid and a valid slave + # Try to attach an invalid and a valid subordinate good_elem = xmlutil.TemplateElement('test') self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) # Try to attach an inapplicable template class InapplicableTemplate(xmlutil.Template): - def apply(self, master): + def apply(self, main): return False inapp_tmpl = InapplicableTemplate(good_elem) tmpl.attach(inapp_tmpl) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) # Now try attaching an applicable template tmpl.attach(good_elem) - self.assertEqual(len(tmpl.slaves), 1) - self.assertEqual(tmpl.slaves[0].root, good_elem) + self.assertEqual(len(tmpl.subordinates), 1) + self.assertEqual(tmpl.subordinates[0].root, good_elem) - def test_master_copy(self): - # Construct a master template + def 
test_main_copy(self): + # Construct a main template elem = xmlutil.TemplateElement('test') - tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo')) + tmpl = xmlutil.MainTemplate(elem, 1, nsmap=dict(a='foo')) - # Give it a slave - slave = xmlutil.TemplateElement('test') - tmpl.attach(slave) + # Give it a subordinate + subordinate = xmlutil.TemplateElement('test') + tmpl.attach(subordinate) # Construct a copy copy = tmpl.copy() @@ -600,43 +600,43 @@ def test_master_copy(self): self.assertEqual(tmpl.root, copy.root) self.assertEqual(tmpl.version, copy.version) self.assertEqual(id(tmpl.nsmap), id(copy.nsmap)) - self.assertNotEqual(id(tmpl.slaves), id(copy.slaves)) - self.assertEqual(len(tmpl.slaves), len(copy.slaves)) - self.assertEqual(tmpl.slaves[0], copy.slaves[0]) + self.assertNotEqual(id(tmpl.subordinates), id(copy.subordinates)) + self.assertEqual(len(tmpl.subordinates), len(copy.subordinates)) + self.assertEqual(tmpl.subordinates[0], copy.subordinates[0]) - def test_slave_apply(self): - # Construct a master template + def test_subordinate_apply(self): + # Construct a main template elem = xmlutil.TemplateElement('test') - master = xmlutil.MasterTemplate(elem, 3) + main = xmlutil.MainTemplate(elem, 3) - # Construct a slave template with applicable minimum version - slave = xmlutil.SlaveTemplate(elem, 2) - self.assertEqual(slave.apply(master), True) - self.assertTrue(repr(slave)) + # Construct a subordinate template with applicable minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 2) + self.assertEqual(subordinate.apply(main), True) + self.assertTrue(repr(subordinate)) - # Construct a slave template with equal minimum version - slave = xmlutil.SlaveTemplate(elem, 3) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with equal minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 3) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with inapplicable minimum version - 
slave = xmlutil.SlaveTemplate(elem, 4) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with inapplicable minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 4) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with applicable version range - slave = xmlutil.SlaveTemplate(elem, 2, 4) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with applicable version range + subordinate = xmlutil.SubordinateTemplate(elem, 2, 4) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with low version range - slave = xmlutil.SlaveTemplate(elem, 1, 2) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with low version range + subordinate = xmlutil.SubordinateTemplate(elem, 1, 2) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with high version range - slave = xmlutil.SlaveTemplate(elem, 4, 5) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with high version range + subordinate = xmlutil.SubordinateTemplate(elem, 4, 5) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with matching version range - slave = xmlutil.SlaveTemplate(elem, 3, 3) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with matching version range + subordinate = xmlutil.SubordinateTemplate(elem, 3, 3) + self.assertEqual(subordinate.apply(main), True) def test__serialize(self): # Our test object to serialize @@ -657,7 +657,7 @@ def test__serialize(self): }, } - # Set up our master template + # Set up our main template root = xmlutil.TemplateElement('test', selector='test', name='name') value = xmlutil.SubTemplateElement(root, 'value', selector='values') @@ -665,22 +665,22 @@ def test__serialize(self): attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs') xmlutil.SubTemplateElement(attrs, 'attr', 
selector=xmlutil.get_items, key=0, value=1) - master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo')) + main = xmlutil.MainTemplate(root, 1, nsmap=dict(f='foo')) - # Set up our slave template - root_slave = xmlutil.TemplateElement('test', selector='test') - image = xmlutil.SubTemplateElement(root_slave, 'image', + # Set up our subordinate template + root_subordinate = xmlutil.TemplateElement('test', selector='test') + image = xmlutil.SubTemplateElement(root_subordinate, 'image', selector='image', id='id') image.text = xmlutil.Selector('name') - slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar')) + subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1, nsmap=dict(b='bar')) - # Attach the slave to the master... - master.attach(slave) + # Attach the subordinate to the main... + main.attach(subordinate) # Try serializing our object - siblings = master._siblings() - nsmap = master._nsmap() - result = master._serialize(None, obj, siblings, nsmap) + siblings = main._siblings() + nsmap = main._nsmap() + result = main._serialize(None, obj, siblings, nsmap) # Now we get to manually walk the element tree... 
self.assertEqual(result.tag, 'test') @@ -712,14 +712,14 @@ def test_serialize_with_colon_tagname_support(self): expected_xml = (("\n" '999' '')) - # Set up our master template + # Set up our main template root = xmlutil.TemplateElement('extra_specs', selector='extra_specs', colon_ns=True) value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar', colon_ns=True) value.text = xmlutil.Selector() - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def test__serialize_with_empty_datum_selector(self): @@ -733,76 +733,76 @@ def test__serialize_with_empty_datum_selector(self): root = xmlutil.TemplateElement('test', selector='test', name='name') - master = xmlutil.MasterTemplate(root, 1) - root_slave = xmlutil.TemplateElement('test', selector='test') - image = xmlutil.SubTemplateElement(root_slave, 'image', + main = xmlutil.MainTemplate(root, 1) + root_subordinate = xmlutil.TemplateElement('test', selector='test') + image = xmlutil.SubTemplateElement(root_subordinate, 'image', selector='image') image.set('id') xmlutil.make_links(image, 'links') - slave = xmlutil.SlaveTemplate(root_slave, 1) - master.attach(slave) + subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1) + main.attach(subordinate) - siblings = master._siblings() - result = master._serialize(None, obj, siblings) + siblings = main._siblings() + result = main._serialize(None, obj, siblings) self.assertEqual(result.tag, 'test') self.assertEqual(result[0].tag, 'image') self.assertEqual(result[0].get('id'), str(obj['test']['image'])) -class MasterTemplateBuilder(xmlutil.TemplateBuilder): +class MainTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') - return xmlutil.MasterTemplate(elem, 1) + return xmlutil.MainTemplate(elem, 1) -class SlaveTemplateBuilder(xmlutil.TemplateBuilder): +class 
SubordinateTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') - return xmlutil.SlaveTemplate(elem, 1) + return xmlutil.SubordinateTemplate(elem, 1) class TemplateBuilderTest(test.NoDBTestCase): - def test_master_template_builder(self): + def test_main_template_builder(self): # Make sure the template hasn't been built yet - self.assertIsNone(MasterTemplateBuilder._tmpl) + self.assertIsNone(MainTemplateBuilder._tmpl) # Now, construct the template - tmpl1 = MasterTemplateBuilder() + tmpl1 = MainTemplateBuilder() # Make sure that there is a template cached... - self.assertIsNotNone(MasterTemplateBuilder._tmpl) + self.assertIsNotNone(MainTemplateBuilder._tmpl) # Make sure it wasn't what was returned... - self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) + self.assertNotEqual(MainTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt - cached = MasterTemplateBuilder._tmpl - tmpl2 = MasterTemplateBuilder() - self.assertEqual(MasterTemplateBuilder._tmpl, cached) + cached = MainTemplateBuilder._tmpl + tmpl2 = MainTemplateBuilder() + self.assertEqual(MainTemplateBuilder._tmpl, cached) # Make sure we're always getting fresh copies self.assertNotEqual(tmpl1, tmpl2) # Make sure we can override the copying behavior - tmpl3 = MasterTemplateBuilder(False) - self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3) + tmpl3 = MainTemplateBuilder(False) + self.assertEqual(MainTemplateBuilder._tmpl, tmpl3) - def test_slave_template_builder(self): + def test_subordinate_template_builder(self): # Make sure the template hasn't been built yet - self.assertIsNone(SlaveTemplateBuilder._tmpl) + self.assertIsNone(SubordinateTemplateBuilder._tmpl) # Now, construct the template - tmpl1 = SlaveTemplateBuilder() + tmpl1 = SubordinateTemplateBuilder() # Make sure there is a template cached... 
- self.assertIsNotNone(SlaveTemplateBuilder._tmpl) + self.assertIsNotNone(SubordinateTemplateBuilder._tmpl) # Make sure it was what was returned... - self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) + self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt - tmpl2 = SlaveTemplateBuilder() - self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) + tmpl2 = SubordinateTemplateBuilder() + self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1) # Make sure we're always getting the cached copy self.assertEqual(tmpl1, tmpl2) @@ -828,7 +828,7 @@ def test_make_flat_dict(self): expected_xml = ("\n" 'foobar') root = xmlutil.make_flat_dict('wrapper') - tmpl = xmlutil.MasterTemplate(root, 1) + tmpl = xmlutil.MainTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) @@ -836,7 +836,7 @@ def test_make_flat_dict(self): 'foobar' "") root = xmlutil.make_flat_dict('wrapper', ns='ns') - tmpl = xmlutil.MasterTemplate(root, 1) + tmpl = xmlutil.MainTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) @@ -846,10 +846,10 @@ def test_make_flat_dict_with_colon_tagname_support(self): expected_xml = (("\n" '999' '')) - # Set up our master template + # Set up our main template root = xmlutil.make_flat_dict('extra_specs', colon_ns=True) - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def test_make_flat_dict_with_parent(self): @@ -866,8 +866,8 @@ def test_make_flat_dict_with_parent(self): root.set('id') extra = xmlutil.make_flat_dict('extra_info', root=root) root.append(extra) - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def 
test_make_flat_dict_with_dicts(self): @@ -884,8 +884,8 @@ def test_make_flat_dict_with_dicts(self): ignore_sub_dicts=True) extra = xmlutil.make_flat_dict('extra_info', selector='extra_info') root.append(extra) - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def test_safe_parse_xml(self): diff --git a/nova/nova/tests/compute/test_compute.py b/nova/nova/tests/compute/test_compute.py index b126a52..50a7f6d 100644 --- a/nova/nova/tests/compute/test_compute.py +++ b/nova/nova/tests/compute/test_compute.py @@ -603,7 +603,7 @@ def test_poll_bandwidth_usage_not_implemented(self): time.time().AndReturn(21) instance_obj.InstanceList.get_by_host(ctxt, 'fake-mini', - use_slave=True).AndReturn([]) + use_subordinate=True).AndReturn([]) self.compute.driver.get_all_bw_counters([]).AndRaise( NotImplementedError) self.mox.ReplayAll() @@ -5731,9 +5731,9 @@ def test_cleanup_running_deleted_instances_reap(self): self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\ AndRaise(test.TestingException) block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(ctxt, - inst1.uuid, use_slave=True).AndReturn(bdms) + inst1.uuid, use_subordinate=True).AndReturn(bdms) block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(ctxt, - inst2.uuid, use_slave=True).AndReturn(bdms) + inst2.uuid, use_subordinate=True).AndReturn(bdms) self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\ AndReturn(None) @@ -5838,7 +5838,7 @@ def test_get_instance_nw_info(self): db.instance_get_by_uuid(self.context, fake_inst['uuid'], columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.compute.network_api.get_instance_nw_info(self.context, mox.IsA(instance_obj.Instance)).AndReturn(fake_nw_info) @@ -5869,13 +5869,13 @@ def test_heal_instance_info_cache(self): 'get_nw_info': 0, 
'expected_instance': None} def fake_instance_get_all_by_host(context, host, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): call_info['get_all_by_host'] += 1 self.assertEqual([], columns_to_join) return instances[:] def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): if instance_uuid not in instance_map: raise exception.InstanceNotFound(instance_id=instance_uuid) call_info['get_by_uuid'] += 1 @@ -5884,7 +5884,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance_map[instance_uuid] # NOTE(comstud): Override the stub in setUp() - def fake_get_instance_nw_info(context, instance, use_slave=False): + def fake_get_instance_nw_info(context, instance, use_subordinate=False): # Note that this exception gets caught in compute/manager # and is ignored. However, the below increment of # 'get_nw_info' won't happen, and you'll get an assert @@ -5958,7 +5958,7 @@ def test_poll_rescued_instances(self): unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False} def fake_instance_get_all_by_filters(context, filters, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): self.assertEqual(columns_to_join, ["system_metadata"]) return instances @@ -6017,7 +6017,7 @@ def test_poll_unconfirmed_resizes(self): migrations.append(fake_mig) def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIn('metadata', columns_to_join) self.assertIn('system_metadata', columns_to_join) # raise InstanceNotFound exception for uuid 'noexist' @@ -6028,7 +6028,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance def fake_migration_get_unconfirmed_by_dest_compute(context, - resize_confirm_window, dest_compute, use_slave=False): + resize_confirm_window, dest_compute, use_subordinate=False): self.assertEqual(dest_compute, 
CONF.host) return migrations @@ -6150,7 +6150,7 @@ def test_instance_build_timeout_mixed_instances(self): sort_dir, marker=None, columns_to_join=[], - use_slave=True, + use_subordinate=True, limit=None) self.assertThat(conductor_instance_update.mock_calls, testtools_matchers.HasLength(len(old_instances))) @@ -6506,7 +6506,7 @@ def test_reclaim_queued_deletes_continue_on_error(self): instance_obj.InstanceList.get_by_filters( ctxt, mox.IgnoreArg(), expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, - use_slave=True + use_subordinate=True ).AndReturn(instances) # The first instance delete fails. @@ -6547,12 +6547,12 @@ def test_sync_power_states(self): {'state': power_state.RUNNING}) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.RUNNING, - use_slave=True) + use_subordinate=True) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( {'state': power_state.SHUTDOWN}) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.SHUTDOWN, - use_slave=True) + use_subordinate=True) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) @@ -7512,7 +7512,7 @@ def test_get(self): instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault'])) def fake_db_get(_context, _instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return exp_instance self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get) @@ -7533,7 +7533,7 @@ def test_get_with_admin_context(self): instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault'])) def fake_db_get(context, instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return exp_instance self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get) @@ -9848,7 +9848,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): fake_driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="host", - aggregate=jsonutils.to_primitive(self.aggr), slave_info=None) + 
aggregate=jsonutils.to_primitive(self.aggr), subordinate_info=None) self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): @@ -9862,36 +9862,36 @@ def fake_driver_remove_from_aggregate(context, aggregate, host, self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="host", - slave_info=None) + subordinate_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) - def test_add_aggregate_host_passes_slave_info_to_driver(self): + def test_add_aggregate_host_passes_subordinate_info_to_driver(self): def driver_add_to_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stubs.Set(self.compute.driver, "add_to_aggregate", driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="the_host", - slave_info="SLAVE_INFO", + subordinate_info="SLAVE_INFO", aggregate=jsonutils.to_primitive(self.aggr)) - def test_remove_from_aggregate_passes_slave_info_to_driver(self): + def test_remove_from_aggregate_passes_subordinate_info_to_driver(self): def driver_remove_from_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stubs.Set(self.compute.driver, "remove_from_aggregate", driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="the_host", - slave_info="SLAVE_INFO") + subordinate_info="SLAVE_INFO") class ComputePolicyTestCase(BaseTestCase): diff --git a/nova/nova/tests/compute/test_compute_api.py 
b/nova/nova/tests/compute/test_compute_api.py index e46567f..ab44607 100644 --- a/nova/nova/tests/compute/test_compute_api.py +++ b/nova/nova/tests/compute/test_compute_api.py @@ -699,7 +699,7 @@ def test_delete_fast_if_host_not_set(self): db.block_device_mapping_get_all_by_instance(self.context, inst.uuid, - use_slave=False).AndReturn([]) + use_subordinate=False).AndReturn([]) inst.save() self.compute_api._create_reservations(self.context, inst, inst.instance_type_id, @@ -803,7 +803,7 @@ def test_delete_soft_rollback(self): timeutils.set_time_override(delete_time) db.block_device_mapping_get_all_by_instance( - self.context, inst.uuid, use_slave=False).AndReturn([]) + self.context, inst.uuid, use_subordinate=False).AndReturn([]) inst.save().AndRaise(test.TestingException) self.mox.ReplayAll() @@ -1573,7 +1573,7 @@ def test_snapshot_volume_backed(self): 'is_public': False } - def fake_get_all_by_instance(context, instance, use_slave=False): + def fake_get_all_by_instance(context, instance, use_subordinate=False): return copy.deepcopy(instance_bdms) def fake_image_create(context, image_meta, data): diff --git a/nova/nova/tests/compute/test_compute_mgr.py b/nova/nova/tests/compute/test_compute_mgr.py index 0d3f78c..886da10 100644 --- a/nova/nova/tests/compute/test_compute_mgr.py +++ b/nova/nova/tests/compute/test_compute_mgr.py @@ -166,7 +166,7 @@ def _do_mock_calls(defer_iptables_apply): context.get_admin_context().AndReturn(fake_context) db.instance_get_all_by_host( fake_context, our_host, columns_to_join=['info_cache'], - use_slave=False + use_subordinate=False ).AndReturn(startup_instances) if defer_iptables_apply: self.compute.driver.filter_defer_apply_on() @@ -249,7 +249,7 @@ def test_init_host_with_deleted_migration(self): context.get_admin_context().AndReturn(fake_context) db.instance_get_all_by_host(fake_context, our_host, columns_to_join=['info_cache'], - use_slave=False + use_subordinate=False ).AndReturn([]) self.compute.init_virt_events() @@ -610,7 +610,7 
@@ def test_get_instances_on_driver(self): inst in driver_instances]}, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None, - use_slave=True).AndReturn( + use_subordinate=True).AndReturn( driver_instances) self.mox.ReplayAll() @@ -651,7 +651,7 @@ def test_get_instances_on_driver_fallback(self): fake_context, filters, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None, - use_slave=True).AndReturn(all_instances) + use_subordinate=True).AndReturn(all_instances) self.mox.ReplayAll() @@ -693,7 +693,7 @@ def _get_sync_instance(self, power_state, vm_state, task_state=None): def test_sync_instance_power_state_match(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.RUNNING) @@ -701,7 +701,7 @@ def test_sync_instance_power_state_match(self): def test_sync_instance_power_state_running_stopped(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) instance.save() self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, @@ -711,7 +711,7 @@ def test_sync_instance_power_state_running_stopped(self): def _test_sync_to_stop(self, power_state, vm_state, driver_power_state, stop=True, force=False): instance = self._get_sync_instance(power_state, vm_state) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) instance.save() self.mox.StubOutWithMock(self.compute.compute_api, 'stop') self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop') diff --git a/nova/nova/tests/compute/test_compute_utils.py b/nova/nova/tests/compute/test_compute_utils.py index 2304e95..85e9f4c 100644 --- a/nova/nova/tests/compute/test_compute_utils.py +++ b/nova/nova/tests/compute/test_compute_utils.py @@ -73,7 +73,7 
@@ def setUp(self): self.data = [] self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', - lambda context, instance, use_slave=False: self.data) + lambda context, instance, use_subordinate=False: self.data) def _update_flavor(self, flavor_info): self.flavor = { diff --git a/nova/nova/tests/compute/test_compute_xen.py b/nova/nova/tests/compute/test_compute_xen.py index 52cff50..34ca996 100644 --- a/nova/nova/tests/compute/test_compute_xen.py +++ b/nova/nova/tests/compute/test_compute_xen.py @@ -51,7 +51,7 @@ def test_sync_power_states_instance_not_found(self): self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') instance_obj.InstanceList.get_by_host(ctxt, - self.compute.host, use_slave=True).AndReturn(instance_list) + self.compute.host, use_subordinate=True).AndReturn(instance_list) self.compute.driver.get_num_instances().AndReturn(1) vm_utils.lookup(self.compute.driver._session, instance['name'], False).AndReturn(None) diff --git a/nova/nova/tests/compute/test_rpcapi.py b/nova/nova/tests/compute/test_rpcapi.py index d4026ea..3e531e7 100644 --- a/nova/nova/tests/compute/test_rpcapi.py +++ b/nova/nova/tests/compute/test_rpcapi.py @@ -102,13 +102,13 @@ def _test_compute_api(self, method, rpc_method, **kwargs): def test_add_aggregate_host(self): self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) # NOTE(russellb) Havana compat self.flags(compute='havana', group='upgrade_levels') self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}, version='2.14') + subordinate_info={}, version='2.14') def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', @@ -538,13 +538,13 @@ def test_refresh_security_group_members(self): def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 
'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) # NOTE(russellb) Havana compat self.flags(compute='havana', group='upgrade_levels') self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}, version='2.15') + subordinate_info={}, version='2.15') def test_remove_fixed_ip_from_instance(self): self._test_compute_api('remove_fixed_ip_from_instance', 'cast', diff --git a/nova/nova/tests/conductor/test_conductor.py b/nova/nova/tests/conductor/test_conductor.py index dff74a7..a1d4579 100644 --- a/nova/nova/tests/conductor/test_conductor.py +++ b/nova/nova/tests/conductor/test_conductor.py @@ -616,21 +616,21 @@ def test_instance_get_all_by_filters(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - columns_to_join=None, use_slave=False) + columns_to_join=None, use_subordinate=False) self.mox.ReplayAll() self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort') - def test_instance_get_all_by_filters_use_slave(self): + def test_instance_get_all_by_filters_use_subordinate(self): filters = {'foo': 'bar'} self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - columns_to_join=None, use_slave=True) + columns_to_join=None, use_subordinate=True) self.mox.ReplayAll() self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - use_slave=True) + use_subordinate=True) def test_instance_get_all_by_host(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_host') @@ -898,21 +898,21 @@ def test_instance_get_all_by_filters(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - columns_to_join=None, use_slave=False) + columns_to_join=None, 
use_subordinate=False) self.mox.ReplayAll() self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort') - def test_instance_get_all_by_filters_use_slave(self): + def test_instance_get_all_by_filters_use_subordinate(self): filters = {'foo': 'bar'} self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - columns_to_join=None, use_slave=True) + columns_to_join=None, use_subordinate=True) self.mox.ReplayAll() self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - use_slave=True) + use_subordinate=True) def _test_stubbed(self, name, dbargs, condargs, db_result_listified=False, db_exception=None): diff --git a/nova/nova/tests/db/test_db_api.py b/nova/nova/tests/db/test_db_api.py index a573753..d018b49 100644 --- a/nova/nova/tests/db/test_db_api.py +++ b/nova/nova/tests/db/test_db_api.py @@ -1215,8 +1215,8 @@ def test_security_group_get_no_instances(self): session = get_session() self.mox.StubOutWithMock(sqlalchemy_api, 'get_session') - sqlalchemy_api.get_session(use_slave=False).AndReturn(session) - sqlalchemy_api.get_session(use_slave=False).AndReturn(session) + sqlalchemy_api.get_session(use_subordinate=False).AndReturn(session) + sqlalchemy_api.get_session(use_subordinate=False).AndReturn(session) self.mox.ReplayAll() security_group = db.security_group_get(self.ctxt, sid, diff --git a/nova/nova/tests/integrated/test_api_samples.py b/nova/nova/tests/integrated/test_api_samples.py index 3098aff..bce0c9b 100644 --- a/nova/nova/tests/integrated/test_api_samples.py +++ b/nova/nova/tests/integrated/test_api_samples.py @@ -3822,7 +3822,7 @@ class VolumeAttachmentsSampleBase(ServersSampleBase): def _stub_db_bdms_get_all_by_instance(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 
'a26887c6-c47b-4654-abb5-dfadf7d3f803', diff --git a/nova/nova/tests/integrated/v3/test_extended_volumes.py b/nova/nova/tests/integrated/v3/test_extended_volumes.py index 22e0479..30a666e 100644 --- a/nova/nova/tests/integrated/v3/test_extended_volumes.py +++ b/nova/nova/tests/integrated/v3/test_extended_volumes.py @@ -31,7 +31,7 @@ class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase): def _stub_compute_api_get_instance_bdms(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803', diff --git a/nova/nova/tests/network/test_linux_net.py b/nova/nova/tests/network/test_linux_net.py index 9ab75fe..7f07e92 100644 --- a/nova/nova/tests/network/test_linux_net.py +++ b/nova/nova/tests/network/test_linux_net.py @@ -259,7 +259,7 @@ def setUp(self): self.context = context.RequestContext('testuser', 'testproject', is_admin=True) - def get_vifs(_context, instance_uuid, use_slave): + def get_vifs(_context, instance_uuid, use_subordinate): return [vif for vif in vifs if vif['instance_uuid'] == instance_uuid] diff --git a/nova/nova/tests/network/test_manager.py b/nova/nova/tests/network/test_manager.py index f7f3f58..21d1b8f 100644 --- a/nova/nova/tests/network/test_manager.py +++ b/nova/nova/tests/network/test_manager.py @@ -420,7 +420,7 @@ def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve): inst = fake_inst(display_name=HOST, uuid=FAKEUUID) db.instance_get_by_uuid(self.context, - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(inst) @@ -473,7 +473,7 @@ def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve): inst = fake_inst(display_name=HOST, uuid=FAKEUUID) db.instance_get_by_uuid(self.context, - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, 
columns_to_join=['info_cache', 'security_groups'] ).AndReturn(inst) @@ -570,7 +570,7 @@ def test_instance_dns(self, reserve): inst = fake_inst(display_name=HOST, uuid=FAKEUUID) db.instance_get_by_uuid(self.context, - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(inst) @@ -703,7 +703,7 @@ def test_vpn_allocate_fixed_ip(self): db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, @@ -751,7 +751,7 @@ def test_allocate_fixed_ip(self): db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, @@ -1318,7 +1318,7 @@ def test_add_fixed_ip_instance_without_vpn_requested_networks(self): ).AndReturn(dict(test_network.fake_network, **networks[0])) db.instance_get_by_uuid(mox.IgnoreArg(), - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, @@ -1602,7 +1602,7 @@ def test_get_instance_nw_info_client_exceptions(self): 'virtual_interface_get_by_instance') manager.db.virtual_interface_get_by_instance( self.context, FAKEUUID, - use_slave=False).AndRaise(exception.InstanceNotFound( + use_subordinate=False).AndRaise(exception.InstanceNotFound( instance_id=FAKEUUID)) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, diff --git a/nova/nova/tests/objects/test_instance.py b/nova/nova/tests/objects/test_instance.py index 
2d35fd9..24886e6 100644 --- a/nova/nova/tests/objects/test_instance.py +++ b/nova/nova/tests/objects/test_instance.py @@ -104,7 +104,7 @@ def test_get_without_expected(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, 'uuid', columns_to_join=[], - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, 'uuid', @@ -123,7 +123,7 @@ def test_get_with_expected(self): db.instance_get_by_uuid( self.context, 'uuid', columns_to_join=exp_cols, - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) fake_faults = test_instance_fault.fake_faults db.instance_fault_get_by_instance_uuids( @@ -155,13 +155,13 @@ def test_load(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) fake_inst2 = dict(self.fake_instance, system_metadata=[{'key': 'foo', 'value': 'bar'}]) db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['system_metadata'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst2) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -186,7 +186,7 @@ def test_get_remote(self): db.instance_get_by_uuid(self.context, 'fake-uuid', columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_instance) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid') @@ -204,13 +204,13 @@ def test_refresh(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(dict(self.fake_instance, host='orig-host')) db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False 
).AndReturn(dict(self.fake_instance, host='new-host')) self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache, @@ -232,7 +232,7 @@ def test_refresh_does_not_recurse(self): self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid') instance.Instance.get_by_uuid(self.context, uuid=inst.uuid, expected_attrs=['metadata'], - use_slave=False + use_subordinate=False ).AndReturn(inst_copy) self.mox.ReplayAll() self.assertRaises(exception.OrphanedObjectError, inst.refresh) @@ -279,7 +279,7 @@ def _save_test_helper(self, cell_type, save_kwargs): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(old_ref) db.instance_update_and_get_original( self.context, fake_uuid, expected_updates, @@ -359,7 +359,7 @@ def test_save_rename_sends_notification(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(old_ref) db.instance_update_and_get_original( self.context, fake_uuid, expected_updates, update_cells=False, @@ -372,7 +372,7 @@ def test_save_rename_sends_notification(self): self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'], - use_slave=False) + use_subordinate=False) self.assertEqual('hello', inst.display_name) inst.display_name = 'goodbye' inst.save() @@ -386,7 +386,7 @@ def test_get_deleted(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -400,7 +400,7 @@ def test_get_not_cleaned(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, 
fake_uuid) @@ -414,7 +414,7 @@ def test_get_cleaned(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -438,7 +438,7 @@ def test_with_info_cache(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) db.instance_info_cache_update(self.context, fake_uuid, {'network_info': nwinfo2_json}) @@ -455,7 +455,7 @@ def test_with_info_cache_none(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid, @@ -481,7 +481,7 @@ def test_with_security_groups(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) db.security_group_update(self.context, 1, {'description': 'changed'} ).AndReturn(fake_inst['security_groups'][0]) @@ -506,7 +506,7 @@ def test_with_empty_security_groups(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -518,7 +518,7 @@ def test_with_empty_pci_devices(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['pci_devices'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid, @@ -564,7 +564,7 @@ def test_with_pci_devices(self): 
self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['pci_devices'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid, @@ -582,7 +582,7 @@ def test_with_fault(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=[], - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) db.instance_fault_get_by_instance_uuids( self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults}) @@ -874,11 +874,11 @@ def test_get_all_by_filters(self): db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid', 'asc', limit=None, marker=None, columns_to_join=['metadata'], - use_slave=False).AndReturn(fakes) + use_subordinate=False).AndReturn(fakes) self.mox.ReplayAll() inst_list = instance.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) @@ -895,12 +895,12 @@ def test_get_all_by_filters_works_for_cleaned(self): {'deleted': True, 'cleaned': False}, 'uuid', 'asc', limit=None, marker=None, columns_to_join=['metadata'], - use_slave=False).AndReturn( + use_subordinate=False).AndReturn( [fakes[1]]) self.mox.ReplayAll() inst_list = instance.InstanceList.get_by_filters( self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) self.assertEqual(1, len(inst_list)) self.assertIsInstance(inst_list.objects[0], instance.Instance) @@ -913,7 +913,7 @@ def test_get_by_host(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_host') db.instance_get_all_by_host(self.context, 'foo', columns_to_join=None, - 
use_slave=False).AndReturn(fakes) + use_subordinate=False).AndReturn(fakes) self.mox.ReplayAll() inst_list = instance.InstanceList.get_by_host(self.context, 'foo') for i in range(0, len(fakes)): @@ -1001,7 +1001,7 @@ def test_with_fault(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_get_all_by_host(self.context, 'host', columns_to_join=[], - use_slave=False + use_subordinate=False ).AndReturn(fake_insts) db.instance_fault_get_by_instance_uuids( self.context, [x['uuid'] for x in fake_insts] @@ -1009,7 +1009,7 @@ def test_with_fault(self): self.mox.ReplayAll() instances = instance.InstanceList.get_by_host(self.context, 'host', expected_attrs=['fault'], - use_slave=False) + use_subordinate=False) self.assertEqual(2, len(instances)) self.assertEqual(fake_faults['fake-uuid'][0], dict(instances[0].fault.iteritems())) diff --git a/nova/nova/tests/objects/test_migration.py b/nova/nova/tests/objects/test_migration.py index 23e1b85..ed4b4ea 100644 --- a/nova/nova/tests/objects/test_migration.py +++ b/nova/nova/tests/objects/test_migration.py @@ -116,7 +116,7 @@ def test_instance(self): db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'], columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) mig = migration.Migration._from_db_object(ctxt, migration.Migration(), @@ -133,11 +133,11 @@ def test_get_unconfirmed_by_dest_compute(self): db, 'migration_get_unconfirmed_by_dest_compute') db.migration_get_unconfirmed_by_dest_compute( ctxt, 'window', 'foo', - use_slave=False).AndReturn(db_migrations) + use_subordinate=False).AndReturn(db_migrations) self.mox.ReplayAll() migrations = ( migration.MigrationList.get_unconfirmed_by_dest_compute( - ctxt, 'window', 'foo', use_slave=False)) + ctxt, 'window', 'foo', use_subordinate=False)) self.assertEqual(2, len(migrations)) for index, db_migration in enumerate(db_migrations): self.compare_obj(migrations[index], db_migration) diff 
--git a/nova/nova/tests/test_metadata.py b/nova/nova/tests/test_metadata.py index fc56c10..26b1983 100644 --- a/nova/nova/tests/test_metadata.py +++ b/nova/nova/tests/test_metadata.py @@ -202,7 +202,7 @@ def test_format_instance_mapping(self): 'default_ephemeral_device': None, 'default_swap_device': None}) - def fake_bdm_get(ctxt, uuid, use_slave=False): + def fake_bdm_get(ctxt, uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 87654321, 'snapshot_id': None, diff --git a/nova/nova/tests/virt/libvirt/test_libvirt.py b/nova/nova/tests/virt/libvirt/test_libvirt.py index 61b5fb3..341824a 100644 --- a/nova/nova/tests/virt/libvirt/test_libvirt.py +++ b/nova/nova/tests/virt/libvirt/test_libvirt.py @@ -5173,7 +5173,7 @@ def _test_destroy_removes_disk(self, volume_fail=False): db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(instance) self.mox.StubOutWithMock(driver, "block_device_info_get_mapping") driver.block_device_info_get_mapping(vol @@ -5280,7 +5280,7 @@ def test_delete_instance_files(self): db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(instance) os.path.exists(mox.IgnoreArg()).AndReturn(False) os.path.exists(mox.IgnoreArg()).AndReturn(True) diff --git a/nova/nova/tests/virt/xenapi/test_xenapi.py b/nova/nova/tests/virt/xenapi/test_xenapi.py index a546ad2..7442111 100644 --- a/nova/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/nova/tests/virt/xenapi/test_xenapi.py @@ -1444,7 +1444,7 @@ def test_uuid_find(self): fake_inst2 = fake_instance.fake_db_instance(id=456) db.instance_get_all_by_host(self.context, fake_inst['host'], columns_to_join=None, - use_slave=False + use_subordinate=False ).AndReturn([fake_inst, fake_inst2]) self.mox.ReplayAll() expected_name = CONF.instance_name_template % 
fake_inst['id'] @@ -1461,7 +1461,7 @@ def fake_aggregate_get_by_host(self, *args, **kwargs): self.stubs.Set(db, "aggregate_get_by_host", fake_aggregate_get_by_host) - self.stubs.Set(self.conn._session, "is_slave", True) + self.stubs.Set(self.conn._session, "is_subordinate", True) self.assertRaises(test.TestingException, self.conn._session._get_host_uuid) @@ -2890,7 +2890,7 @@ def setUp(self): pool_states.POOL_FLAG: 'XenAPI'}} self.aggr = db.aggregate_create(self.context, values) self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', - 'master_compute': 'host', + 'main_compute': 'host', 'availability_zone': 'fake_zone', pool_states.KEY: pool_states.ACTIVE, 'host': xenapi_fake.get_record('host', @@ -2900,18 +2900,18 @@ def test_pool_add_to_aggregate_called_by_driver(self): calls = [] - def pool_add_to_aggregate(context, aggregate, host, slave_info=None): + def pool_add_to_aggregate(context, aggregate, host, subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) calls.append(pool_add_to_aggregate) self.stubs.Set(self.conn._pool, "add_to_aggregate", pool_add_to_aggregate) self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertIn(pool_add_to_aggregate, calls) @@ -2920,18 +2920,18 @@ def test_pool_remove_from_aggregate_called_by_driver(self): calls = [] def pool_remove_from_aggregate(context, aggregate, host, - slave_info=None): + subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) calls.append(pool_remove_from_aggregate) self.stubs.Set(self.conn._pool, "remove_from_aggregate", pool_remove_from_aggregate) self.conn.remove_from_aggregate("CONTEXT", 
"AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertIn(pool_remove_from_aggregate, calls) @@ -2947,11 +2947,11 @@ def fake_init_pool(id, name): self.assertThat(self.fake_metadata, matchers.DictMatches(result['metadetails'])) - def test_join_slave(self): - # Ensure join_slave gets called when the request gets to master. - def fake_join_slave(id, compute_uuid, host, url, user, password): - fake_join_slave.called = True - self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave) + def test_join_subordinate(self): + # Ensure join_subordinate gets called when the request gets to main. + def fake_join_subordinate(id, compute_uuid, host, url, user, password): + fake_join_subordinate.called = True + self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate) aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -2961,7 +2961,7 @@ def fake_join_slave(id, compute_uuid, host, url, user, password): user='fake_user', passwd='fake_pass', xenhost_uuid='fake_uuid')) - self.assertTrue(fake_join_slave.called) + self.assertTrue(fake_join_subordinate.called) def test_add_to_aggregate_first_host(self): def fake_pool_set_name_label(self, session, pool_ref, name): @@ -3001,19 +3001,19 @@ def test_remove_from_empty_aggregate(self): self.conn._pool.remove_from_aggregate, self.context, result, "test_host") - def test_remove_slave(self): - # Ensure eject slave gets called. - def fake_eject_slave(id, compute_uuid, host_uuid): - fake_eject_slave.called = True - self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave) + def test_remove_subordinate(self): + # Ensure eject subordinate gets called. 
+ def fake_eject_subordinate(id, compute_uuid, host_uuid): + fake_eject_subordinate.called = True + self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate) self.fake_metadata['host2'] = 'fake_host2_uuid' aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") - self.assertTrue(fake_eject_slave.called) + self.assertTrue(fake_eject_subordinate.called) - def test_remove_master_solo(self): + def test_remove_main_solo(self): # Ensure metadata are cleared after removal. def fake_clear_pool(id): fake_clear_pool.called = True @@ -3028,8 +3028,8 @@ def fake_clear_pool(id): pool_states.KEY: pool_states.ACTIVE}, matchers.DictMatches(result['metadetails'])) - def test_remote_master_non_empty_pool(self): - # Ensure AggregateError is raised if removing the master. + def test_remote_main_non_empty_pool(self): + # Ensure AggregateError is raised if removing the main. 
aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -3137,7 +3137,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): self.compute.add_aggregate_host, self.context, host="fake_host", aggregate=jsonutils.to_primitive(self.aggr), - slave_info=None) + subordinate_info=None) excepted = db.aggregate_get(self.context, self.aggr['id']) self.assertEqual(excepted['metadetails'][pool_states.KEY], pool_states.ERROR) @@ -3149,16 +3149,16 @@ def __init__(self): self._mock_calls = [] def add_aggregate_host(self, ctxt, aggregate, - host_param, host, slave_info): + host_param, host, subordinate_info): self._mock_calls.append(( self.add_aggregate_host, ctxt, aggregate, - host_param, host, slave_info)) + host_param, host, subordinate_info)) def remove_aggregate_host(self, ctxt, aggregate_id, host_param, - host, slave_info): + host, subordinate_info): self._mock_calls.append(( self.remove_aggregate_host, ctxt, aggregate_id, - host_param, host, slave_info)) + host_param, host, subordinate_info)) class StubDependencies(object): @@ -3173,10 +3173,10 @@ def _is_hv_pool(self, *_ignore): def _get_metadata(self, *_ignore): return { pool_states.KEY: {}, - 'master_compute': 'master' + 'main_compute': 'main' } - def _create_slave_info(self, *ignore): + def _create_subordinate_info(self, *ignore): return "SLAVE_INFO" @@ -3190,32 +3190,32 @@ class HypervisorPoolTestCase(test.NoDBTestCase): 'id': 98, 'hosts': [], 'metadata': { - 'master_compute': 'master', + 'main_compute': 'main', pool_states.POOL_FLAG: {}, pool_states.KEY: {} } } - def test_slave_asks_master_to_add_slave_to_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_add_subordinate_to_pool(self): + subordinate = ResourcePoolWithStubs() - slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.add_to_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.add_aggregate_host, + 
(subordinate.compute_rpcapi.add_aggregate_host, "CONTEXT", jsonutils.to_primitive(self.fake_aggregate), - "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + "subordinate", "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) - def test_slave_asks_master_to_remove_slave_from_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_remove_subordinate_from_pool(self): + subordinate = ResourcePoolWithStubs() - slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.remove_from_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.remove_aggregate_host, - "CONTEXT", 98, "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + (subordinate.compute_rpcapi.remove_aggregate_host, + "CONTEXT", 98, "subordinate", "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) class SwapXapiHostTestCase(test.NoDBTestCase): diff --git a/nova/nova/virt/xenapi/client/session.py b/nova/nova/virt/xenapi/client/session.py index 23cb988..d0d237b 100644 --- a/nova/nova/virt/xenapi/client/session.py +++ b/nova/nova/virt/xenapi/client/session.py @@ -79,7 +79,7 @@ def __init__(self, url, user, pw): import XenAPI self.XenAPI = XenAPI self._sessions = queue.Queue() - self.is_slave = False + self.is_subordinate = False exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " "(is the Dom0 disk full?)")) url = self._create_first_session(url, user, pw, exception) @@ -109,13 +109,13 @@ def _create_first_session(self, url, user, pw, exception): with timeout.Timeout(CONF.xenserver.login_timeout, exception): session.login_with_password(user, pw) except self.XenAPI.Failure as e: - # if user and pw of the master are different, we're doomed! + # if user and pw of the main are different, we're doomed! 
if e.details[0] == 'HOST_IS_SLAVE': - master = e.details[1] - url = pool.swap_xapi_host(url, master) + main = e.details[1] + url = pool.swap_xapi_host(url, main) session = self.XenAPI.Session(url) session.login_with_password(user, pw) - self.is_slave = True + self.is_subordinate = True else: raise self._sessions.put(session) @@ -129,7 +129,7 @@ def _populate_session_pool(self, url, user, pw, exception): self._sessions.put(session) def _get_host_uuid(self): - if self.is_slave: + if self.is_subordinate: aggr = aggregate_obj.AggregateList.get_by_host( context.get_admin_context(), CONF.host, key=pool_states.POOL_FLAG)[0] diff --git a/nova/nova/virt/xenapi/fake.py b/nova/nova/virt/xenapi/fake.py index 506d49c..080478e 100644 --- a/nova/nova/virt/xenapi/fake.py +++ b/nova/nova/virt/xenapi/fake.py @@ -114,7 +114,7 @@ def create_host(name_label, hostname='fake_name', address='fake_addr'): # Create a pool if we don't have one already if len(_db_content['pool']) == 0: pool_ref = _create_pool('') - _db_content['pool'][pool_ref]['master'] = host_ref + _db_content['pool'][pool_ref]['main'] = host_ref _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref @@ -833,7 +833,7 @@ def __getattr__(self, name): return self._session elif name == 'xenapi': return _Dispatcher(self.xenapi_request, None) - elif name.startswith('login') or name.startswith('slave_local'): + elif name.startswith('login') or name.startswith('subordinate_local'): return lambda *params: self._login(name, params) elif name.startswith('Async'): return lambda *params: self._async(name, params) diff --git a/nova/nova/virt/xenapi/pool.py b/nova/nova/virt/xenapi/pool.py index 80b5c08..50c40ce 100644 --- a/nova/nova/virt/xenapi/pool.py +++ b/nova/nova/virt/xenapi/pool.py @@ -68,7 +68,7 @@ def undo_aggregate_operation(self, context, op, aggregate, 'during operation on %(host)s'), {'aggregate_id': aggregate['id'], 'host': host}) - def 
add_to_aggregate(self, context, aggregate, host, slave_info=None): + def add_to_aggregate(self, context, aggregate, host, subordinate_info=None): """Add a compute host to an aggregate.""" if not pool_states.is_hv_pool(aggregate['metadata']): return @@ -86,38 +86,38 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None): if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) if len(aggregate['hosts']) == 1: - # this is the first host of the pool -> make it master + # this is the first host of the pool -> make it main self._init_pool(aggregate['id'], aggregate['name']) - # save metadata so that we can find the master again - metadata = {'master_compute': host, + # save metadata so that we can find the main again + metadata = {'main_compute': host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE} aggregate.update_metadata(metadata) else: # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. - master_compute = aggregate['metadata']['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> do a pool-join - # To this aim, nova compute on the slave has to go down. + main_compute = aggregate['metadata']['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> do a pool-join + # To this aim, nova compute on the subordinate has to go down. 
# NOTE: it is assumed that ONLY nova compute is running now - self._join_slave(aggregate['id'], host, - slave_info.get('compute_uuid'), - slave_info.get('url'), slave_info.get('user'), - slave_info.get('passwd')) - metadata = {host: slave_info.get('xenhost_uuid'), } + self._join_subordinate(aggregate['id'], host, + subordinate_info.get('compute_uuid'), + subordinate_info.get('url'), subordinate_info.get('user'), + subordinate_info.get('passwd')) + metadata = {host: subordinate_info.get('xenhost_uuid'), } aggregate.update_metadata(metadata) - elif master_compute and master_compute != host: - # send rpc cast to master, asking to add the following + elif main_compute and main_compute != host: + # send rpc cast to main, asking to add the following # host with specified credentials. - slave_info = self._create_slave_info() + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.add_aggregate_host( - context, aggregate, host, master_compute, slave_info) + context, aggregate, host, main_compute, subordinate_info) - def remove_from_aggregate(self, context, aggregate, host, slave_info=None): + def remove_from_aggregate(self, context, aggregate, host, subordinate_info=None): """Remove a compute host from an aggregate.""" - slave_info = slave_info or dict() + subordinate_info = subordinate_info or dict() if not pool_states.is_hv_pool(aggregate['metadata']): return @@ -130,19 +130,19 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): aggregate_id=aggregate['id'], reason=invalid[aggregate['metadata'][pool_states.KEY]]) - master_compute = aggregate['metadata']['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> instruct it to eject a host from the pool + main_compute = aggregate['metadata']['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> instruct it to eject a host from the pool host_uuid = aggregate['metadata'][host] - 
self._eject_slave(aggregate['id'], - slave_info.get('compute_uuid'), host_uuid) + self._eject_subordinate(aggregate['id'], + subordinate_info.get('compute_uuid'), host_uuid) aggregate.update_metadata({host: None}) - elif master_compute == host: - # Remove master from its own pool -> destroy pool only if the - # master is on its own, otherwise raise fault. Destroying a - # pool made only by master is fictional + elif main_compute == host: + # Remove main from its own pool -> destroy pool only if the + # main is on its own, otherwise raise fault. Destroying a + # pool made only by main is fictional if len(aggregate['hosts']) > 1: - # NOTE: this could be avoided by doing a master + # NOTE: this could be avoided by doing a main # re-election, but this is simpler for now. raise exception.InvalidAggregateAction( aggregate_id=aggregate['id'], @@ -151,32 +151,32 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): 'from the pool; pool not empty') % host) self._clear_pool(aggregate['id']) - aggregate.update_metadata({'master_compute': None, host: None}) - elif master_compute and master_compute != host: - # A master exists -> forward pool-eject request to master - slave_info = self._create_slave_info() + aggregate.update_metadata({'main_compute': None, host: None}) + elif main_compute and main_compute != host: + # A main exists -> forward pool-eject request to main + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.remove_aggregate_host( - context, aggregate['id'], host, master_compute, slave_info) + context, aggregate['id'], host, main_compute, subordinate_info) else: # this shouldn't have happened raise exception.AggregateError(aggregate_id=aggregate['id'], action='remove_from_aggregate', reason=_('Unable to eject %s ' - 'from the pool; No master found') + 'from the pool; No main found') % host) - def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): - """Joins a slave into a XenServer resource pool.""" 
+ def _join_subordinate(self, aggregate_id, host, compute_uuid, url, user, passwd): + """Joins a subordinate into a XenServer resource pool.""" try: args = {'compute_uuid': compute_uuid, 'url': url, 'user': user, 'password': passwd, 'force': jsonutils.dumps(CONF.xenserver.use_join_force), - 'master_addr': self._host_addr, - 'master_user': CONF.xenserver.connection_username, - 'master_pass': CONF.xenserver.connection_password, } + 'main_addr': self._host_addr, + 'main_user': CONF.xenserver.connection_username, + 'main_pass': CONF.xenserver.connection_password, } self._session.call_plugin('xenhost', 'host_join', args) except self._session.XenAPI.Failure as e: LOG.error(_("Pool-Join failed: %s"), e) @@ -185,8 +185,8 @@ def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): reason=_('Unable to join %s ' 'in the pool') % host) - def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): - """Eject a slave from a XenServer resource pool.""" + def _eject_subordinate(self, aggregate_id, compute_uuid, host_uuid): + """Eject a subordinate from a XenServer resource pool.""" try: # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. That's a precaution @@ -225,7 +225,7 @@ def _clear_pool(self, aggregate_id): action='remove_from_aggregate', reason=str(e.details)) - def _create_slave_info(self): + def _create_subordinate_info(self): """XenServer specific info needed to join the hypervisor pool.""" # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. xenapi diff --git a/nova/nova/virt/xenapi/pool_states.py b/nova/nova/virt/xenapi/pool_states.py index ae431dd..f4acdf5 100644 --- a/nova/nova/virt/xenapi/pool_states.py +++ b/nova/nova/virt/xenapi/pool_states.py @@ -25,7 +25,7 @@ A 'created' pool becomes 'changing' during the first request of adding a host. 
During a 'changing' status no other requests will be accepted; this is to allow the hypervisor layer to instantiate the underlying pool -without any potential race condition that may incur in master/slave-based +without any potential race condition that may incur in main/subordinate-based configurations. The pool goes into the 'active' state when the underlying pool has been correctly instantiated. All other operations (e.g. add/remove hosts) that succeed will keep the diff --git a/nova/tools/db/schema_diff.py b/nova/tools/db/schema_diff.py index fdcad31..829de01 100755 --- a/nova/tools/db/schema_diff.py +++ b/nova/tools/db/schema_diff.py @@ -26,7 +26,7 @@ commit hash) and a SQLAlchemy-Migrate version number: Run like: - ./tools/db/schema_diff.py mysql master:latest my_branch:82 + ./tools/db/schema_diff.py mysql main:latest my_branch:82 """ from __future__ import print_function @@ -216,12 +216,12 @@ def parse_options(): try: orig_branch, orig_version = sys.argv[2].split(':') except IndexError: - usage('original branch and version required (e.g. master:82)') + usage('original branch and version required (e.g. main:82)') try: new_branch, new_version = sys.argv[3].split(':') except IndexError: - usage('new branch and version required (e.g. master:82)') + usage('new branch and version required (e.g. main:82)') return db_type, orig_branch, orig_version, new_branch, new_version diff --git a/oslo.config/doc/source/conf.py b/oslo.config/doc/source/conf.py index 120ad02..3be815e 100644 --- a/oslo.config/doc/source/conf.py +++ b/oslo.config/doc/source/conf.py @@ -20,8 +20,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. 
project = u'oslo.config' diff --git a/oslo.messaging/doc/source/conf.py b/oslo.messaging/doc/source/conf.py index b55a285..a82591e 100644 --- a/oslo.messaging/doc/source/conf.py +++ b/oslo.messaging/doc/source/conf.py @@ -20,8 +20,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'oslo.messaging' diff --git a/oslo.messaging/tests/drivers/test_matchmaker_redis.py b/oslo.messaging/tests/drivers/test_matchmaker_redis.py index 1c20dab..4896934 100644 --- a/oslo.messaging/tests/drivers/test_matchmaker_redis.py +++ b/oslo.messaging/tests/drivers/test_matchmaker_redis.py @@ -66,10 +66,10 @@ def test_direct(self): [('cert.controller1', 'controller1')]) def test_register(self): - self.matcher.register('cert', 'keymaster') + self.matcher.register('cert', 'keymain') self.assertEqual( sorted(self.matcher.redis.smembers('cert')), - ['cert.controller1', 'cert.keymaster']) + ['cert.controller1', 'cert.keymain']) def test_unregister(self): self.matcher.unregister('conductor', 'controller1') diff --git a/oslo.rootwrap/doc/source/conf.py b/oslo.rootwrap/doc/source/conf.py index c387e21..0000ee1 100755 --- a/oslo.rootwrap/doc/source/conf.py +++ b/oslo.rootwrap/doc/source/conf.py @@ -33,8 +33,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'oslo.rootwrap' diff --git a/oslo.vmware/doc/source/conf.py b/oslo.vmware/doc/source/conf.py index 90faf0b..eb572d6 100644 --- a/oslo.vmware/doc/source/conf.py +++ b/oslo.vmware/doc/source/conf.py @@ -52,8 +52,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. 
project = u'oslo.vmware' diff --git a/pbr/doc/source/conf.py b/pbr/doc/source/conf.py index 13ca86b..e7ea0fa 100644 --- a/pbr/doc/source/conf.py +++ b/pbr/doc/source/conf.py @@ -20,8 +20,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'pbr' diff --git a/pbr/pbr/packaging.py b/pbr/pbr/packaging.py index 99fa2ef..e45916c 100644 --- a/pbr/pbr/packaging.py +++ b/pbr/pbr/packaging.py @@ -772,7 +772,7 @@ def _sphinx_run(self): raise if self.link_index: - src = app.config.master_doc + app.builder.out_suffix + src = app.config.main_doc + app.builder.out_suffix dst = app.builder.get_outfilename('index') os.symlink(src, dst) diff --git a/pycadf/doc/source/conf.py b/pycadf/doc/source/conf.py index 7e69c85..928c1da 100644 --- a/pycadf/doc/source/conf.py +++ b/pycadf/doc/source/conf.py @@ -50,8 +50,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'pyCADF' diff --git a/python-cinderclient/cinderclient/openstack/common/gettextutils.py b/python-cinderclient/cinderclient/openstack/common/gettextutils.py index 1516be1..9c80dd8 100644 --- a/python-cinderclient/cinderclient/openstack/common/gettextutils.py +++ b/python-cinderclient/cinderclient/openstack/common/gettextutils.py @@ -331,9 +331,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/python-cinderclient/doc/source/conf.py b/python-cinderclient/doc/source/conf.py index d433af0..299273e 100644 --- a/python-cinderclient/doc/source/conf.py +++ b/python-cinderclient/doc/source/conf.py @@ -39,8 +39,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'python-cinderclient' diff --git a/python-glanceclient/doc/source/conf.py b/python-glanceclient/doc/source/conf.py index 77162fa..ac7607a 100644 --- a/python-glanceclient/doc/source/conf.py +++ b/python-glanceclient/doc/source/conf.py @@ -24,8 +24,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'python-glanceclient' diff --git a/python-glanceclient/glanceclient/openstack/common/gettextutils.py b/python-glanceclient/glanceclient/openstack/common/gettextutils.py index 7a366d5..5deee75 100644 --- a/python-glanceclient/glanceclient/openstack/common/gettextutils.py +++ b/python-glanceclient/glanceclient/openstack/common/gettextutils.py @@ -300,9 +300,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/python-heatclient/doc/source/conf.py b/python-heatclient/doc/source/conf.py index e611dbe..2bdb2ed 100644 --- a/python-heatclient/doc/source/conf.py +++ b/python-heatclient/doc/source/conf.py @@ -50,8 +50,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'python-heatclient' diff --git a/python-heatclient/heatclient/openstack/common/gettextutils.py b/python-heatclient/heatclient/openstack/common/gettextutils.py index 07805a4..1705a9c 100644 --- a/python-heatclient/heatclient/openstack/common/gettextutils.py +++ b/python-heatclient/heatclient/openstack/common/gettextutils.py @@ -300,9 +300,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/python-keystoneclient/doc/source/conf.py b/python-keystoneclient/doc/source/conf.py index 30d0259..3648aab 100644 --- a/python-keystoneclient/doc/source/conf.py +++ b/python-keystoneclient/doc/source/conf.py @@ -60,8 +60,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'python-keystoneclient' diff --git a/python-keystoneclient/keystoneclient/openstack/common/gettextutils.py b/python-keystoneclient/keystoneclient/openstack/common/gettextutils.py index 55a60df..32cbfb4 100644 --- a/python-keystoneclient/keystoneclient/openstack/common/gettextutils.py +++ b/python-keystoneclient/keystoneclient/openstack/common/gettextutils.py @@ -331,9 +331,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/python-neutronclient/doc/source/conf.py b/python-neutronclient/doc/source/conf.py index 9648d80..2538da1 100644 --- a/python-neutronclient/doc/source/conf.py +++ b/python-neutronclient/doc/source/conf.py @@ -15,8 +15,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. copyright = u'OpenStack Foundation' diff --git a/python-novaclient/doc/source/conf.py b/python-novaclient/doc/source/conf.py index d33aea2..82e4f42 100644 --- a/python-novaclient/doc/source/conf.py +++ b/python-novaclient/doc/source/conf.py @@ -99,8 +99,8 @@ def gen_ref(ver, title, names): # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'python-novaclient' diff --git a/python-openstackclient/doc/source/conf.py b/python-openstackclient/doc/source/conf.py index 7c7a00b..aeef902 100644 --- a/python-openstackclient/doc/source/conf.py +++ b/python-openstackclient/doc/source/conf.py @@ -43,8 +43,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. 
project = u'OpenStack Command Line Client' diff --git a/python-openstackclient/openstackclient/tests/identity/v3/fakes.py b/python-openstackclient/openstackclient/tests/identity/v3/fakes.py index 7acaa7f..5d44d31 100644 --- a/python-openstackclient/openstackclient/tests/identity/v3/fakes.py +++ b/python-openstackclient/openstackclient/tests/identity/v3/fakes.py @@ -45,7 +45,7 @@ mapping_rules_file_path = '/tmp/path/to/file' # Copied from # (https://github.com/openstack/keystone/blob\ -# master/keystone/tests/mapping_fixtures.py +# master/keystone/tests/mapping_fixtures.py EMPLOYEE_GROUP_ID = "0cd5e9" DEVELOPER_GROUP_ID = "xyz" MAPPING_RULES = [ diff --git a/python-swiftclient/doc/source/conf.py b/python-swiftclient/doc/source/conf.py index 0b3e7e1..2c4f40e 100644 --- a/python-swiftclient/doc/source/conf.py +++ b/python-swiftclient/doc/source/conf.py @@ -46,8 +46,8 @@ # The encoding of source files. # source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'Swiftclient' diff --git a/stevedore/doc/source/conf.py b/stevedore/doc/source/conf.py index 16f953e..7bd8240 100644 --- a/stevedore/doc/source/conf.py +++ b/stevedore/doc/source/conf.py @@ -43,8 +43,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'stevedore' diff --git a/taskflow/doc/source/conf.py b/taskflow/doc/source/conf.py index fcb0716..484476b 100644 --- a/taskflow/doc/source/conf.py +++ b/taskflow/doc/source/conf.py @@ -27,8 +27,8 @@ # The suffix of source filenames. source_suffix = '.rst' -# The master toctree document. -master_doc = 'index' +# The main toctree document. 
+master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. diff --git a/taskflow/taskflow/test.py b/taskflow/taskflow/test.py index c6a56a9..546d8a9 100644 --- a/taskflow/taskflow/test.py +++ b/taskflow/taskflow/test.py @@ -204,23 +204,23 @@ class MockTestCase(TestCase): def setUp(self): super(MockTestCase, self).setUp() - self.master_mock = mock.Mock(name='master_mock') + self.main_mock = mock.Mock(name='main_mock') def patch(self, target, autospec=True, **kwargs): - """Patch target and attach it to the master mock.""" + """Patch target and attach it to the main mock.""" f = self.useFixture(mockpatch.Patch(target, autospec=autospec, **kwargs)) mocked = f.mock attach_as = kwargs.pop('attach_as', None) if attach_as is not None: - self.master_mock.attach_mock(mocked, attach_as) + self.main_mock.attach_mock(mocked, attach_as) return mocked def patchClass(self, module, name, autospec=True, attach_as=None): """Patches a modules class. This will create a class instance mock (using the provided name to - find the class in the module) and attach a mock class the master mock + find the class in the module) and attach a mock class the main mock to be cleaned up on test exit. 
""" if autospec: @@ -240,12 +240,12 @@ def patchClass(self, module, name, autospec=True, attach_as=None): attach_class_as = attach_as + '_class' attach_instance_as = attach_as - self.master_mock.attach_mock(class_mock, attach_class_as) - self.master_mock.attach_mock(instance_mock, attach_instance_as) + self.main_mock.attach_mock(class_mock, attach_class_as) + self.main_mock.attach_mock(instance_mock, attach_instance_as) return class_mock, instance_mock - def resetMasterMock(self): - self.master_mock.reset_mock() + def resetMainMock(self): + self.main_mock.reset_mock() class CapturingLoggingHandler(logging.Handler): diff --git a/taskflow/taskflow/tests/unit/worker_based/test_engine.py b/taskflow/taskflow/tests/unit/worker_based/test_engine.py index f274a82..1dccdf4 100644 --- a/taskflow/taskflow/tests/unit/worker_based/test_engine.py +++ b/taskflow/taskflow/tests/unit/worker_based/test_engine.py @@ -48,7 +48,7 @@ def test_creation_default(self): transport_options=None, transition_timeout=mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(self.main_mock.mock_calls, expected_calls) def test_creation_custom(self): flow = lf.Flow('test-flow').add(utils.DummyTask()) @@ -68,4 +68,4 @@ def test_creation_custom(self): transport_options={}, transition_timeout=200) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(self.main_mock.mock_calls, expected_calls) diff --git a/taskflow/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/taskflow/tests/unit/worker_based/test_executor.py index d2b97bf..a0199e5 100644 --- a/taskflow/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/taskflow/tests/unit/worker_based/test_executor.py @@ -70,25 +70,25 @@ def _fake_proxy_start(self): def _fake_proxy_stop(self): self.proxy_started_event.clear() - def executor(self, reset_master_mock=True, **kwargs): + def executor(self, reset_main_mock=True, **kwargs): executor_kwargs = 
dict(uuid=self.executor_uuid, exchange=self.executor_exchange, topics=[self.executor_topic], url=self.broker_url) executor_kwargs.update(kwargs) ex = executor.WorkerTaskExecutor(**executor_kwargs) - if reset_master_mock: - self.resetMasterMock() + if reset_main_mock: + self.resetMainMock() return ex def test_creation(self): - ex = self.executor(reset_master_mock=False) + ex = self.executor(reset_main_mock=False) - master_mock_calls = [ + main_mock_calls = [ mock.call.Proxy(self.executor_uuid, self.executor_exchange, mock.ANY, ex._on_wait, url=self.broker_url) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_on_message_response_state_running(self): response = pr.Response(pr.RUNNING) @@ -219,7 +219,7 @@ def test_execute_task(self): reply_to=self.executor_uuid, correlation_id=self.task_uuid) ] - self.assertEqual(expected_calls, self.master_mock.mock_calls) + self.assertEqual(expected_calls, self.main_mock.mock_calls) def test_revert_task(self): self.message_mock.properties['type'] = pr.NOTIFY @@ -241,7 +241,7 @@ def test_revert_task(self): reply_to=self.executor_uuid, correlation_id=self.task_uuid) ] - self.assertEqual(expected_calls, self.master_mock.mock_calls) + self.assertEqual(expected_calls, self.main_mock.mock_calls) def test_execute_task_topic_not_found(self): workers_info = {self.executor_topic: ['']} @@ -252,7 +252,7 @@ def test_execute_task_topic_not_found(self): mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(self.main_mock.mock_calls, expected_calls) def test_execute_task_publish_error(self): self.message_mock.properties['type'] = pr.NOTIFY @@ -275,7 +275,7 @@ def test_execute_task_publish_error(self): logger=mock.ANY), mock.call.request.set_result(mock.ANY) ] - self.assertEqual(expected_calls, self.master_mock.mock_calls) + 
self.assertEqual(expected_calls, self.main_mock.mock_calls) def test_wait_for_any(self): fs = [futures.Future(), futures.Future()] @@ -293,10 +293,10 @@ def test_wait_for_any_with_timeout(self): ex = self.executor() ex.wait_for_any(fs, timeout) - master_mock_calls = [ + main_mock_calls = [ mock.call(fs, timeout) ] - self.assertEqual(self.wait_for_any_mock.mock_calls, master_mock_calls) + self.assertEqual(self.wait_for_any_mock.mock_calls, main_mock_calls) def test_start_stop(self): ex = self.executor() @@ -308,7 +308,7 @@ def test_start_stop(self): # stop executor ex.stop() - self.master_mock.assert_has_calls([ + self.main_mock.assert_has_calls([ mock.call.proxy.start(), mock.call.proxy.wait(), mock.call.proxy.stop() @@ -327,7 +327,7 @@ def test_start_already_running(self): # stop executor ex.stop() - self.master_mock.assert_has_calls([ + self.main_mock.assert_has_calls([ mock.call.proxy.start(), mock.call.proxy.wait(), mock.call.proxy.stop() @@ -336,7 +336,7 @@ def test_start_already_running(self): def test_stop_not_running(self): self.executor().stop() - self.assertEqual(self.master_mock.mock_calls, []) + self.assertEqual(self.main_mock.mock_calls, []) def test_stop_not_alive(self): self.proxy_inst_mock.start.side_effect = None @@ -352,7 +352,7 @@ def test_stop_not_alive(self): ex.stop() # since proxy thread is already done - stop is not called - self.master_mock.assert_has_calls([ + self.main_mock.assert_has_calls([ mock.call.proxy.start(), mock.call.proxy.wait() ], any_order=True) @@ -374,7 +374,7 @@ def test_restart(self): # stop executor ex.stop() - self.master_mock.assert_has_calls([ + self.main_mock.assert_has_calls([ mock.call.proxy.start(), mock.call.proxy.wait(), mock.call.proxy.stop(), diff --git a/taskflow/taskflow/tests/unit/worker_based/test_proxy.py b/taskflow/taskflow/tests/unit/worker_based/test_proxy.py index de5f3ab..aaecc13 100644 --- a/taskflow/taskflow/tests/unit/worker_based/test_proxy.py +++ 
b/taskflow/taskflow/tests/unit/worker_based/test_proxy.py @@ -67,10 +67,10 @@ def setUp(self): # other mocking self.on_wait_mock = mock.MagicMock(name='on_wait') - self.master_mock.attach_mock(self.on_wait_mock, 'on_wait') + self.main_mock.attach_mock(self.on_wait_mock, 'on_wait') - # reset master mock - self.resetMasterMock() + # reset main mock + self.resetMainMock() def _queue_name(self, topic): return "%s_%s" % (self.exchange_name, topic) @@ -91,41 +91,41 @@ def proxy_start_calls(self, calls, exc_type=mock.ANY): mock.ANY) ] - def proxy(self, reset_master_mock=False, **kwargs): + def proxy(self, reset_main_mock=False, **kwargs): proxy_kwargs = dict(topic=self.topic, exchange_name=self.exchange_name, url=self.broker_url, type_handlers={}) proxy_kwargs.update(kwargs) p = proxy.Proxy(**proxy_kwargs) - if reset_master_mock: - self.resetMasterMock() + if reset_main_mock: + self.resetMainMock() return p def test_creation(self): self.proxy() - master_mock_calls = [ + main_mock_calls = [ mock.call.Connection(self.broker_url, transport=None, transport_options=None), mock.call.Exchange(name=self.exchange_name, durable=False, auto_delete=True) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_creation_custom(self): transport_opts = {'context': 'context'} self.proxy(transport='memory', transport_options=transport_opts) - master_mock_calls = [ + main_mock_calls = [ mock.call.Connection(self.broker_url, transport='memory', transport_options=transport_opts), mock.call.Exchange(name=self.exchange_name, durable=False, auto_delete=True) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_publish(self): msg_mock = mock.MagicMock() @@ -135,10 +135,10 @@ def test_publish(self): task_uuid = 'task-uuid' kwargs = dict(a='a', b='b') - self.proxy(reset_master_mock=True).publish( + 
self.proxy(reset_main_mock=True).publish( msg_mock, routing_key, correlation_id=task_uuid, **kwargs) - master_mock_calls = [ + main_mock_calls = [ mock.call.Queue(name=self._queue_name(routing_key), exchange=self.exchange_inst_mock, routing_key=routing_key, @@ -152,59 +152,59 @@ def test_publish(self): type=msg_mock.TYPE, **kwargs) ] - self.master_mock.assert_has_calls(master_mock_calls) + self.main_mock.assert_has_calls(main_mock_calls) def test_start(self): try: # KeyboardInterrupt will be raised after two iterations - self.proxy(reset_master_mock=True).start() + self.proxy(reset_main_mock=True).start() except KeyboardInterrupt: pass - master_calls = self.proxy_start_calls([ + main_calls = self.proxy_start_calls([ mock.call.connection.drain_events(timeout=self.de_period), mock.call.connection.drain_events(timeout=self.de_period), mock.call.connection.drain_events(timeout=self.de_period), ], exc_type=KeyboardInterrupt) - self.master_mock.assert_has_calls(master_calls) + self.main_mock.assert_has_calls(main_calls) def test_start_with_on_wait(self): try: # KeyboardInterrupt will be raised after two iterations - self.proxy(reset_master_mock=True, + self.proxy(reset_main_mock=True, on_wait=self.on_wait_mock).start() except KeyboardInterrupt: pass - master_calls = self.proxy_start_calls([ + main_calls = self.proxy_start_calls([ mock.call.connection.drain_events(timeout=self.de_period), mock.call.on_wait(), mock.call.connection.drain_events(timeout=self.de_period), mock.call.on_wait(), mock.call.connection.drain_events(timeout=self.de_period), ], exc_type=KeyboardInterrupt) - self.master_mock.assert_has_calls(master_calls) + self.main_mock.assert_has_calls(main_calls) def test_start_with_on_wait_raises(self): self.on_wait_mock.side_effect = RuntimeError('Woot!') try: # KeyboardInterrupt will be raised after two iterations - self.proxy(reset_master_mock=True, + self.proxy(reset_main_mock=True, on_wait=self.on_wait_mock).start() except KeyboardInterrupt: pass - 
master_calls = self.proxy_start_calls([ + main_calls = self.proxy_start_calls([ mock.call.connection.drain_events(timeout=self.de_period), mock.call.on_wait(), ], exc_type=RuntimeError) - self.master_mock.assert_has_calls(master_calls) + self.main_mock.assert_has_calls(main_calls) def test_stop(self): self.conn_inst_mock.drain_events.side_effect = socket.timeout # create proxy - pr = self.proxy(reset_master_mock=True) + pr = self.proxy(reset_main_mock=True) # check that proxy is not running yes self.assertFalse(pr.is_running) diff --git a/taskflow/taskflow/tests/unit/worker_based/test_server.py b/taskflow/taskflow/tests/unit/worker_based/test_server.py index 5e9129a..ed25658 100644 --- a/taskflow/taskflow/tests/unit/worker_based/test_server.py +++ b/taskflow/taskflow/tests/unit/worker_based/test_server.py @@ -54,10 +54,10 @@ def setUp(self): self.message_mock.properties = {'correlation_id': self.task_uuid, 'reply_to': self.reply_to, 'type': pr.REQUEST} - self.master_mock.attach_mock(self.executor_mock, 'executor') - self.master_mock.attach_mock(self.message_mock, 'message') + self.main_mock.attach_mock(self.executor_mock, 'executor') + self.main_mock.attach_mock(self.message_mock, 'message') - def server(self, reset_master_mock=False, **kwargs): + def server(self, reset_main_mock=False, **kwargs): server_kwargs = dict(topic=self.server_topic, exchange=self.server_exchange, executor=self.executor_mock, @@ -65,8 +65,8 @@ def server(self, reset_master_mock=False, **kwargs): url=self.broker_url) server_kwargs.update(kwargs) s = server.Server(**server_kwargs) - if reset_master_mock: - self.resetMasterMock() + if reset_main_mock: + self.resetMainMock() return s def make_request(self, **kwargs): @@ -83,22 +83,22 @@ def test_creation(self): s = self.server() # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Proxy(self.server_topic, self.server_exchange, mock.ANY, url=self.broker_url, on_wait=mock.ANY) ] - 
self.master_mock.assert_has_calls(master_mock_calls) + self.main_mock.assert_has_calls(main_mock_calls) self.assertEqual(len(s._endpoints), 3) def test_creation_with_endpoints(self): s = self.server(endpoints=self.endpoints) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Proxy(self.server_topic, self.server_exchange, mock.ANY, url=self.broker_url, on_wait=mock.ANY) ] - self.master_mock.assert_has_calls(master_mock_calls) + self.main_mock.assert_has_calls(main_mock_calls) self.assertEqual(len(s._endpoints), len(self.endpoints)) def test_parse_request(self): @@ -150,10 +150,10 @@ def test_reply_publish_failure(self, mocked_exception): self.proxy_inst_mock.publish.side_effect = RuntimeError('Woot!') # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._reply(True, self.reply_to, self.task_uuid) - self.assertEqual(self.master_mock.mock_calls, [ + self.assertEqual(self.main_mock.mock_calls, [ mock.call.Response(pr.FAILURE), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid) @@ -165,7 +165,7 @@ def test_on_run_reply_failure(self): self.proxy_inst_mock.publish.side_effect = RuntimeError('Woot!') # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, self.message_mock) self.assertEqual(1, self.proxy_inst_mock.publish.call_count) @@ -174,11 +174,11 @@ def test_on_update_progress(self): request = self.make_request(task=utils.ProgressingTask(), arguments={}) # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, self.message_mock) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Response(pr.RUNNING), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid), @@ -192,15 +192,15 @@ def 
test_on_update_progress(self): mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_process_request(self): # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(self.make_request(), self.message_mock) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Response(pr.RUNNING), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid), @@ -208,16 +208,16 @@ def test_process_request(self): mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) @mock.patch("taskflow.engines.worker_based.server.LOG.warn") def test_process_request_parse_message_failure(self, mocked_exception): self.message_mock.properties = {} request = self.make_request() - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, self.message_mock) - self.assertEqual(self.master_mock.mock_calls, []) + self.assertEqual(self.main_mock.mock_calls, []) self.assertTrue(mocked_exception.called) @mock.patch.object(failure.Failure, 'from_dict') @@ -232,17 +232,17 @@ def test_process_request_parse_request_failure(self, to_mock, from_mock): request = self.make_request(result=a_failure) # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, self.message_mock) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Response(pr.FAILURE, result=failure_dict), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid) ] - 
self.assertEqual(master_mock_calls, self.master_mock.mock_calls) + self.assertEqual(main_mock_calls, self.main_mock.mock_calls) @mock.patch.object(failure.Failure, 'to_dict') def test_process_request_endpoint_not_found(self, to_mock): @@ -253,17 +253,17 @@ def test_process_request_endpoint_not_found(self, to_mock): request = self.make_request(task=mock.MagicMock(name='')) # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, self.message_mock) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Response(pr.FAILURE, result=failure_dict), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) @mock.patch.object(failure.Failure, 'to_dict') def test_process_request_execution_failure(self, to_mock): @@ -275,17 +275,17 @@ def test_process_request_execution_failure(self, to_mock): request['action'] = '' # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, self.message_mock) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Response(pr.FAILURE, result=failure_dict), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) @mock.patch.object(failure.Failure, 'to_dict') def test_process_request_task_failure(self, to_mock): @@ -296,11 +296,11 @@ def test_process_request_task_failure(self, to_mock): request = self.make_request(task=utils.TaskWithFailure(), arguments={}) # create server and process request - s = self.server(reset_master_mock=True) + s = self.server(reset_main_mock=True) s._process_request(request, 
self.message_mock) # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.Response(pr.RUNNING), mock.call.proxy.publish(self.response_inst_mock, self.reply_to, correlation_id=self.task_uuid), @@ -309,34 +309,34 @@ def test_process_request_task_failure(self, to_mock): self.reply_to, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_start(self): - self.server(reset_master_mock=True).start() + self.server(reset_main_mock=True).start() # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.proxy.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_wait(self): - server = self.server(reset_master_mock=True) + server = self.server(reset_main_mock=True) server.start() server.wait() # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.proxy.start(), mock.call.proxy.wait() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_stop(self): - self.server(reset_master_mock=True).stop() + self.server(reset_main_mock=True).stop() # check calls - master_mock_calls = [ + main_mock_calls = [ mock.call.proxy.stop() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) diff --git a/taskflow/taskflow/tests/unit/worker_based/test_worker.py b/taskflow/taskflow/tests/unit/worker_based/test_worker.py index d37e817..5b441ea 100644 --- a/taskflow/taskflow/tests/unit/worker_based/test_worker.py +++ b/taskflow/taskflow/tests/unit/worker_based/test_worker.py @@ -45,36 +45,36 @@ def setUp(self): 'taskflow.engines.worker_based.worker.tu.get_optimal_thread_count') self.threads_count_mock.return_value = self.threads_count - def worker(self, reset_master_mock=False, **kwargs): + 
def worker(self, reset_main_mock=False, **kwargs): worker_kwargs = dict(exchange=self.exchange, topic=self.topic, tasks=[], url=self.broker_url) worker_kwargs.update(kwargs) w = worker.Worker(**worker_kwargs) - if reset_master_mock: - self.resetMasterMock() + if reset_main_mock: + self.resetMainMock() return w def test_creation(self): self.worker() - master_mock_calls = [ + main_mock_calls = [ mock.call.executor_class(self.threads_count), mock.call.Server(self.topic, self.exchange, self.executor_inst_mock, [], url=self.broker_url) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_creation_with_custom_threads_count(self): self.worker(threads_count=10) - master_mock_calls = [ + main_mock_calls = [ mock.call.executor_class(10), mock.call.Server(self.topic, self.exchange, self.executor_inst_mock, [], url=self.broker_url) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_creation_with_negative_threads_count(self): self.assertRaises(ValueError, self.worker, threads_count=-10) @@ -83,58 +83,58 @@ def test_creation_with_custom_executor(self): executor_mock = mock.MagicMock(name='executor') self.worker(executor=executor_mock) - master_mock_calls = [ + main_mock_calls = [ mock.call.Server(self.topic, self.exchange, executor_mock, [], url=self.broker_url) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_run_with_no_tasks(self): - self.worker(reset_master_mock=True).run() + self.worker(reset_main_mock=True).run() - master_mock_calls = [ + main_mock_calls = [ mock.call.server.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_run_with_tasks(self): - self.worker(reset_master_mock=True, + 
self.worker(reset_main_mock=True, tasks=['taskflow.tests.utils:DummyTask']).run() - master_mock_calls = [ + main_mock_calls = [ mock.call.server.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_run_with_custom_executor(self): executor_mock = mock.MagicMock(name='executor') - self.worker(reset_master_mock=True, + self.worker(reset_main_mock=True, executor=executor_mock).run() - master_mock_calls = [ + main_mock_calls = [ mock.call.server.start() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_wait(self): - w = self.worker(reset_master_mock=True) + w = self.worker(reset_main_mock=True) w.run() w.wait() - master_mock_calls = [ + main_mock_calls = [ mock.call.server.start(), mock.call.server.wait() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_stop(self): - self.worker(reset_master_mock=True).stop() + self.worker(reset_main_mock=True).stop() - master_mock_calls = [ + main_mock_calls = [ mock.call.server.stop(), mock.call.executor.shutdown() ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.assertEqual(self.main_mock.mock_calls, main_mock_calls) def test_derive_endpoints_from_string_tasks(self): endpoints = worker.Worker._derive_endpoints( diff --git a/taskflow/tools/schema_generator.py b/taskflow/tools/schema_generator.py index 3685a0a..7cdf0cc 100755 --- a/taskflow/tools/schema_generator.py +++ b/taskflow/tools/schema_generator.py @@ -31,7 +31,7 @@ # This uses an in-memory database (aka nothing is written) "connection": "sqlite://", } -TABLE_QUERY = "SELECT name, sql FROM sqlite_master WHERE type='table'" +TABLE_QUERY = "SELECT name, sql FROM sqlite_master WHERE type='table'" SCHEMA_QUERY = "pragma table_info(%s)" diff --git a/tempest/doc/source/conf.py 
b/tempest/doc/source/conf.py index daa293c..e3a66e6 100644 --- a/tempest/doc/source/conf.py +++ b/tempest/doc/source/conf.py @@ -43,8 +43,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'Tempest' diff --git a/tempest/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/tempest/api/compute/admin/test_fixed_ips_negative.py index 8d6a7fc..0f68ceb 100644 --- a/tempest/tempest/api/compute/admin/test_fixed_ips_negative.py +++ b/tempest/tempest/api/compute/admin/test_fixed_ips_negative.py @@ -68,7 +68,7 @@ def test_set_reserve_with_invalid_ip(self): # NOTE(maurosr): since this exercises the same code snippet, we do it # only for reserve action body = {"reserve": "None"} - # NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we + # NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we # change the error code to BadRequest, both exceptions should be # accepted by tempest self.assertRaises((exceptions.NotFound, exceptions.BadRequest), diff --git a/tempest/tempest/api/data_processing/test_cluster_templates.py b/tempest/tempest/api/data_processing/test_cluster_templates.py index 537f90c..c25ba8a 100644 --- a/tempest/tempest/api/data_processing/test_cluster_templates.py +++ b/tempest/tempest/api/data_processing/test_cluster_templates.py @@ -59,7 +59,7 @@ def resource_setup(cls): }, 'node_groups': [ { - 'name': 'master-node', + 'name': 'main-node', 'flavor_id': cls.flavor_ref, 'node_processes': ['namenode'], 'count': 1 diff --git a/tempest/tempest/openstack/common/gettextutils.py b/tempest/tempest/openstack/common/gettextutils.py index 872d58e..b46f4e8 100644 --- a/tempest/tempest/openstack/common/gettextutils.py +++ b/tempest/tempest/openstack/common/gettextutils.py @@ -331,9 +331,9 @@ def get_available_languages(domain): # order matters) since our in-line message 
strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers()