Minor fixes (#808)
* Use `shutil.move` instead of `os.replace`, which is only available from Python 3.3
* Introduce standby-leader health-check and consul service
* Improve unit tests, some lines were not covered
* Rename `assertEquals` -> `assertEqual`, due to a deprecation warning
committed by GitHub
parent 3d76a013a7
commit 76d1b4cfd8
patroni/api.py
@@ -87,7 +87,9 @@ class RestApiHandler(BaseHTTPRequestHandler):
         replica_status_code = 200 if not patroni.noloadbalance and response.get('role') == 'replica' else 503
         status_code = 503
 
-        if 'master' in path:
+        if patroni.config.is_standby_cluster and ('standby_leader' in path or 'standby-leader' in path):
+            status_code = 200 if patroni.ha.is_leader() else 503
+        elif 'master' in path or 'leader' in path or 'primary' in path:
             status_code = 200 if patroni.ha.is_leader() else 503
         elif 'replica' in path:
             status_code = replica_status_code
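To exercise the new health check against a running node, something like the following works (a minimal sketch; the host and the default REST API port 8008 are assumptions, adjust to your `restapi` configuration):

```python
import requests

# On a standby cluster the standby leader answers 200 on this path;
# any other node (or a non-standby cluster) gets a 503.
response = requests.get('http://127.0.0.1:8008/standby-leader')
print(response.status_code)
```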
patroni/config.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import shutil
 import six
 import sys
 import tempfile
@@ -128,7 +129,7 @@ class Config(object):
             with os.fdopen(fd, 'w') as f:
                 fd = None
                 json.dump(self.dynamic_configuration, f)
-            tmpfile = os.replace(tmpfile, self._cache_file)
+            tmpfile = shutil.move(tmpfile, self._cache_file)
             self._cache_needs_saving = False
         except Exception:
             logger.exception('Exception when saving file: %s', self._cache_file)
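The pattern being preserved here is "write to a temp file, then move it over the cache file"; `shutil.move` keeps that working on Python 2.7, where `os.replace` (added in 3.3) does not exist. A condensed sketch of the idea, with illustrative names rather than Patroni's own:

```python
import json
import os
import shutil
import tempfile

def save_cache(data, cache_file):
    # Write to a temp file in the target directory, then move it into place.
    # shutil.move exists on Python 2.7; os.replace requires Python 3.3+.
    fd, tmpfile = tempfile.mkstemp(prefix='cache.', dir=os.path.dirname(cache_file))
    with os.fdopen(fd, 'w') as f:
        json.dump(data, f)
    shutil.move(tmpfile, cache_file)
```

On the same filesystem `shutil.move` falls back to a plain rename, so the overwrite stays effectively atomic in the common case.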
patroni/dcs/__init__.py
@@ -6,6 +6,7 @@ import json
 import logging
 import os
 import pkgutil
+import re
 import six
 import sys
@@ -419,7 +420,6 @@ class AbstractDCS(object):
         :param config: dict, reference to config section of selected DCS.
             i.e.: `zookeeper` for zookeeper, `etcd` for etcd, etc...
         """
-        import re
         self._name = config['name']
         self._base_path = re.sub('/+', '/', '/'.join(['', config.get('namespace', 'service'), config['scope']]))
         self._set_loop_wait(config.get('loop_wait', 10))
patroni/dcs/consul.py
@@ -377,7 +377,7 @@ class Consul(AbstractDCS):
 
     def _update_service(self, data):
         service_name = self._service_name
-        role = data['role']
+        role = data['role'].replace('_', '-')
         state = data['state']
         api_parts = urlparse(data['api_url'])
         api_parts = api_parts._replace(path='/{0}'.format(role))
@@ -394,7 +394,7 @@ class Consul(AbstractDCS):
         if state == 'stopped':
             return self.deregister_service(params['service_id'])
 
-        if role in ['master', 'replica']:
+        if role in ['master', 'replica', 'standby-leader']:
             if state != 'running':
                 return
             return self.register_service(service_name, **params)
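The two Consul changes work together: the internal role name `standby_leader` is normalized to `standby-leader` before it is used in the health-check URL and matched against the list of roles that get a Consul service registered. A tiny illustration of the normalization:

```python
# Patroni reports the role with an underscore internally; the hyphenated
# form is what the registration check above expects (and is friendlier to
# DNS-based Consul service discovery).
role = 'standby_leader'.replace('_', '-')
assert role == 'standby-leader'
```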
patroni/postmaster.py
@@ -156,9 +156,8 @@ class PostmasterProcess(psutil.Process):
             pass
         cmdline = [pgcommand, '-D', data_dir, '--config-file={}'.format(conf)] + options
         logger.debug("Starting postgres: %s", " ".join(cmdline))
-        proc = call_self(['pg_ctl_start'] + cmdline,
-                         close_fds=(os.name != 'nt'), stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT, env=env)
+        proc = call_self(['pg_ctl_start'] + cmdline, close_fds=(os.name != 'nt'),
+                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
         pid = int(proc.stdout.readline().strip())
         proc.wait()
         logger.info('postmaster pid=%s', pid)
setup.py
@@ -107,6 +107,8 @@ class PyTest(TestCommand):
             silence = logging.WARNING
         logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=os.getenv('LOGLEVEL', silence))
         params['args'] += ['-s' if logging.getLogger().getEffectiveLevel() < silence else '--capture=fd']
+        if not os.getenv('SYSTEMROOT'):
+            os.environ['SYSTEMROOT'] = '/'
         errno = pytest.main(**params)
         sys.exit(errno)
tests/test_api.py
@@ -167,6 +167,8 @@ class TestRestApiHandler(unittest.TestCase):
         self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /master'))
         with patch.object(RestApiServer, 'query', Mock(return_value=[('', 1, '', '', '', '', False, '')])):
             self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'GET /patroni'))
+        MockPatroni.config.is_standby_cluster = PropertyMock(return_value=True)
+        MockRestApiServer(RestApiHandler, 'GET /standby_leader')
 
     def test_do_OPTIONS(self):
         self.assertIsNotNone(MockRestApiServer(RestApiHandler, 'OPTIONS / HTTP/1.0'))
tests/test_config.py
@@ -76,7 +76,7 @@ class TestConfig(unittest.TestCase):
     @patch('os.path.exists', Mock(return_value=True))
     @patch('os.remove', Mock(side_effect=IOError))
     @patch('os.close', Mock(side_effect=IOError))
-    @patch('os.rename', Mock(return_value=None))
+    @patch('shutil.move', Mock(return_value=None))
     @patch('json.dump', Mock())
     def test_save_cache(self):
         self.config.set_dynamic_configuration({'ttl': 30, 'postgresql': {'foo': 'bar'}})
tests/test_ctl.py
@@ -196,7 +196,7 @@ class TestCtl(unittest.TestCase):
         self.assertTrue('False' in str(rows))
 
         rows = query_member(None, None, None, 'replica', 'SELECT pg_is_in_recovery()', {})
-        self.assertEquals(rows, (None, None))
+        self.assertEqual(rows, (None, None))
 
         with patch('test_postgresql.MockCursor.execute', Mock(side_effect=OperationalError('bla'))):
            rows = query_member(None, None, None, 'replica', 'SELECT pg_is_in_recovery()', {})
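Most of the remaining test changes are this same mechanical rename. `assertEquals` has been a deprecated alias of `assertEqual` in unittest since Python 2.7/3.2, and calling it emits a DeprecationWarning, which fails warnings-as-errors test runs. A minimal standalone illustration:

```python
import unittest

class RenameExample(unittest.TestCase):
    def test_sum(self):
        # assertEquals still works but emits a DeprecationWarning;
        # assertEqual is the canonical method name.
        self.assertEqual(1 + 1, 2)

if __name__ == '__main__':
    unittest.main()
```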
@@ -364,20 +364,20 @@ class TestCtl(unittest.TestCase):
         self.assertIsNone(get_any_member(get_cluster_initialized_without_leader(), role='master'))
 
         m = get_any_member(get_cluster_initialized_with_leader(), role='master')
-        self.assertEquals(m.name, 'leader')
+        self.assertEqual(m.name, 'leader')
 
     def test_get_all_members(self):
-        self.assertEquals(list(get_all_members(get_cluster_initialized_without_leader(), role='master')), [])
+        self.assertEqual(list(get_all_members(get_cluster_initialized_without_leader(), role='master')), [])
 
         r = list(get_all_members(get_cluster_initialized_with_leader(), role='master'))
-        self.assertEquals(len(r), 1)
-        self.assertEquals(r[0].name, 'leader')
+        self.assertEqual(len(r), 1)
+        self.assertEqual(r[0].name, 'leader')
 
         r = list(get_all_members(get_cluster_initialized_with_leader(), role='replica'))
-        self.assertEquals(len(r), 1)
-        self.assertEquals(r[0].name, 'other')
+        self.assertEqual(len(r), 1)
+        self.assertEqual(r[0].name, 'other')
 
-        self.assertEquals(len(list(get_all_members(get_cluster_initialized_without_leader(), role='replica'))), 2)
+        self.assertEqual(len(list(get_all_members(get_cluster_initialized_without_leader(), role='replica'))), 2)
 
     @patch('patroni.ctl.get_dcs')
     def test_members(self, mock_get_dcs):
@@ -499,23 +499,23 @@ class TestCtl(unittest.TestCase):
         after_editing, changed_config = apply_config_changes(before_editing, config,
                                                              ["postgresql.parameters.work_mem = 5MB",
                                                               "ttl=15", "postgresql.use_pg_rewind=off", 'a.b=c'])
-        self.assertEquals(changed_config, {"a": {"b": "c"}, "postgresql": {"parameters": {"work_mem": "5MB"},
-                                                                           "use_pg_rewind": False}, "ttl": 15})
+        self.assertEqual(changed_config, {"a": {"b": "c"}, "postgresql": {"parameters": {"work_mem": "5MB"},
+                                                                          "use_pg_rewind": False}, "ttl": 15})
 
         # postgresql.parameters namespace is flattened
         after_editing, changed_config = apply_config_changes(before_editing, config,
                                                              ["postgresql.parameters.work_mem.sub = x"])
-        self.assertEquals(changed_config, {"postgresql": {"parameters": {"work_mem": "4MB", "work_mem.sub": "x"},
-                                                          "use_pg_rewind": True}, "ttl": 30})
+        self.assertEqual(changed_config, {"postgresql": {"parameters": {"work_mem": "4MB", "work_mem.sub": "x"},
+                                                         "use_pg_rewind": True}, "ttl": 30})
 
         # Setting to null deletes
         after_editing, changed_config = apply_config_changes(before_editing, config,
                                                              ["postgresql.parameters.work_mem=null"])
-        self.assertEquals(changed_config, {"postgresql": {"use_pg_rewind": True}, "ttl": 30})
+        self.assertEqual(changed_config, {"postgresql": {"use_pg_rewind": True}, "ttl": 30})
         after_editing, changed_config = apply_config_changes(before_editing, config,
                                                              ["postgresql.use_pg_rewind=null",
                                                               "postgresql.parameters.work_mem=null"])
-        self.assertEquals(changed_config, {"ttl": 30})
+        self.assertEqual(changed_config, {"ttl": 30})
 
         self.assertRaises(PatroniCtlException, apply_config_changes, before_editing, config, ['a'])
@@ -572,5 +572,5 @@ class TestCtl(unittest.TestCase):
         assert 'failed to get version' in result.output
 
     def test_format_pg_version(self):
-        self.assertEquals(format_pg_version(100001), '10.1')
-        self.assertEquals(format_pg_version(90605), '9.6.5')
+        self.assertEqual(format_pg_version(100001), '10.1')
+        self.assertEqual(format_pg_version(90605), '9.6.5')
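The expectations encode PostgreSQL's numeric version format: pre-10 releases pack major.minor.patch into five digits, 10+ pack major.patch into six. A sketch that satisfies exactly these two test cases (illustrative, not necessarily Patroni's implementation):

```python
def format_pg_version(version):
    # 90605 -> '9.6.5' (9.6, patch 5); 100001 -> '10.1' (10, patch 1)
    if version < 100000:
        return '{0}.{1}.{2}'.format(version // 10000, version // 100 % 100, version % 100)
    return '{0}.{1}'.format(version // 10000, version % 100)

assert format_pg_version(100001) == '10.1'
assert format_pg_version(90605) == '9.6.5'
```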
tests/test_etcd.py
@@ -215,8 +215,8 @@ class TestClient(unittest.TestCase):
         self.assertRaises(etcd.EtcdException, self.client.api_execute, '/', 'GET')
 
     def test_get_srv_record(self):
-        self.assertEquals(self.client.get_srv_record('_etcd-server._tcp.blabla'), [])
-        self.assertEquals(self.client.get_srv_record('_etcd-server._tcp.exception'), [])
+        self.assertEqual(self.client.get_srv_record('_etcd-server._tcp.blabla'), [])
+        self.assertEqual(self.client.get_srv_record('_etcd-server._tcp.exception'), [])
 
     def test__get_machines_cache_from_srv(self):
         self.client._get_machines_cache_from_srv('foobar')
@@ -259,7 +259,7 @@ class TestEtcd(unittest.TestCase):
                                            'host': 'localhost:2379', 'scope': 'test', 'name': 'foo'})
 
     def test_base_path(self):
-        self.assertEquals(self.etcd._base_path, '/patroni/test')
+        self.assertEqual(self.etcd._base_path, '/patroni/test')
 
     @patch('dns.resolver.query', dns_query)
     def test_get_etcd_client(self):
tests/test_ha.py
@@ -209,7 +209,7 @@ class TestHa(unittest.TestCase):
 
     def test_start_as_replica(self):
         self.p.is_healthy = false
-        self.assertEquals(self.ha.run_cycle(), 'starting as a secondary')
+        self.assertEqual(self.ha.run_cycle(), 'starting as a secondary')
 
     @patch('patroni.dcs.etcd.Etcd.initialize', return_value=True)
     def test_start_as_standby_leader(self, initialize):
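Throughout these tests, lowercase `true` and `false` are tiny helpers defined in tests/test_ha.py so that attributes like `is_healthy` can be replaced with callables; roughly (a sketch of the pattern, not a verbatim copy):

```python
def true(*args, **kwargs):
    return True

def false(*args, **kwargs):
    return False
```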
@@ -221,7 +221,7 @@ class TestHa(unittest.TestCase):
             "port": 5432,
             "primary_slot_name": "",
         }}
-        self.assertEquals(
+        self.assertEqual(
             self.ha.run_cycle(),
             'trying to bootstrap a new standby leader'
         )
@@ -238,7 +238,7 @@ class TestHa(unittest.TestCase):
             "port": 5432,
             "primary_slot_name": "",
         }}
-        self.assertEquals(
+        self.assertEqual(
             self.ha.run_cycle(),
             "trying to bootstrap from replica 'test'"
         )
@@ -253,14 +253,14 @@ class TestHa(unittest.TestCase):
             "primary_slot_name": "",
         }}
         self.ha._post_bootstrap_task = CriticalTask()
-        self.assertEquals(self.ha.bootstrap_standby_leader(), True)
+        self.assertEqual(self.ha.bootstrap_standby_leader(), True)
 
     def test_recover_replica_failed(self):
         self.p.controldata = lambda: {'Database cluster state': 'in recovery', 'Database system identifier': SYSID}
         self.p.is_running = false
         self.p.follow = false
-        self.assertEquals(self.ha.run_cycle(), 'starting as a secondary')
-        self.assertEquals(self.ha.run_cycle(), 'failed to start postgres')
+        self.assertEqual(self.ha.run_cycle(), 'starting as a secondary')
+        self.assertEqual(self.ha.run_cycle(), 'failed to start postgres')
 
     def test_recover_former_master(self):
         self.p.follow = false
@@ -269,19 +269,19 @@ class TestHa(unittest.TestCase):
         self.p.set_role('master')
         self.p.controldata = lambda: {'Database cluster state': 'shut down', 'Database system identifier': SYSID}
         self.ha.cluster = get_cluster_initialized_with_leader()
-        self.assertEquals(self.ha.run_cycle(), 'starting as readonly because i had the session lock')
+        self.assertEqual(self.ha.run_cycle(), 'starting as readonly because i had the session lock')
 
     @patch.object(Postgresql, 'fix_cluster_state', Mock())
     def test_crash_recovery(self):
         self.p.is_running = false
         self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID}
-        self.assertEquals(self.ha.run_cycle(), 'doing crash recovery in a single user mode')
+        self.assertEqual(self.ha.run_cycle(), 'doing crash recovery in a single user mode')
 
     @patch.object(Postgresql, 'rewind_needed_and_possible', Mock(return_value=True))
     def test_recover_with_rewind(self):
         self.p.is_running = false
         self.ha.cluster = get_cluster_initialized_with_leader()
-        self.assertEquals(self.ha.run_cycle(), 'running pg_rewind from leader')
+        self.assertEqual(self.ha.run_cycle(), 'running pg_rewind from leader')
 
     @patch('sys.exit', return_value=1)
     @patch('patroni.ha.Ha.sysid_valid', MagicMock(return_value=True))
@@ -296,129 +296,129 @@ class TestHa(unittest.TestCase):
         self.p.is_healthy = true
         self.ha.has_lock = true
         self.p.controldata = lambda: {'Database cluster state': 'in production', 'Database system identifier': SYSID}
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')
 
     @patch('psycopg2.connect', psycopg2_connect)
     def test_acquire_lock_as_master(self):
-        self.assertEquals(self.ha.run_cycle(), 'acquired session lock as a leader')
+        self.assertEqual(self.ha.run_cycle(), 'acquired session lock as a leader')
 
     def test_promoted_by_acquiring_lock(self):
         self.ha.is_healthiest_node = true
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
 
     def test_long_promote(self):
         self.ha.cluster.is_unlocked = false
         self.ha.has_lock = true
         self.p.is_leader = false
         self.p.set_role('master')
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
 
     def test_demote_after_failing_to_obtain_lock(self):
         self.ha.acquire_lock = false
-        self.assertEquals(self.ha.run_cycle(), 'demoted self after trying and failing to obtain lock')
+        self.assertEqual(self.ha.run_cycle(), 'demoted self after trying and failing to obtain lock')
 
     def test_follow_new_leader_after_failing_to_obtain_lock(self):
         self.ha.is_healthiest_node = true
         self.ha.acquire_lock = false
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock')
+        self.assertEqual(self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock')
 
     def test_demote_because_not_healthiest(self):
         self.ha.is_healthiest_node = false
-        self.assertEquals(self.ha.run_cycle(), 'demoting self because i am not the healthiest node')
+        self.assertEqual(self.ha.run_cycle(), 'demoting self because i am not the healthiest node')
 
     def test_follow_new_leader_because_not_healthiest(self):
         self.ha.is_healthiest_node = false
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
+        self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
 
     def test_promote_because_have_lock(self):
         self.ha.cluster.is_unlocked = false
         self.ha.has_lock = true
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')
 
     def test_promote_without_watchdog(self):
         self.ha.cluster.is_unlocked = false
         self.ha.has_lock = true
         self.p.is_leader = true
         with patch.object(Watchdog, 'activate', Mock(return_value=False)):
-            self.assertEquals(self.ha.run_cycle(), 'Demoting self because watchdog could not be activated')
+            self.assertEqual(self.ha.run_cycle(), 'Demoting self because watchdog could not be activated')
             self.p.is_leader = false
-            self.assertEquals(self.ha.run_cycle(), 'Not promoting self because watchdog could not be activated')
+            self.assertEqual(self.ha.run_cycle(), 'Not promoting self because watchdog could not be activated')
 
     def test_leader_with_lock(self):
         self.ha.cluster = get_cluster_not_initialized_without_leader()
         self.ha.cluster.is_unlocked = false
         self.ha.has_lock = true
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
 
     def test_demote_because_not_having_lock(self):
         self.ha.cluster.is_unlocked = false
         with patch.object(Watchdog, 'is_running', PropertyMock(return_value=True)):
-            self.assertEquals(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')
+            self.assertEqual(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')
 
     def test_demote_because_update_lock_failed(self):
         self.ha.cluster.is_unlocked = false
         self.ha.has_lock = true
         self.ha.update_lock = false
-        self.assertEquals(self.ha.run_cycle(), 'demoted self because failed to update leader lock in DCS')
+        self.assertEqual(self.ha.run_cycle(), 'demoted self because failed to update leader lock in DCS')
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'not promoting because failed to update leader lock in DCS')
+        self.assertEqual(self.ha.run_cycle(), 'not promoting because failed to update leader lock in DCS')
 
     def test_follow(self):
         self.ha.cluster.is_unlocked = false
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
         self.ha.patroni.replicatefrom = "foo"
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
 
     def test_follow_in_pause(self):
         self.ha.cluster.is_unlocked = false
         self.ha.is_paused = true
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: no action')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: no action')
 
     @patch.object(Postgresql, 'rewind_needed_and_possible', Mock(return_value=True))
     def test_follow_triggers_rewind(self):
         self.p.is_leader = false
         self.p.trigger_check_diverged_lsn()
         self.ha.cluster = get_cluster_initialized_with_leader()
-        self.assertEquals(self.ha.run_cycle(), 'running pg_rewind from leader')
+        self.assertEqual(self.ha.run_cycle(), 'running pg_rewind from leader')
 
     def test_no_etcd_connection_master_demote(self):
         self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly'))
-        self.assertEquals(self.ha.run_cycle(), 'demoted self because DCS is not accessible and i was a leader')
+        self.assertEqual(self.ha.run_cycle(), 'demoted self because DCS is not accessible and i was a leader')
 
     @patch('time.sleep', Mock())
     def test_bootstrap_from_another_member(self):
         self.ha.cluster = get_cluster_initialized_with_leader()
-        self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap from replica \'other\'')
+        self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap from replica \'other\'')
 
     def test_bootstrap_waiting_for_leader(self):
         self.ha.cluster = get_cluster_initialized_without_leader()
-        self.assertEquals(self.ha.bootstrap(), 'waiting for leader to bootstrap')
+        self.assertEqual(self.ha.bootstrap(), 'waiting for leader to bootstrap')
 
     def test_bootstrap_without_leader(self):
         self.ha.cluster = get_cluster_initialized_without_leader()
         self.p.can_create_replica_without_replication_connection = MagicMock(return_value=True)
-        self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap (without leader)')
+        self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap (without leader)')
 
     def test_bootstrap_initialize_lock_failed(self):
         self.ha.cluster = get_cluster_not_initialized_without_leader()
-        self.assertEquals(self.ha.bootstrap(), 'failed to acquire initialize lock')
+        self.assertEqual(self.ha.bootstrap(), 'failed to acquire initialize lock')
 
     def test_bootstrap_initialized_new_cluster(self):
         self.ha.cluster = get_cluster_not_initialized_without_leader()
         self.e.initialize = true
-        self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap a new cluster')
+        self.assertEqual(self.ha.bootstrap(), 'trying to bootstrap a new cluster')
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'waiting for end of recovery after bootstrap')
+        self.assertEqual(self.ha.run_cycle(), 'waiting for end of recovery after bootstrap')
         self.p.is_leader = true
-        self.assertEquals(self.ha.run_cycle(), 'running post_bootstrap')
-        self.assertEquals(self.ha.run_cycle(), 'initialized a new cluster')
+        self.assertEqual(self.ha.run_cycle(), 'running post_bootstrap')
+        self.assertEqual(self.ha.run_cycle(), 'initialized a new cluster')
 
     def test_bootstrap_release_initialize_key_on_failure(self):
         self.ha.cluster = get_cluster_not_initialized_without_leader()
@@ -434,7 +434,7 @@ class TestHa(unittest.TestCase):
         self.p.is_running.return_value = MockPostmaster()
         self.p.is_leader = true
         with patch.object(Watchdog, 'activate', Mock(return_value=False)):
-            self.assertEquals(self.ha.post_bootstrap(), 'running post_bootstrap')
+            self.assertEqual(self.ha.post_bootstrap(), 'running post_bootstrap')
         self.assertRaises(PatroniException, self.ha.post_bootstrap)
 
     @patch('psycopg2.connect', psycopg2_connect)
@@ -451,35 +451,35 @@ class TestHa(unittest.TestCase):
 
     @patch('time.sleep', Mock())
     def test_restart(self):
-        self.assertEquals(self.ha.restart({}), (True, 'restarted successfully'))
+        self.assertEqual(self.ha.restart({}), (True, 'restarted successfully'))
         self.p.restart = Mock(return_value=None)
-        self.assertEquals(self.ha.restart({}), (False, 'postgres is still starting'))
+        self.assertEqual(self.ha.restart({}), (False, 'postgres is still starting'))
         self.p.restart = false
-        self.assertEquals(self.ha.restart({}), (False, 'restart failed'))
+        self.assertEqual(self.ha.restart({}), (False, 'restart failed'))
         self.ha.cluster = get_cluster_initialized_with_leader()
         self.ha.reinitialize()
-        self.assertEquals(self.ha.restart({}), (False, 'reinitialize already in progress'))
+        self.assertEqual(self.ha.restart({}), (False, 'reinitialize already in progress'))
         with patch.object(self.ha, "restart_matches", return_value=False):
-            self.assertEquals(self.ha.restart({'foo': 'bar'}), (False, "restart conditions are not satisfied"))
+            self.assertEqual(self.ha.restart({'foo': 'bar'}), (False, "restart conditions are not satisfied"))
 
     @patch('os.kill', Mock())
     def test_restart_in_progress(self):
         with patch('patroni.async_executor.AsyncExecutor.busy', PropertyMock(return_value=True)):
             self.ha.restart({}, run_async=True)
             self.assertTrue(self.ha.restart_scheduled())
-            self.assertEquals(self.ha.run_cycle(), 'restart in progress')
+            self.assertEqual(self.ha.run_cycle(), 'restart in progress')
 
             self.ha.cluster = get_cluster_initialized_with_leader()
-            self.assertEquals(self.ha.run_cycle(), 'restart in progress')
+            self.assertEqual(self.ha.run_cycle(), 'restart in progress')
 
             self.ha.has_lock = true
-            self.assertEquals(self.ha.run_cycle(), 'updated leader lock during restart')
+            self.assertEqual(self.ha.run_cycle(), 'updated leader lock during restart')
 
             self.ha.update_lock = false
             self.p.set_role('master')
             with patch('patroni.async_executor.CriticalTask.cancel', Mock(return_value=False)):
                 with patch('patroni.postgresql.Postgresql.terminate_starting_postmaster') as mock_terminate:
-                    self.assertEquals(self.ha.run_cycle(), 'lost leader lock during restart')
+                    self.assertEqual(self.ha.run_cycle(), 'lost leader lock during restart')
                     mock_terminate.assert_called()
 
     @patch('requests.get', requests_get)
@@ -487,25 +487,25 @@ class TestHa(unittest.TestCase):
         self.ha.fetch_node_status = get_node_status()
         self.ha.has_lock = true
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', '', None))
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None))
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', 'blabla', None))
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
         f = Failover(0, self.p.name, '', None)
         self.ha.cluster = get_cluster_initialized_with_leader(f)
-        self.assertEquals(self.ha.run_cycle(), 'manual failover: demoting myself')
+        self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself')
         self.p.rewind_needed_and_possible = true
-        self.assertEquals(self.ha.run_cycle(), 'manual failover: demoting myself')
+        self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself')
         self.ha.fetch_node_status = get_node_status(nofailover=True)
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
         self.ha.fetch_node_status = get_node_status(watchdog_failed=True)
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
         self.ha.fetch_node_status = get_node_status(wal_position=1)
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
         # manual failover from the previous leader to us won't happen if we hold the nofailover flag
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, None))
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
 
         # Failover scheduled time must include timezone
         scheduled = datetime.datetime.now()
@@ -514,19 +514,19 @@ class TestHa(unittest.TestCase):
 
         scheduled = datetime.datetime.utcnow().replace(tzinfo=tzutc)
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
-        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle())
 
         scheduled = scheduled + datetime.timedelta(seconds=30)
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
-        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle())
 
         scheduled = scheduled + datetime.timedelta(seconds=-600)
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
-        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle())
 
         scheduled = None
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
-        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle())
 
     @patch('requests.get', requests_get)
     def test_manual_failover_from_leader_in_pause(self):
@@ -534,9 +534,9 @@ class TestHa(unittest.TestCase):
         self.ha.is_paused = true
         scheduled = datetime.datetime.now()
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
-        self.assertEquals('PAUSE: no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('PAUSE: no action. i am the leader with the lock', self.ha.run_cycle())
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, '', None))
-        self.assertEquals('PAUSE: no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('PAUSE: no action. i am the leader with the lock', self.ha.run_cycle())
 
     @patch('requests.get', requests_get)
     def test_manual_failover_from_leader_in_synchronous_mode(self):
@@ -546,48 +546,48 @@ class TestHa(unittest.TestCase):
         self.ha.is_failover_possible = false
         self.ha.process_sync_replication = Mock()
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'a', None), (self.p.name, None))
-        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
+        self.assertEqual('no action. i am the leader with the lock', self.ha.run_cycle())
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, self.p.name, 'a', None), (self.p.name, 'a'))
         self.ha.is_failover_possible = true
-        self.assertEquals('manual failover: demoting myself', self.ha.run_cycle())
+        self.assertEqual('manual failover: demoting myself', self.ha.run_cycle())
 
     @patch('requests.get', requests_get)
     def test_manual_failover_process_no_leader(self):
         self.p.is_leader = false
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', self.p.name, None))
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'leader', None))
         self.p.set_role('replica')
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
         self.ha.fetch_node_status = get_node_status()  # accessible, in_recovery
-        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
+        self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, self.p.name, '', None))
-        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
+        self.assertEqual(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
         self.ha.fetch_node_status = get_node_status(reachable=False)  # inaccessible, in_recovery
         self.p.set_role('replica')
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
         # set failover flag to True for all members of the cluster
         # this should elect the current member, as we are not going to call the API for it.
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None))
         self.ha.fetch_node_status = get_node_status(nofailover=True)  # accessible, in_recovery
         self.p.set_role('replica')
-        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
+        self.assertEqual(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
         # same as previous, but set the current member to nofailover. In no case it should be elected as a leader
         self.ha.patroni.nofailover = True
-        self.assertEquals(self.ha.run_cycle(), 'following a different leader because I am not allowed to promote')
+        self.assertEqual(self.ha.run_cycle(), 'following a different leader because I am not allowed to promote')
 
     def test_manual_failover_process_no_leader_in_pause(self):
         self.ha.is_paused = true
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None))
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', '', None))
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: continue to run as master without lock')
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', 'blabla', None))
-        self.assertEquals('PAUSE: acquired session lock as a leader', self.ha.run_cycle())
+        self.assertEqual('PAUSE: acquired session lock as a leader', self.ha.run_cycle())
         self.p.is_leader = false
         self.p.set_role('replica')
         self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, 'leader', self.p.name, None))
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: promoted self to leader by acquiring session lock')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: promoted self to leader by acquiring session lock')
 
     def test_is_healthiest_node(self):
         self.ha.state_handler.is_leader = false
@@ -668,7 +668,7 @@ class TestHa(unittest.TestCase):
     def test_scheduled_restart(self):
         self.ha.cluster = get_cluster_initialized_with_leader()
         with patch.object(self.ha, "evaluate_scheduled_restart", Mock(return_value="restart scheduled")):
-            self.assertEquals(self.ha.run_cycle(), "restart scheduled")
+            self.assertEqual(self.ha.run_cycle(), "restart scheduled")
 
     def test_restart_matches(self):
         self.p._role = 'replica'
@@ -685,9 +685,9 @@ class TestHa(unittest.TestCase):
         self.ha.is_paused = true
         self.p.name = 'leader'
         self.ha.cluster = get_cluster_initialized_with_leader()
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running as master')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running as master')
         self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None))
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: waiting to become master after promote...')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: waiting to become master after promote...')
 
     def test_process_healthy_standby_cluster_as_standby_leader(self):
         self.p.is_leader = false
@@ -699,7 +699,7 @@ class TestHa(unittest.TestCase):
         }}
         self.ha.cluster = get_standby_cluster_initialized_with_only_leader()
         msg = 'no action. i am the standby leader with the lock'
-        self.assertEquals(self.ha.run_cycle(), msg)
+        self.assertEqual(self.ha.run_cycle(), msg)
 
     def test_process_healthy_standby_cluster_as_cascade_replica(self):
         self.p.is_leader = false
@@ -711,7 +711,7 @@ class TestHa(unittest.TestCase):
         }}
         self.ha.cluster = get_standby_cluster_initialized_with_only_leader()
         msg = 'no action. i am a secondary and i am following a leader'
-        self.assertEquals(self.ha.run_cycle(), msg)
+        self.assertEqual(self.ha.run_cycle(), msg)
 
     @patch('patroni.dcs.etcd.Etcd.initialize', return_value=True)
     def test_process_unhealthy_standby_cluster_as_standby_leader(self, initialize):
@@ -727,7 +727,7 @@ class TestHa(unittest.TestCase):
         self.ha.sysid_valid = true
         self.p._sysid = True
         msg = 'promoted self to a standby leader because i had the session lock'
-        self.assertEquals(self.ha.run_cycle(), msg)
+        self.assertEqual(self.ha.run_cycle(), msg)
 
     @patch.object(Postgresql, 'rewind_needed_and_possible', Mock(return_value=True))
     @patch('patroni.dcs.etcd.Etcd.initialize', return_value=True)
@@ -742,27 +742,27 @@ class TestHa(unittest.TestCase):
         self.ha.cluster = get_standby_cluster_initialized_with_only_leader()
         self.ha.is_unlocked = true
         msg = 'running pg_rewind from leader'
-        self.assertEquals(self.ha.run_cycle(), msg)
+        self.assertEqual(self.ha.run_cycle(), msg)
 
     def test_failed_to_update_lock_in_pause(self):
         self.ha.update_lock = false
         self.ha.is_paused = true
         self.p.name = 'leader'
         self.ha.cluster = get_cluster_initialized_with_leader()
-        self.assertEquals(self.ha.run_cycle(),
-                          'PAUSE: continue to run as master after failing to update leader lock in DCS')
+        self.assertEqual(self.ha.run_cycle(),
+                         'PAUSE: continue to run as master after failing to update leader lock in DCS')
 
     def test_postgres_unhealthy_in_pause(self):
         self.ha.is_paused = true
         self.p.is_healthy = false
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: postgres is not running')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: postgres is not running')
         self.ha.has_lock = true
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: removed leader lock because postgres is not running')
 
     def test_no_etcd_connection_in_pause(self):
         self.ha.is_paused = true
         self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly'))
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: DCS is not accessible')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: DCS is not accessible')
 
     @patch('patroni.ha.Ha.update_lock', return_value=True)
     @patch('patroni.ha.Ha.demote')
@@ -778,26 +778,26 @@ class TestHa(unittest.TestCase):
         self.ha.cluster = get_cluster_initialized_with_leader()
         self.p.check_for_startup = true
         self.p.time_in_state = lambda: 30
-        self.assertEquals(self.ha.run_cycle(), 'PostgreSQL is still starting up, 270 seconds until timeout')
+        self.assertEqual(self.ha.run_cycle(), 'PostgreSQL is still starting up, 270 seconds until timeout')
         check_calls([(update_lock, True), (demote, False)])
 
         self.p.time_in_state = lambda: 350
         self.ha.fetch_node_status = get_node_status(reachable=False)  # inaccessible, in_recovery
-        self.assertEquals(self.ha.run_cycle(),
-                          'master start has timed out, but continuing to wait because failover is not possible')
+        self.assertEqual(self.ha.run_cycle(),
+                         'master start has timed out, but continuing to wait because failover is not possible')
         check_calls([(update_lock, True), (demote, False)])
 
         self.ha.fetch_node_status = get_node_status()  # accessible, in_recovery
-        self.assertEquals(self.ha.run_cycle(), 'stopped PostgreSQL because of startup timeout')
+        self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL because of startup timeout')
        check_calls([(update_lock, True), (demote, True)])
 
         update_lock.return_value = False
-        self.assertEquals(self.ha.run_cycle(), 'stopped PostgreSQL while starting up because leader key was lost')
+        self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL while starting up because leader key was lost')
         check_calls([(update_lock, True), (demote, True)])
 
         self.ha.has_lock = false
         self.p.is_leader = false
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
         check_calls([(update_lock, False), (demote, False)])
 
     def test_manual_failover_while_starting(self):
@@ -806,7 +806,7 @@ class TestHa(unittest.TestCase):
         f = Failover(0, self.p.name, '', None)
         self.ha.cluster = get_cluster_initialized_with_leader(f)
         self.ha.fetch_node_status = get_node_status()  # accessible, in_recovery
-        self.assertEquals(self.ha.run_cycle(), 'manual failover: demoting myself')
+        self.assertEqual(self.ha.run_cycle(), 'manual failover: demoting myself')
 
     @patch('patroni.ha.Ha.demote')
     def test_failover_immediately_on_zero_master_start_timeout(self, demote):
@@ -817,7 +817,7 @@ class TestHa(unittest.TestCase):
         self.ha.has_lock = true
         self.ha.update_lock = true
         self.ha.fetch_node_status = get_node_status()  # accessible, in_recovery
-        self.assertEquals(self.ha.run_cycle(), 'stopped PostgreSQL to fail over after a crash')
+        self.assertEqual(self.ha.run_cycle(), 'stopped PostgreSQL to fail over after a crash')
         demote.assert_called_once()
 
     @patch('patroni.postgresql.Postgresql.follow')
@@ -879,18 +879,18 @@ class TestHa(unittest.TestCase):
         self.p.pick_synchronous_standby = Mock(return_value=('other2', True))
         self.ha.run_cycle()
         self.ha.dcs.get_cluster.assert_called_once()
-        self.assertEquals(self.ha.dcs.write_sync_state.call_count, 2)
+        self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2)
 
         # Test updating sync standby key failed due to race
         self.ha.dcs.write_sync_state = Mock(side_effect=[True, False])
         self.ha.run_cycle()
-        self.assertEquals(self.ha.dcs.write_sync_state.call_count, 2)
+        self.assertEqual(self.ha.dcs.write_sync_state.call_count, 2)
 
         # Test changing sync standby failed due to race
         self.ha.dcs.write_sync_state = Mock(return_value=True)
         self.ha.dcs.get_cluster = Mock(return_value=get_cluster_initialized_with_leader(sync=('somebodyelse', None)))
         self.ha.run_cycle()
-        self.assertEquals(self.ha.dcs.write_sync_state.call_count, 1)
+        self.assertEqual(self.ha.dcs.write_sync_state.call_count, 1)
 
         # Test sync set to '*' when synchronous_mode_strict is enabled
         mock_set_sync.reset_mock()
@@ -911,7 +911,7 @@ class TestHa(unittest.TestCase):
         self.ha.cluster = get_cluster_initialized_with_leader(sync=('other', None))
 
         # When we just became master nobody is sync
-        self.assertEquals(self.ha.enforce_master_role('msg', 'promote msg'), 'promote msg')
+        self.assertEqual(self.ha.enforce_master_role('msg', 'promote msg'), 'promote msg')
         mock_set_sync.assert_called_once_with(None)
         mock_write_sync.assert_called_once_with('leader', None, index=0)
@@ -939,7 +939,7 @@ class TestHa(unittest.TestCase):
         self.ha.run_cycle()
         mock_acquire.assert_not_called()
         mock_follow.assert_called_once()
-        self.assertEquals(mock_follow.call_args[0][0], None)
+        self.assertEqual(mock_follow.call_args[0][0], None)
         mock_write_sync.assert_not_called()
 
         mock_follow.reset_mock()
@@ -990,15 +990,15 @@ class TestHa(unittest.TestCase):
 
     def test_effective_tags(self):
         self.ha._disable_sync = True
-        self.assertEquals(self.ha.get_effective_tags(), {'foo': 'bar', 'nosync': True})
+        self.assertEqual(self.ha.get_effective_tags(), {'foo': 'bar', 'nosync': True})
         self.ha._disable_sync = False
-        self.assertEquals(self.ha.get_effective_tags(), {'foo': 'bar'})
+        self.assertEqual(self.ha.get_effective_tags(), {'foo': 'bar'})
 
     def test_restore_cluster_config(self):
         self.ha.cluster.config.data.clear()
         self.ha.has_lock = true
         self.ha.cluster.is_unlocked = false
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
 
     def test_watch(self):
         self.ha.cluster = get_cluster_initialized_with_leader()
@@ -1017,19 +1017,19 @@ class TestHa(unittest.TestCase):
         self.ha.cluster = get_cluster_initialized_with_leader()
         self.ha.has_lock = true
         self.p.data_directory_empty = true
-        self.assertEquals(self.ha.run_cycle(), 'released leader key voluntarily as data dir empty and currently leader')
-        self.assertEquals(self.p.role, 'uninitialized')
+        self.assertEqual(self.ha.run_cycle(), 'released leader key voluntarily as data dir empty and currently leader')
+        self.assertEqual(self.p.role, 'uninitialized')
 
         # as has_lock is mocked out, we need to fake the leader key release
         self.ha.has_lock = false
         # will not say bootstrap from leader as replica can't self elect
-        self.assertEquals(self.ha.run_cycle(), "trying to bootstrap from replica 'other'")
+        self.assertEqual(self.ha.run_cycle(), "trying to bootstrap from replica 'other'")
 
     def test_update_cluster_history(self):
         self.p.get_master_timeline = Mock(return_value=1)
         self.ha.has_lock = true
         self.ha.cluster.is_unlocked = false
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
 
     @patch('sys.exit', return_value=1)
     def test_abort_join(self, exit_mock):
@@ -1042,6 +1042,6 @@ class TestHa(unittest.TestCase):
         self.ha.has_lock = true
         self.ha.cluster.is_unlocked = false
         self.ha.is_paused = true
-        self.assertEquals(self.ha.run_cycle(), 'PAUSE: no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'PAUSE: no action. i am the leader with the lock')
         self.ha.is_paused = false
-        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
+        self.assertEqual(self.ha.run_cycle(), 'no action. i am the leader with the lock')
tests/test_patroni.py
@@ -73,7 +73,7 @@ class TestPatroni(unittest.TestCase):
         mock_getpid.return_value = 2
         _main()
 
-        with patch('sys.frozen', Mock(return_value=True), create=True):
+        with patch('sys.frozen', Mock(return_value=True), create=True), patch('os.setsid', Mock()):
             sys.argv = ['/patroni', 'pg_ctl_start', 'postgres', '-D', '/data', '--max_connections=100']
             _main()
tests/test_postgresql.py
@@ -308,7 +308,7 @@ class TestPostgresql(unittest.TestCase):
     def test_restart(self):
         self.p.start = Mock(return_value=False)
         self.assertFalse(self.p.restart())
-        self.assertEquals(self.p.state, 'restart failed (restarting)')
+        self.assertEqual(self.p.state, 'restart failed (restarting)')
 
     @patch.object(builtins, 'open', MagicMock())
     def test_write_pgpass(self):
@@ -317,10 +317,10 @@ class TestPostgresql(unittest.TestCase):
 
     def test_checkpoint(self):
         with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))):
-            self.assertEquals(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true')
+            self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true')
         with patch.object(MockCursor, 'execute', Mock(return_value=None)):
             self.assertIsNone(self.p.checkpoint())
-        self.assertEquals(self.p.checkpoint(), 'not accessible or not healty')
+        self.assertEqual(self.p.checkpoint(), 'not accessible or not healty')
 
     @patch.object(Postgresql, 'cancellable_subprocess_call')
     @patch('patroni.postgresql.Postgresql.write_pgpass', MagicMock(return_value=dict()))
@@ -401,7 +401,8 @@ class TestPostgresql(unittest.TestCase):
     @patch.object(Postgresql, 'is_running', Mock(return_value=False))
     @patch.object(Postgresql, 'start', Mock())
     def test_follow(self):
+        self.p.follow(RemoteMember('123', {'recovery_command': 'foo'}))
         m = RemoteMember('1', {'restore_command': '2', 'recovery_min_apply_delay': 3, 'archive_cleanup_command': '4'})
         self.p.follow(m)
 
     @patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string))
     def test_can_rewind(self):
@@ -423,13 +424,13 @@ class TestPostgresql(unittest.TestCase):
         self.p.config['create_replica_methods'] = ['wale', 'basebackup']
         self.p.config['wale'] = {'command': 'foo'}
         mock_cancellable_subprocess_call.return_value = 0
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
         del self.p.config['wale']
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         self.p.config['create_replica_methods'] = ['basebackup']
         self.p.config['basebackup'] = [{'max_rate': '100M'}, 'no-sync']
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         self.p.config['basebackup'] = [{'max_rate': '100M', 'compress': '9'}]
         with mock.patch('patroni.postgresql.logger.error', new_callable=Mock()) as mock_logger:
@@ -446,24 +447,24 @@ class TestPostgresql(unittest.TestCase):
                             "not matching {0}".format(mock_logger.call_args[0][0]))
 
         self.p.config['basebackup'] = {"foo": "bar"}
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         self.p.config['create_replica_methods'] = ['wale', 'basebackup']
         del self.p.config['basebackup']
         mock_cancellable_subprocess_call.return_value = 1
-        self.assertEquals(self.p.create_replica(self.leader), 1)
+        self.assertEqual(self.p.create_replica(self.leader), 1)
 
         mock_cancellable_subprocess_call.side_effect = Exception('foo')
-        self.assertEquals(self.p.create_replica(self.leader), 1)
+        self.assertEqual(self.p.create_replica(self.leader), 1)
 
         mock_cancellable_subprocess_call.side_effect = [1, 0]
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         mock_cancellable_subprocess_call.side_effect = [Exception(), 0]
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         self.p.cancel()
-        self.assertEquals(self.p.create_replica(self.leader), 1)
+        self.assertEqual(self.p.create_replica(self.leader), 1)
 
     @patch('time.sleep', Mock())
     @patch.object(Postgresql, 'cancellable_subprocess_call')
@@ -477,18 +478,18 @@ class TestPostgresql(unittest.TestCase):
         self.p.config['create_replica_method'] = ['wale', 'basebackup']
         self.p.config['wale'] = {'command': 'foo'}
         mock_cancellable_subprocess_call.return_value = 0
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
         del self.p.config['wale']
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         self.p.config['create_replica_method'] = ['basebackup']
         self.p.config['basebackup'] = [{'max_rate': '100M'}, 'no-sync']
-        self.assertEquals(self.p.create_replica(self.leader), 0)
+        self.assertEqual(self.p.create_replica(self.leader), 0)
 
         self.p.config['create_replica_method'] = ['wale', 'basebackup']
         del self.p.config['basebackup']
         mock_cancellable_subprocess_call.return_value = 1
-        self.assertEquals(self.p.create_replica(self.leader), 1)
+        self.assertEqual(self.p.create_replica(self.leader), 1)
 
     def test_basebackup(self):
         self.p.cancel()
@@ -549,25 +550,25 @@ class TestPostgresql(unittest.TestCase):
         self.assertTrue(self.p.promote(0))
 
     def test_timeline_wal_position(self):
-        self.assertEquals(self.p.timeline_wal_position(), (1, 2))
+        self.assertEqual(self.p.timeline_wal_position(), (1, 2))
         Thread(target=self.p.timeline_wal_position).start()
 
     @patch.object(PostmasterProcess, 'from_pidfile')
     def test_is_running(self, mock_frompidfile):
         # Cached postmaster running
         mock_postmaster = self.p._postmaster_proc = MockPostmaster()
-        self.assertEquals(self.p.is_running(), mock_postmaster)
+        self.assertEqual(self.p.is_running(), mock_postmaster)
 
         # Cached postmaster not running, no postmaster running
         mock_postmaster.is_running.return_value = False
         mock_frompidfile.return_value = None
-        self.assertEquals(self.p.is_running(), None)
-        self.assertEquals(self.p._postmaster_proc, None)
+        self.assertEqual(self.p.is_running(), None)
+        self.assertEqual(self.p._postmaster_proc, None)
 
         # No cached postmaster, postmaster running
         mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster()
-        self.assertEquals(self.p.is_running(), mock_postmaster2)
-        self.assertEquals(self.p._postmaster_proc, mock_postmaster2)
+        self.assertEqual(self.p.is_running(), mock_postmaster2)
+        self.assertEqual(self.p._postmaster_proc, mock_postmaster2)
 
     @patch('shlex.split', Mock(side_effect=OSError))
     def test_call_nowait(self):
@@ -686,13 +687,13 @@ class TestPostgresql(unittest.TestCase):
         mock_cancellable_subprocess_call.assert_called()
         args, kwargs = mock_cancellable_subprocess_call.call_args
         self.assertTrue('PGPASSFILE' in kwargs['env'])
-        self.assertEquals(args[0], ['/bin/false', 'postgres://127.0.0.2:5432/postgres'])
+        self.assertEqual(args[0], ['/bin/false', 'postgres://127.0.0.2:5432/postgres'])
 
         mock_cancellable_subprocess_call.reset_mock()
         self.p._local_address.pop('host')
         self.assertTrue(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
         mock_cancellable_subprocess_call.assert_called()
-        self.assertEquals(mock_cancellable_subprocess_call.call_args[0][0], ['/bin/false', 'postgres://:5432/postgres'])
+        self.assertEqual(mock_cancellable_subprocess_call.call_args[0][0], ['/bin/false', 'postgres://:5432/postgres'])
 
         mock_cancellable_subprocess_call.side_effect = OSError
         self.assertFalse(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
@@ -704,7 +705,7 @@ class TestPostgresql(unittest.TestCase):
|
||||
@patch('os.listdir', Mock(return_value=['recovery.conf']))
|
||||
@patch('os.path.exists', Mock(return_value=True))
|
||||
def test_get_postgres_role_from_data_directory(self):
|
||||
self.assertEquals(self.p.get_postgres_role_from_data_directory(), 'replica')
|
||||
self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica')
|
||||
|
||||
def test_remove_data_directory(self):
|
||||
self.p.remove_data_directory()
|
||||
@@ -719,13 +720,13 @@ class TestPostgresql(unittest.TestCase):
|
||||
def test_controldata(self):
|
||||
with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)):
|
||||
data = self.p.controldata()
|
||||
self.assertEquals(len(data), 50)
|
||||
self.assertEquals(data['Database cluster state'], 'shut down in recovery')
|
||||
self.assertEquals(data['wal_log_hints setting'], 'on')
|
||||
self.assertEquals(int(data['Database block size']), 8192)
|
||||
self.assertEqual(len(data), 50)
|
||||
self.assertEqual(data['Database cluster state'], 'shut down in recovery')
|
||||
self.assertEqual(data['wal_log_hints setting'], 'on')
|
||||
self.assertEqual(int(data['Database block size']), 8192)
|
||||
|
||||
with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))):
|
||||
self.assertEquals(self.p.controldata(), {})
|
||||
self.assertEqual(self.p.controldata(), {})
|
||||
|
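test_controldata expects `pg_controldata` output to be parsed into a flat dict of "Label: value" pairs, with an empty dict on any failure. A rough standalone equivalent, not Patroni's exact code (the data directory path is just an example):

    import subprocess

    def controldata(data_dir='/var/lib/postgresql/data'):
        try:
            out = subprocess.check_output(['pg_controldata', data_dir])
        except (subprocess.CalledProcessError, OSError):
            return {}                              # mirrors the test's fallback
        result = {}
        for line in out.decode('utf-8').splitlines():
            key, _, value = line.partition(':')
            if value:
                result[key.strip()] = value.strip()
        return result
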
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
@patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string))
@@ -779,9 +780,9 @@ class TestPostgresql(unittest.TestCase):
@patch.object(Postgresql, '_version_file_exists', Mock(return_value=True))
def test_get_major_version(self):
with patch.object(builtins, 'open', mock_open(read_data='9.4')):
self.assertEquals(self.p.get_major_version(), 90400)
self.assertEqual(self.p.get_major_version(), 90400)
with patch.object(builtins, 'open', Mock(side_effect=Exception)):
self.assertEquals(self.p.get_major_version(), 0)
self.assertEqual(self.p.get_major_version(), 0)

def test_postmaster_start_time(self):
with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))):
@@ -793,31 +794,31 @@ class TestPostgresql(unittest.TestCase):
with patch('subprocess.call', return_value=0):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEquals(self.p.state, 'running')
self.assertEqual(self.p.state, 'running')

with patch('subprocess.call', return_value=1):
self.p._state = 'starting'
self.assertTrue(self.p.check_for_startup())
self.assertEquals(self.p.state, 'starting')
self.assertEqual(self.p.state, 'starting')

with patch('subprocess.call', return_value=2):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEquals(self.p.state, 'start failed')
self.assertEqual(self.p.state, 'start failed')

with patch('subprocess.call', return_value=0):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEquals(self.p.state, 'running')
self.assertEqual(self.p.state, 'running')

with patch('subprocess.call', return_value=127):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEquals(self.p.state, 'running')
self.assertEqual(self.p.state, 'running')

self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEquals(self.p.state, 'running')
self.assertEqual(self.p.state, 'running')

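The patched `subprocess.call` return values in the startup checks line up with the documented exit statuses of `pg_isready`: 0 means accepting connections (startup finished), 1 means rejecting connections (still starting up), 2 means no response (startup failed), and 127 is the shell's "command not found", which the test treats as "cannot tell, assume running". Assuming `pg_isready` is the probe behind the mock, the mapping is:

    import subprocess

    def classify_startup(ret):
        """Map a pg_isready-style exit status to the state the test expects."""
        if ret == 0:
            return 'running'        # accepting connections
        if ret == 1:
            return 'starting'       # rejecting connections: still in recovery
        if ret == 2:
            return 'start failed'   # no response from the server
        return 'running'            # unknown status (e.g. 127): stop probing

    ret = subprocess.call(['pg_isready', '-p', '5432'])
    print(classify_startup(ret))
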
def test_wait_for_startup(self):
state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0}
@@ -842,12 +843,12 @@ class TestPostgresql(unittest.TestCase):

self.p._state = 'stopped'
self.assertTrue(self.p.wait_for_startup())
self.assertEquals(state['sleeps'], 0)
self.assertEqual(state['sleeps'], 0)

self.p._state = 'starting'
state['num_rejects'] = 5
self.assertTrue(self.p.wait_for_startup())
self.assertEquals(state['sleeps'], 5)
self.assertEqual(state['sleeps'], 5)

self.p._state = 'starting'
state['sleeps'] = 0
@@ -858,7 +859,7 @@ class TestPostgresql(unittest.TestCase):
state['sleeps'] = 0
state['final_return'] = 0
self.assertFalse(self.p.wait_for_startup(timeout=2))
self.assertEquals(state['sleeps'], 3)
self.assertEqual(state['sleeps'], 3)

with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)):
self.p.cancel()
@@ -874,30 +875,30 @@ class TestPostgresql(unittest.TestCase):
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEquals(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True))
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True))

with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.leadermem.name, 'streaming', 'potential'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEquals(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False))
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False))

with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEquals(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))

with patch.object(Postgresql, "query", return_value=[
('missing', 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEquals(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))

with patch.object(Postgresql, "query", return_value=[]):
self.assertEquals(self.p.pick_synchronous_standby(cluster), (None, False))
self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False))

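Taken together, the query fixtures fix a clear priority order for pick_synchronous_standby: a streaming member already in state `sync` is kept (second element True, no config change needed), then a `potential` member, then the first streaming `async` member; application names that are not known cluster members are ignored, and an empty result yields (None, False). A condensed sketch of that selection, simplified from what the assertions pin down rather than Patroni's exact code (rows mimic pg_stat_replication output):

    def pick_synchronous_standby(rows, member_names):
        # rows: (application_name, state, sync_state) tuples
        candidates = [(name, sync) for name, state, sync in rows
                      if state == 'streaming' and name in member_names]
        for name, sync in candidates:
            if sync == 'sync':              # already synchronous: keep it
                return name, True
        for name, sync in candidates:
            if sync == 'potential':         # becomes sync after a reload
                return name, False
        if candidates:
            return candidates[0][0], False  # first streaming async member
        return None, False
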
def test_set_sync_standby(self):
def value_in_conf():
@@ -908,22 +909,22 @@ class TestPostgresql(unittest.TestCase):

mock_reload = self.p.reload = Mock()
self.p.set_synchronous_standby('n1')
self.assertEquals(value_in_conf(), "synchronous_standby_names = 'n1'")
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
mock_reload.assert_called()

mock_reload.reset_mock()
self.p.set_synchronous_standby('n1')
mock_reload.assert_not_called()
self.assertEquals(value_in_conf(), "synchronous_standby_names = 'n1'")
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")

self.p.set_synchronous_standby('n2')
mock_reload.assert_called()
self.assertEquals(value_in_conf(), "synchronous_standby_names = 'n2'")
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n2'")

mock_reload.reset_mock()
self.p.set_synchronous_standby(None)
mock_reload.assert_called()
self.assertEquals(value_in_conf(), None)
self.assertEqual(value_in_conf(), None)

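The reload expectations above encode an idempotence rule: writing the same `synchronous_standby_names` twice must not trigger a second reload, while changing or clearing it must. A sketch of that guard under those assumptions (the helper and attribute names here are hypothetical):

    def set_synchronous_standby(self, name):
        value = "synchronous_standby_names = '{0}'".format(name) if name else None
        if value == self._sync_value_in_conf:
            return                      # nothing changed: skip the reload
        self._sync_value_in_conf = value
        self._write_config()            # rewrite the managed postgresql.conf
        self.reload()                   # SIGHUP so PostgreSQL picks it up
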
def test_get_server_parameters(self):
config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'}
@@ -957,8 +958,8 @@ class TestPostgresql(unittest.TestCase):
"--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n')
with patch.object(builtins, 'open', m):
data = self.p.read_postmaster_opts()
self.assertEquals(data['wal_level'], 'hot_standby')
self.assertEquals(int(data['max_replication_slots']), 5)
self.assertEqual(data['wal_level'], 'hot_standby')
self.assertEqual(int(data['max_replication_slots']), 5)
self.assertEqual(data.get('D'), None)

m.side_effect = IOError
@@ -968,7 +969,7 @@ class TestPostgresql(unittest.TestCase):
@patch('subprocess.Popen')
def test_single_user_mode(self, subprocess_popen_mock):
subprocess_popen_mock.return_value.wait.return_value = 0
self.assertEquals(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0)
self.assertEqual(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0)

@patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']]))
@patch('os.unlink', Mock(side_effect=OSError))
@@ -986,10 +987,10 @@ class TestPostgresql(unittest.TestCase):
self.assertTrue(self.p.fix_cluster_state())

def test_replica_cached_timeline(self):
self.assertEquals(self.p.replica_cached_timeline(1), 2)
self.assertEqual(self.p.replica_cached_timeline(1), 2)

def test_get_master_timeline(self):
self.assertEquals(self.p.get_master_timeline(), 1)
self.assertEqual(self.p.get_master_timeline(), 1)

def test_cancellable_subprocess_call(self):
self.p.cancel()

@@ -46,7 +46,7 @@ class TestPostmasterProcess(unittest.TestCase):
@patch('psutil.Process.__init__')
def test_from_pid(self, mock_init):
mock_init.side_effect = psutil.NoSuchProcess(123)
self.assertEquals(PostmasterProcess.from_pid(123), None)
self.assertEqual(PostmasterProcess.from_pid(123), None)
mock_init.side_effect = None
self.assertNotEquals(PostmasterProcess.from_pid(123), None)

@@ -55,13 +55,13 @@ class TestPostmasterProcess(unittest.TestCase):
@patch('psutil.Process.pid', Mock(return_value=123))
def test_signal_stop(self, mock_send_signal):
proc = PostmasterProcess(-123)
self.assertEquals(proc.signal_stop('immediate'), False)
self.assertEqual(proc.signal_stop('immediate'), False)

mock_send_signal.side_effect = [None, psutil.NoSuchProcess(123), psutil.AccessDenied()]
proc = PostmasterProcess(123)
self.assertEquals(proc.signal_stop('immediate'), None)
self.assertEquals(proc.signal_stop('immediate'), True)
self.assertEquals(proc.signal_stop('immediate'), False)
self.assertEqual(proc.signal_stop('immediate'), None)
self.assertEqual(proc.signal_stop('immediate'), True)
self.assertEqual(proc.signal_stop('immediate'), False)

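test_signal_stop spells out a three-valued contract: None when the signal was delivered and the caller still has to wait for the postmaster to exit, True when the process is already gone, and False when stopping is not possible. The `PostmasterProcess(-123)` case models the negative PID that PostgreSQL writes into postmaster.pid for a single-user backend, which must not be signalled. Roughly, under those assumptions:

    import psutil

    def signal_stop(proc, sig, single_user=False):
        # A negative PID in postmaster.pid marks a single-user backend;
        # the test models this flag with the PostmasterProcess(-123) case.
        if single_user:
            return False                 # refuse to signal a single-user backend
        try:
            proc.send_signal(sig)
        except psutil.NoSuchProcess:
            return True                  # already exited: stop is already done
        except psutil.AccessDenied:
            return False                 # cannot signal it: stop failed
        return None                      # delivered; caller waits for the exit
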
@patch('psutil.Process.__init__', Mock())
@patch('psutil.wait_procs')
@@ -89,11 +89,11 @@ class TestPostmasterProcess(unittest.TestCase):
mock_frompidfile.return_value._is_postmaster_process.return_value = False
mock_frompid.return_value = "proc 123"
mock_popen.return_value.stdout.readline.return_value = '123'
self.assertEquals(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', []), "proc 123")
self.assertEqual(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', []), "proc 123")
mock_frompid.assert_called_with(123)

mock_frompidfile.side_effect = psutil.NoSuchProcess(123)
self.assertEquals(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', []), "proc 123")
self.assertEqual(PostmasterProcess.start('true', '/tmp', '/tmp/test.conf', []), "proc 123")

@patch('psutil.Process.__init__', Mock(side_effect=psutil.NoSuchProcess(123)))
def test_read_postmaster_pidfile(self):

@@ -8,7 +8,7 @@ from patroni.utils import Retry, RetryFailedError, polling_loop
class TestUtils(unittest.TestCase):

def test_polling_loop(self):
self.assertEquals(list(polling_loop(0.001, interval=0.001)), [0])
self.assertEqual(list(polling_loop(0.001, interval=0.001)), [0])


@patch('time.sleep', Mock())
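
polling_loop is a small timeout helper: it yields an iteration counter, sleeping `interval` seconds between yields, until `timeout` seconds have elapsed, so with both set to 0.001 only iteration 0 fits. Typical use is a bounded wait (the readiness probe below is hypothetical):

    from patroni.utils import polling_loop

    for _ in polling_loop(10, interval=1):
        if server_is_ready():        # hypothetical probe, checked once a second
            break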
@@ -29,21 +29,21 @@ class TestRetrySleeper(unittest.TestCase):
def test_reset(self):
retry = Retry(delay=0, max_tries=2)
retry(self._fail())
self.assertEquals(retry._attempts, 1)
self.assertEqual(retry._attempts, 1)
retry.reset()
self.assertEquals(retry._attempts, 0)
self.assertEqual(retry._attempts, 0)

def test_too_many_tries(self):
retry = Retry(delay=0)
self.assertRaises(RetryFailedError, retry, self._fail(times=999))
self.assertEquals(retry._attempts, 1)
self.assertEqual(retry._attempts, 1)

def test_maximum_delay(self):
retry = Retry(delay=10, max_tries=100)
retry(self._fail(times=10))
self.assertTrue(retry._cur_delay < 4000, retry._cur_delay)
# gevent's sleep function is picky about the type
self.assertEquals(type(retry._cur_delay), float)
self.assertEqual(type(retry._cur_delay), float)

def test_deadline(self):
retry = Retry(deadline=0.0001)

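The Retry tests document the knobs that matter: `max_tries` caps attempts, `delay` seeds a growing float sleep that stays under a maximum (the float type check exists because gevent's sleep is picky), `deadline` bounds total elapsed time, and `reset()` clears the attempt counter so an instance can be reused. Usage in outline (the flaky callable is a stand-in):

    from patroni.utils import Retry, RetryFailedError

    retry = Retry(delay=0.1, max_tries=3, deadline=5)
    try:
        result = retry(flaky_call)      # re-invokes flaky_call until it succeeds
    except RetryFailedError:
        result = None                   # tries or deadline exhausted
    retry.reset()                       # reuse the same instance later
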
@@ -73,14 +73,14 @@ class TestWatchdog(unittest.TestCase):
@patch.object(LinuxWatchdogDevice, 'can_be_disabled', PropertyMock(return_value=True))
def test_unsafe_timeout_disable_watchdog_and_exit(self):
watchdog = Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'required', 'safety_margin': -1}})
self.assertEquals(watchdog.activate(), False)
self.assertEquals(watchdog.is_running, False)
self.assertEqual(watchdog.activate(), False)
self.assertEqual(watchdog.is_running, False)

@patch('platform.system', Mock(return_value='Linux'))
@patch.object(LinuxWatchdogDevice, 'get_timeout', Mock(return_value=16))
def test_timeout_does_not_ensure_safe_termination(self):
Watchdog({'ttl': 30, 'loop_wait': 15, 'watchdog': {'mode': 'auto', 'safety_margin': -1}}).activate()
self.assertEquals(len(mock_devices), 2)
self.assertEqual(len(mock_devices), 2)

@patch('platform.system', Mock(return_value='Linux'))
@patch.object(Watchdog, 'is_running', PropertyMock(return_value=False))
@@ -99,29 +99,29 @@ class TestWatchdog(unittest.TestCase):
watchdog = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'required'}})
watchdog.activate()

self.assertEquals(len(mock_devices), 2)
self.assertEqual(len(mock_devices), 2)
device = mock_devices[-1]
self.assertTrue(device.open)

self.assertEquals(device.timeout, 24)
self.assertEqual(device.timeout, 24)

watchdog.keepalive()
self.assertEquals(len(device.writes), 1)
self.assertEqual(len(device.writes), 1)

watchdog.disable()
self.assertFalse(device.open)
self.assertEquals(device.writes[-1], b'V')
self.assertEqual(device.writes[-1], b'V')

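The final assertions capture the Linux watchdog protocol itself: any write pets the timer, and writing the single byte 'V' immediately before closing is the "magic close" that tells the driver to disarm instead of rebooting the host when the file descriptor goes away. A bare-bones equivalent against a real device node (requires a kernel watchdog device and a driver without the nowayout option):

    import os

    fd = os.open('/dev/watchdog', os.O_WRONLY)   # opening arms the watchdog
    os.write(fd, b'1')                           # keepalive: any byte pets it
    os.write(fd, b'V')                           # magic close: request disarm
    os.close(fd)                                 # now safe; no reboot pending
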
def test_invalid_timings(self):
watchdog = Watchdog({'ttl': 30, 'loop_wait': 20, 'watchdog': {'mode': 'automatic', 'safety_margin': -1}})
watchdog.activate()
self.assertEquals(len(mock_devices), 1)
self.assertEqual(len(mock_devices), 1)
self.assertFalse(watchdog.is_running)

def test_parse_mode(self):
with patch('patroni.watchdog.base.logger.warning', new_callable=Mock()) as warning_mock:
watchdog = Watchdog({'ttl': 30, 'loop_wait': 10, 'watchdog': {'mode': 'bad'}})
self.assertEquals(watchdog.config.mode, 'off')
self.assertEqual(watchdog.config.mode, 'off')
warning_mock.assert_called_once()

@patch('platform.system', Mock(return_value='Unknown'))
@@ -170,7 +170,7 @@ class TestNullWatchdog(unittest.TestCase):
watchdog = NullWatchdog()
self.assertTrue(watchdog.can_be_disabled)
self.assertRaises(WatchdogError, watchdog.set_timeout, 1)
self.assertEquals(watchdog.describe(), 'NullWatchdog')
self.assertEqual(watchdog.describe(), 'NullWatchdog')
self.assertIsInstance(NullWatchdog.from_config({}), NullWatchdog)


@@ -210,7 +210,7 @@ class TestLinuxWatchdogDevice(unittest.TestCase):
self.assertRaises(WatchdogError, self.impl.get_timeout)
self.assertRaises(WatchdogError, self.impl.set_timeout, 10)
# We still try to output a reasonable string even if getting info errors
self.assertEquals(self.impl.describe(), "Linux watchdog device")
self.assertEqual(self.impl.describe(), "Linux watchdog device")

@patch('os.open', Mock(side_effect=OSError))
def test_open(self):