diff --git a/apps/cli/executables/alma_product_fetch/__init__.py b/apps/cli/executables/alma_product_fetch/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/alma_reingester/__init__.py b/apps/cli/executables/alma_reingester/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/datafetcher/__init__.py b/apps/cli/executables/datafetcher/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/datafetcher/test/test_datafetcher.py b/apps/cli/executables/datafetcher/test/test_datafetcher.py
index 1fb88ca37a82bb91e76f9f5f1d8accf6a89afe7b..73cb4e551b8072fc848aa0137b8efa018d3e3d07 100644
--- a/apps/cli/executables/datafetcher/test/test_datafetcher.py
+++ b/apps/cli/executables/datafetcher/test/test_datafetcher.py
@@ -3,7 +3,6 @@
 import os
 import subprocess
 import tempfile
-import unittest
 from pathlib import Path
 from typing import List
 from unittest.mock import MagicMock
@@ -34,7 +33,7 @@ TO EXECUTE THESE TESTS: from apps/cli/executables/datafetcher,
 
 '''
 
-class DataFetcherTestCase(unittest.TestCase):
+class TestDataFetcher:
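+    # a plain class now: pytest collects Test*-prefixed classes without
+    # requiring a unittest.TestCase subclass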
     """ IMPORTANT NOTE: we CANNOT retrieve by copy if we don't have access to a
         location to which NGAS can write, e.g, lustre. Therefore, any test
         that involves -actual- retrieval of files must be by streaming, to
@@ -62,22 +61,21 @@ class DataFetcherTestCase(unittest.TestCase):
 
     """
 
-    @classmethod
-    def setUpClass(cls) -> None:
-        cls.profile = TEST_PROFILE
-        cls.settings = get_test_capo_settings()
-        cls.db_settings = get_metadata_db_settings(cls.profile)
-        cls.test_data = cls._initialize_test_data(cls)
-        cls.DATA_DIR = get_test_data_dir()
-        if cls.DATA_DIR is None:
+    @pytest.fixture(autouse=True, scope='function')
+    def setup_settings_datadir(self) -> None:
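+        # autouse fixture: runs before every test, taking over from the old
+        # unittest setUpClass (note this is now per-test, not per-class)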
+        self.settings = get_test_capo_settings()
+        self.db_settings = get_metadata_db_settings(TEST_PROFILE)
+        self.test_data = self._initialize_test_data()
+        self.DATA_DIR = get_test_data_dir()
+        if self.DATA_DIR is None:
             pytest.fail(f'test data directory not found under {os.getcwd()}')
 
-    @classmethod
-    def setUp(cls) -> None:
+    @pytest.fixture(autouse=True, scope='function')
+    def make_tempdir(self) -> None:
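+        # fresh scratch directory per test; the umask is cleared around
+        # mkdtemp and then restored, carried over from the unittest setUp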
         umask = os.umask(0o000)
-        cls.top_level = tempfile.mkdtemp()
+        self.top_level = tempfile.mkdtemp()
         os.umask(umask)
-        cls._LOG = FlexLogger(cls.__name__, cls.top_level)
+        self._LOG = FlexLogger(__name__, self.top_level)
 
     def test_bad_command_line(self):
 
@@ -96,10 +94,8 @@ class DataFetcherTestCase(unittest.TestCase):
         terminal_exception_thrown = False
 
         bad_locator_logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertIsNotNone(bad_locator_logfile,
-                             f'we should have gotten a log file in {self.top_level}')
-        self.assertNotEqual(0, os.path.getsize(bad_locator_logfile),
-                            f'expecting a non-empty log file in {self.top_level}')
+        assert bad_locator_logfile is not None
+        assert 0 != os.path.getsize(bad_locator_logfile)
         with open(bad_locator_logfile) as log:
             log_contents = log.readlines()
 
@@ -110,8 +106,8 @@ class DataFetcherTestCase(unittest.TestCase):
                 terminal_exception_thrown = True
             if exception_found and terminal_exception_thrown:
                 break
-        self.assertTrue(exception_found, 'expecting NoLocatorException')
-        self.assertTrue(terminal_exception_thrown, 'terminal_exception should be thrown')
+        assert exception_found
+        assert terminal_exception_thrown
         bad_locator_logfile.unlink()
 
         # nonexistent locations file
@@ -133,7 +129,7 @@ class DataFetcherTestCase(unittest.TestCase):
                 terminal_exception_thrown = True
             if exception_found and terminal_exception_thrown:
                 break
-        self.assertTrue(exception_found, 'expecting FileNotFoundError')
+        assert exception_found
 
     def test_nothing_retrieved_if_dry_on_cmd_line(self):
         toplevel = Path(self.top_level)
@@ -146,16 +142,15 @@ class DataFetcherTestCase(unittest.TestCase):
         fetcher = CommandLineFetchLauncher(args, self._LOG)
         output = fetcher.run()
         logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertEqual([], output, 'expecting no files for dry run')
-        self.assertNotEqual(0, os.path.getsize(logfile),
-                            'log file should be non-empty because verbose')
+        assert [] == output
+        assert 0 != os.path.getsize(logfile)
         Path.unlink(location_file)
 
         # make sure none of these files written
         file_count = 0
         for _ in os.walk(location_file):
             file_count += 1
-        self.assertEqual(0, file_count, 'no files should have been retrieved')
+        assert 0 == file_count
 
     def test_force_overwrite_from_cmd_line(self):
         toplevel = Path(self.top_level)
@@ -179,11 +174,9 @@ class DataFetcherTestCase(unittest.TestCase):
             for fname in fnames:
                 path = dest_dir / fname
                 sizes[path] = os.path.getsize(path)
-        self.assertEqual(37, len(sizes), 'expecting 37 files to be fetched')
+        assert 37 == len(sizes)
         fake_size = os.path.getsize(fake_file)
-        self.assertEqual(9339, fake_size, f'expecting '
-                                          f'{fake_file} to '
-                                          f'be 9339 bytes')
+        assert 9339 == fake_size
 
     def test_no_overwrite_from_cmd_line(self):
         toplevel = Path(self.top_level)
@@ -214,8 +207,7 @@ class DataFetcherTestCase(unittest.TestCase):
             if term_except_found and file_exists_found:
                 break
 
-        self.assertTrue(term_except_found and file_exists_found,
-                        'expecting terminal_exception for FileExistsError')
+        assert term_except_found and file_exists_found
 
     def test_cmd_line_more_output_when_verbose(self):
         report_file = get_mini_locations_file(
@@ -227,12 +219,10 @@ class DataFetcherTestCase(unittest.TestCase):
         fetcher = CommandLineFetchLauncher(args, self._LOG)
         retrieved = fetcher.run()
         num_files_expected = 37
-        self.assertEqual(num_files_expected, len(retrieved),
-                         f'expecting {num_files_expected} files')
+        assert num_files_expected == len(retrieved)
 
         verbose_logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertNotEqual(0,  os.path.getsize(verbose_logfile),
-                            'log should contain debug messages')
+        assert 0 != os.path.getsize(verbose_logfile)
 
         [file.unlink() for file in retrieved]
         verbose_logfile.unlink()
@@ -243,11 +233,9 @@ class DataFetcherTestCase(unittest.TestCase):
                 '--profile', TEST_PROFILE, '--output-dir', self.top_level]
         fetcher = CommandLineFetchLauncher(args, self._LOG)
         retrieved = fetcher.run()
-        self.assertEqual(num_files_expected, len(retrieved),
-                         f'expecting {num_files_expected} files')
+        assert num_files_expected == len(retrieved)
         logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertEqual(0,  os.path.getsize(logfile),
-                         f'{logfile} should be empty')
+        assert 0 == os.path.getsize(logfile)
 
     def test_can_stream_from_mini_locations_file(self):
         """ gin up a location report with just a few small files in it
@@ -258,44 +246,40 @@ class DataFetcherTestCase(unittest.TestCase):
         report_file = get_mini_locations_file(location_file)
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         retrieved = fetch.run()
         file_count = len(retrieved)
-        self.assertEqual(37, file_count)
+        assert 37 == file_count
 
     def test_verbose_writes_stuff_to_log(self):
         path = Path(self.top_level, _LOCATION_FILENAME)
         report_file = get_mini_locations_file(path)
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile, '--verbose']
+                '--profile', TEST_PROFILE, '--verbose']
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         fetch.run()
 
         logfile = fetch.logfile
-        self.assertTrue(logfile.is_file(),
-                        f'expecting log file {logfile}')
-        self.assertNotEqual(0, os.path.getsize(logfile),
-                            'there should be entries in the log file')
+        assert logfile.is_file()
+        assert 0 != os.path.getsize(logfile)
 
     def test_empty_log_if_not_verbose(self):
         path = Path(self.top_level, _LOCATION_FILENAME)
         report_file = get_mini_locations_file(path)
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         fetch.run()
 
         logfile = fetch.logfile
-        self.assertTrue(logfile.is_file(),
-                        f'expecting log file {logfile}')
-        self.assertEqual(0, os.path.getsize(logfile),
-                         'log file should be empty')
+        assert logfile.is_file()
+        assert 0 == os.path.getsize(logfile)
 
     def test_copy_attempt_throws_sys_exit_service_error(self):
         product_locator = self.test_data['13B-014']['product_locator']
@@ -314,8 +298,7 @@ class DataFetcherTestCase(unittest.TestCase):
         servers_report = fetch.servers_report
         for server in servers_report:
             entry = servers_report[server]
-            self.assertTrue(
-                entry['retrieve_method'].value == RetrievalMode.COPY.value)
+            assert entry['retrieve_method'].value == RetrievalMode.COPY.value
 
         # let's try just one file so we're not sitting here all day
         for server in servers_report:
@@ -323,14 +306,14 @@ class DataFetcherTestCase(unittest.TestCase):
             servers_report = {server: entry}
             break
         fetch.servers_report = servers_report
-        self.assertIsNotNone(fetch.servers_report[server])
+        assert fetch.servers_report[server] is not None
         files = fetch.servers_report[server]['files']
         fetch.servers_report[server]['files'] = [files[0]]
 
         try:
             with pytest.raises(SystemExit) as s_ex:
                 fetch.run()
-            self.assertEqual(Errors.NGAS_SERVICE_ERROR.value, s_ex.value.code)
+            assert Errors.NGAS_SERVICE_ERROR.value == s_ex.value.code
         finally:
             self.settings['execution_site'] = local_exec_site
 
@@ -338,31 +321,30 @@ class DataFetcherTestCase(unittest.TestCase):
         report_file = get_locations_file('VLA_BAD_SERVER')
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         with pytest.raises(SystemExit) as s_ex:
             fetch.run()
         exc_code = s_ex.value.code
         expected = Errors.NGAS_SERVICE_ERROR.value
-        self.assertEqual(expected, exc_code)
+        assert expected == exc_code
 
     def test_throws_sys_exit_file_exists_if_overwrite_not_forced(self):
         toplevel = Path(self.top_level)
         location_file = get_mini_locations_file(
             Path(self.top_level, _LOCATION_FILENAME))
-        self.assertTrue(Path.exists(location_file),
-                        f'{location_file}')
+        assert Path.exists(location_file)
         destination = Path(toplevel, _EB_EXTERNAL_NAME)
         Path(destination).mkdir(parents=True, exist_ok=True)
-        self.assertTrue(destination.is_dir(), f'{destination}')
+        assert destination.is_dir()
 
         # stick a fake SDM in there so it will fall over
         fake_file = Path(destination, _ASDM_XML)
         with open(fake_file, 'w') as to_write:
             to_write.write('lalalalalala')
-        self.assertTrue(fake_file.exists(), f'expecting fake file: {fake_file}')
-        self.assertFalse(os.path.getsize(fake_file) == 0)
+        assert fake_file.exists()
+        assert os.path.getsize(fake_file) != 0
 
         args = ['--location-file', str(location_file),
                 '--output-dir', self.top_level,
@@ -375,14 +357,14 @@ class DataFetcherTestCase(unittest.TestCase):
             DataFetcher(namespace, self.settings).run()
         exc_code = exc.value.code
         expected = Errors.FILE_EXISTS_ERROR.value
-        self.assertEqual(expected, exc_code)
+        assert expected == exc_code
 
     def test_overwrites_when_forced(self):
         external_name = LOCATION_REPORTS[_VLA_SMALL_KEY]['external_name']
         toplevel = Path(self.top_level)
         destination = toplevel / external_name
         destination.mkdir(parents=True, exist_ok=True)
-        self.assertTrue(destination.is_dir(), f'{destination}')
+        assert destination.is_dir()
 
         # stick a fake SDM in there to see if overwrite really happens
         to_overwrite = _ASDM_XML
@@ -390,11 +372,8 @@ class DataFetcherTestCase(unittest.TestCase):
         text = '"Bother!" said Pooh. "Lock phasers on that heffalump!"'
         with open(fake_file, 'w') as to_write:
             to_write.write(text)
-        self.assertTrue(fake_file.exists(),
-                        f'{to_overwrite} should have been created')
-        self.assertEqual(len(text), os.path.getsize(fake_file),
-                         f'before overwrite, {to_overwrite} should be'
-                         f' {len(text)} bytes')
+        assert fake_file.exists()
+        assert len(text) == os.path.getsize(fake_file)
         report_metadata = LOCATION_REPORTS['VLA_SMALL_EB']
         external_name = report_metadata['external_name']
         destination = toplevel / external_name
@@ -414,13 +393,11 @@ class DataFetcherTestCase(unittest.TestCase):
         sizes = [file['size'] for file in files]
         total_size_expected = sum(sizes)
         num_files_expected = 37
-        self.assertEqual(num_files_expected, len(files),
-                         f"expecting {report_metadata['file_count']} files in report")
+        assert num_files_expected == len(files)
 
         fetch = DataFetcher(namespace, self.settings)
         retrieved = fetch.run()
-        self.assertEqual(num_files_expected, len(retrieved),
-                         f'expected {num_files_expected} files but got {len(retrieved)}')
+        assert num_files_expected == len(retrieved)
 
         # delete the .json so it doesn't mess up our total size computation
         Path.unlink(report_file)
@@ -431,49 +408,44 @@ class DataFetcherTestCase(unittest.TestCase):
             for fname in filenames:
                 path = Path(dirpath, fname)
                 total_size_actual += os.path.getsize(path)
-        self.assertEqual(total_size_expected, total_size_actual,
-                         f'expected total size={total_size_expected}; got {total_size_actual}')
+        assert total_size_expected == total_size_actual
 
     def test_sys_exit_file_error_on_bad_destination(self):
         file_spec = self.test_data['13B-014']
         args = ['--product-locator', file_spec['product_locator'],
                 '--output-dir', '/foo',
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         with pytest.raises(SystemExit) as s_ex:
             DataFetcher(namespace, self.settings)
-        self.assertEqual(Errors.FILE_NOT_FOUND_ERROR.value, s_ex.value.code,
-                         'should throw FILE_NOT_FOUND_ERROR')
+        assert Errors.FILE_NOT_FOUND_ERROR.value == s_ex.value.code
 
     def test_sys_exit_no_locator_for_bad_product_locator(self):
         args = ['--product-locator', '/foo',
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
 
         with pytest.raises(SystemExit) as s_ex:
             fetch = DataFetcher(namespace, self.settings)
             fetch.run()
-        self.assertEqual(Errors.NO_LOCATOR.value, s_ex.value.code,
-                         'should throw NO_LOCATOR')
+        assert Errors.NO_LOCATOR.value == s_ex.value.code
 
     def test_gets_expected_test_data(self):
-        self.assertIsNotNone(self.test_data['13B-014'])
+        assert self.test_data['13B-014'] is not None
         file_spec = self.test_data['13B-014']
-        self.assertEqual('13B-014.sb28862036.eb29155786.56782.5720116088',
-                         file_spec['external_name'])
+        assert '13B-014.sb28862036.eb29155786.56782.5720116088' == file_spec['external_name']
         locator = file_spec['product_locator']
-        self.assertTrue(locator.startswith('uid://evla/execblock/'))
+        assert locator.startswith('uid://evla/execblock/')
 
     def test_gets_vlbas_from_report_file(self):
         report_file = get_locations_file('VLBA_EB')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
 
-        self.assertEqual(16, len(report_files),
-                         f'expecting 16 report files in {self.top_level}')
+        assert 16 == len(report_files)
         expected_files = [Path(self.top_level, item['relative_path'])
                           for item in report_files]
 
@@ -482,8 +454,7 @@ class DataFetcherTestCase(unittest.TestCase):
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files),
-                         f'expecting {num_expected} VLBA files in {self.top_level}')
+        assert num_expected == len(actual_files)
 
         match_count = 0
         for exp_file in expected_files:
@@ -492,37 +463,32 @@ class DataFetcherTestCase(unittest.TestCase):
                 if act_parent == exp_file.name:
                     match_count += 1
                     break
-        self.assertEqual(num_expected, match_count,
-                         f'{num_expected - match_count} file(s) are '
-                         f'unaccounted for')
+        assert num_expected == match_count
 
     def test_gets_large_vla_ebs_from_report_file(self):
         report_file = get_locations_file('VLA_LARGE_EB')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(46, len(report_files), 'expecting 46 files')
+        assert 46 == len(report_files)
         toplevel = Path(self.top_level)
         expected_files = [toplevel / item['relative_path']
                           for item in report_files]
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'VLBA files')
+        assert num_expected == len(actual_files)
 
     def test_gets_images_from_report_file(self):
         report_file = get_locations_file('IMG')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(2, len(report_files),
-                         f'expecting 2 report files in {self.top_level}')
+        assert 2 == len(report_files)
         toplevel = Path(self.top_level)
         expected_files = [toplevel / item['relative_path']
                           for item in report_files]
@@ -530,62 +496,52 @@ class DataFetcherTestCase(unittest.TestCase):
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'image files')
+        assert num_expected == len(actual_files)
 
     def test_gets_calibration_from_report_file(self):
         report_file = get_locations_file('CALIBRATION')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(1, len(report_files),
-                         f'expecting 1 report file in {self.top_level}')
+        assert 1 == len(report_files)
         file_spec = report_files[0]
 
         # calibration will have external name = relative path = subdirectory
         relative_path = file_spec['relative_path']
-        self.assertEqual(relative_path, file_spec['subdirectory'],
-                         'expecting relative_path same as subdirectory')
+        assert relative_path == file_spec['subdirectory']
 
         expected_files = [Path(self.top_level, relative_path)]
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'calibration')
+        assert num_expected == len(actual_files)
 
     def test_gets_calibration_from_locator(self):
         external_name = LOCATION_REPORTS['CALIBRATION']['external_name']
         product_locator = ProductLocatorLookup(
             self.db_settings).look_up_locator_for_ext_name(external_name)
         args = ['--product-locator', product_locator,
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(1, len(report_files),
-                         f'{external_name} should be 1 file in {self.top_level}')
+        assert 1 == len(report_files)
 
         file_spec = report_files[0]
 
         # calibration will have external name = relative path = subdirectory
         relative_path = file_spec['relative_path']
-        self.assertEqual(external_name, relative_path,
-                         'expecting external_name same as relative path')
-        self.assertEqual(relative_path, file_spec['subdirectory'],
-                         'expecting relative_path same as subdirectory')
+        assert external_name == relative_path
+        assert relative_path == file_spec['subdirectory']
 
         expected_files = [Path(self.top_level) / relative_path]
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'calibration')
+        assert num_expected == len(actual_files)
+
     def test_retrieval_finds_size_mismatch(self):
         report_spec = LOCATION_REPORTS[_VLA_SMALL_KEY]
         external_name = report_spec['external_name']
@@ -593,59 +549,52 @@ class DataFetcherTestCase(unittest.TestCase):
         data_dir = Path(self.DATA_DIR)
         locations_file = data_dir / 'VLA_SMALL_EB_BUSTED.json'
         args = ['--location-file', str(locations_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch1 = DataFetcher(namespace, self.settings)
         report_files = fetch1.locations_report.files_report['files']
-        self.assertEqual(44, len(report_files),
-                         f'{locations_file.name} should have 44 files')
+        assert 44 == len(report_files)
 
         filename = 'Weather.xml'
         for file in report_files:
             if filename == file['relative_path']:
-                self.assertEqual(165100, file['size'])
+                assert 165100 == file['size']
                 break
 
         product_locator = ProductLocatorLookup(self.db_settings) \
             .look_up_locator_for_ext_name(external_name)
         args = ['--product-locator', product_locator,
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch2 = DataFetcher(namespace, self.settings)
 
         locations_report = get_locations_report(_VLA_SMALL_KEY)
         fetch2.run = MagicMock(return_value=locations_report['files'])
         locator_files = fetch2.run()
-        self.assertEqual(len(report_files), len(locator_files),
-                         'should get same no. files from locator as from '
-                         'report file')
+        assert len(report_files) == len(locator_files)
         for file1 in report_files:
             for file2 in locator_files:
                 if file2['relative_path'] == file1['relative_path']:
                     if filename != file1['relative_path']:
-                        self.assertEqual(file2['size'], file1['size'],
-                                         'sizes should match')
+                        assert file2['size'] == file1['size']
                     else:
-                        self.assertNotEqual(file2['size'], file1['size'],
-                                            'sizes should match')
+                        assert file2['size'] != file1['size']
                     break
 
     def test_throws_sys_exit_missing_setting_if_no_args(self):
         args = []
         with pytest.raises(SystemExit) as s_ex:
             get_arg_parser().parse_args(args)
-        self.assertEqual(Errors.MISSING_SETTING.value, s_ex.value.code,
-                         'should throw MISSING_SETTING error')
+        assert Errors.MISSING_SETTING.value == s_ex.value.code
 
     def test_throws_sys_exit_no_locator_if_no_product_locator(self):
         args = ['--product-locator', '',
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
 
         with pytest.raises(SystemExit) as s_ex:
             DataFetcher(namespace, self.settings)
-        self.assertEqual(Errors.NO_LOCATOR.value, s_ex.value.code,
-                         'should throw NO_LOCATOR error')
+        assert Errors.NO_LOCATOR.value == s_ex.value.code
 
     # --------------------------------------------------------------------------
     #
diff --git a/apps/cli/executables/datafetcher/test/testing_utils.py b/apps/cli/executables/datafetcher/test/testing_utils.py
index 91be50e9e317d3bc92b0580ba4d58e461f0a082d..0a2e708fa4927df2173dc834f6a33715fdd17b21 100644
--- a/apps/cli/executables/datafetcher/test/testing_utils.py
+++ b/apps/cli/executables/datafetcher/test/testing_utils.py
@@ -9,13 +9,9 @@ from pathlib import Path
 
 from pycapo import CapoConfig
 
-from datafetcher.errors import \
-    MissingSettingsException, NoProfileException
-from datafetcher.locations_report import \
-    LocationsReport
-from datafetcher.utilities import \
-    REQUIRED_SETTINGS, get_arg_parser, \
-    ExecutionSite
+from datafetcher.errors import MissingSettingsException, NoProfileException
+from datafetcher.locations_report import LocationsReport
+from datafetcher.utilities import REQUIRED_SETTINGS, get_arg_parser, ExecutionSite
 
 TEST_PROFILE = 'local'
 
@@ -155,8 +151,6 @@ def get_test_capo_settings():
     if result is None or len(result) == 0:
         raise MissingSettingsException('Required Capo settings were not found')
 
-    for setting in result:
-        print(f'{setting} = {result[setting]}')
     # be sure execution site is not DSOC nor NAASC
     exec_site = result['execution_site']
     if ExecutionSite.DSOC.value in exec_site or ExecutionSite.NAASC.value in \
diff --git a/apps/cli/executables/epilogue/__init__.py b/apps/cli/executables/epilogue/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/ingestion/__init__.py b/apps/cli/executables/ingestion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/vlba_grabber/__init__.py b/apps/cli/executables/vlba_grabber/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/weblog_thumbs/__init__.py b/apps/cli/executables/weblog_thumbs/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/launchers/pymygdala/__init__.py b/apps/cli/launchers/pymygdala/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/launchers/wf/__init__.py b/apps/cli/launchers/wf/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/datafinder/__init__.py b/apps/cli/utilities/datafinder/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/dumplogs/__init__.py b/apps/cli/utilities/dumplogs/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/faultchecker/__init__.py b/apps/cli/utilities/faultchecker/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/mr_books/__init__.py b/apps/cli/utilities/mr_books/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/mr_clean/__init__.py b/apps/cli/utilities/mr_clean/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/proprietary_setter/__init__.py b/apps/cli/utilities/proprietary_setter/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/proprietary_setter/setup.py b/apps/cli/utilities/proprietary_setter/setup.py
index 749807d46507f68438e2e24951489626c953bade..e63875be644d35a19b83f6e650ac3b529141803f 100644
--- a/apps/cli/utilities/proprietary_setter/setup.py
+++ b/apps/cli/utilities/proprietary_setter/setup.py
@@ -24,6 +24,6 @@ setup(
         'Programming Language :: Python :: 3.8'
     ],
     entry_points={
-        'console_scripts': ['proj_prop_period = proprietary_setter.commands:main']
+        'console_scripts': ['proj_prop_period = proprietary_setter.prop_setter:main']
     },
 )
diff --git a/apps/cli/utilities/proprietary_setter/src/proprietary_setter/commands.py b/apps/cli/utilities/proprietary_setter/src/proprietary_setter/commands.py
deleted file mode 100644
index ac62780b1725ca9e5e56c2171ae2d2a192a20ee6..0000000000000000000000000000000000000000
--- a/apps/cli/utilities/proprietary_setter/src/proprietary_setter/commands.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-A module for updating the proprietary period of a project, whether that be to set it as
-proprietary or to make it public.
-
-Author: Richard Falardeau <rfalarde@nrao.edu>
-"""
-# pylint: disable=logging-format-interpolation
-
-import argparse as ap
-import sys
-import warnings
-import logging
-from astropy.time import Time
-from sqlalchemy import exc as sa_exc
-
-from ._version import ___version___ as version
-from support.logging import get_console_logger, LOG_MESSAGE_FORMATTER
-from support.capo import get_my_capo_config
-from schema import create_session
-from schema.model import Project
-from schema.legacy_model import LegacyProject
-from pymygdala import LogHandler, SendNRAOEvent
-
-
-_APPLICATION_NAME = 'proprietary_setter'
-_LOG = get_console_logger(_APPLICATION_NAME)
-_MISSING_PROFILE = """unable to derive the capo profile from the environment, """ +\
-                   """provide the capo profile through the -P argument or set the """ +\
-                   """CAPO_PROFILE environment variable."""
-_DISALLOWED_DURATION = """invalid proprietary duration, only integer values """ +\
-                       """between 0 and 730 are allowed."""
-_DESCRIPTION = """A tool for setting the proprietary duration of a project"""
-_EPILOG = """Return values:
-0: everything worked,
-1: can't deduce which profile to use
-2: invalid proprietary duration
-3: DB update failed"""
-
-
-class UpdateException(Exception):
-    r"""A wrapper for exceptions caught during our attempt to update the proprietary period.
-    Since we aren't passing the argparse object to helper functions, we want to catch some
-    obviously library exceptions in sqlalchemy, but also use it for some error trapping and
-    'handling'"""
-
-
-def _make_parser():
-    r"""
-    Build a command line parser to take the parameters for setting a projects
-    proprietary period.
-
-    :return result: an argparse object with the profile, project and
-    duration in its namespace"""
-    result = ap.ArgumentParser(description=_DESCRIPTION.format(version),
-                               formatter_class=ap.RawTextHelpFormatter,
-                               epilog=_EPILOG)
-    result.add_argument('-P', '--profile', action='store',
-                        help='profile name to use, e.g. nmtest, dsoc-test, or nmprod')
-    result.add_argument('project', action='store',
-                        help='project_code to update proprietary duration')
-    result.add_argument('duration', action='store',
-                        type=int,
-                        help='an integer duration to apply to the project; '
-                             '0 (immediately public) to 730 (private for two years from today)')
-    return result
-
-
-def set_project_proprietary_state(capo_config, project_code, proprietary_duration):
-    r"""
-    Set the proprietary period on the project code to the proprietary period provide on both the
-    archive and the legacy archive.
-        Note:   Since the capo_config will only really set the db parameters for the archive,
-                all updates to the legacy archive are actually live on production.
-    :param capo_config: the capo_config we're running under, which determines which db we update
-    :param project_code: the project code to update
-    :param proprietary_duration: an integer value for the new proprietary period (in days)
-    """
-    try:
-        with warnings.catch_warnings():
-            # This is solely to suppress the SQLAlchemy warning messages
-            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
-            a_session = create_session('SDM', profile=capo_config.profile)
-            legacy_session = create_session('LEGACY', profile=capo_config.profile)
-            new_mjd_endtime = Time.now().mjd
-
-            # In the archive, the project code is a PK, we should only ever get one
-            project = a_session.query(Project) \
-                .filter(Project.project_code == project_code) \
-                .first()
-            # And if we don't, throw an exception before we commit anything
-            if project is not None:
-                project.proprietary_duration = proprietary_duration
-                if proprietary_duration != 0:
-                    project.endtime = new_mjd_endtime
-            else:
-                raise UpdateException(f'Project {project_code} was not found in the archive')
-
-            # We're not so lucky in the legacy archive, so we need to get all instances
-            leg_project = legacy_session.query(LegacyProject) \
-                .filter(LegacyProject.project_code == project_code) \
-                .all()
-            # Loop over each to set the period, or throw an exception if not found
-            if len(leg_project) > 0:
-                for project_instance in leg_project:
-                    project_instance.proprietary_duration = proprietary_duration
-                    project_instance.unlock_expire = proprietary_duration
-                    if proprietary_duration != 0:
-                        project_instance.proprietary = new_mjd_endtime
-                        project_instance.project_lock = 'LTIME'
-                    else:
-                        project_instance.project_lock = 'PUBLIC'
-            else:
-                raise UpdateException(f'Project {project_code} was not found in the legacy archive')
-
-            a_session.commit()
-            a_session.close()
-            legacy_session.commit()
-            legacy_session.close()
-
-    except Exception as update_exception:
-        raise UpdateException(f'DB update failed for the following reason: {update_exception}')
-
-
-def main(**kwargs):
-    r"""
-    The main entry point for this script.  Builds the parser, checks params, gets a profile and then
-    attempts to update the db.  If that succeeds, we kick off an amygdala even to tell the system
-    to re-index the project so the archive will reflect the new status.
-    :param kwargs: command line arguments to be passed to our parser builder
-    :return: nothing, if we complete the system will exit normally, if not we'll set a system
-    exit code.
-    """
-    parser = _make_parser()
-    args = parser.parse_args(**kwargs)
-
-    capo_config = get_my_capo_config(profile=args.profile)
-
-    if args.duration not in range(0, 730):
-        _LOG.error(_DISALLOWED_DURATION)
-        parser.print_help()
-        sys.exit(2)
-
-    try:
-        set_project_proprietary_state(capo_config, args.project, args.duration)
-    except UpdateException as update_exception:
-        _LOG.error(update_exception)
-        parser.print_help()
-        sys.exit(3)
-
-    # Set up a LogHandler to record the fact we just made a change to this project.
-    # We're adding it here, instead of earlier, because nothing we log earlier should be presented
-    # to anyone but the command line user and would only add useless clutter to our system logging.
-    # We only really want the completed task to make a record in our system.
-    broadcast = LogHandler(profile=capo_config.profile, application=_APPLICATION_NAME)
-    broadcast.setLevel(logging.DEBUG)
-    broadcast.setFormatter(LOG_MESSAGE_FORMATTER)
-    _LOG.addHandler(broadcast)
-    _LOG.info(f'Attempting to update proprietary period for {args.project}.')
-    if args.duration != 0:
-        _LOG.info(f'Locking for {args.duration} days from today')
-    else:
-        _LOG.info('Unlocking')
-
-    event = {'logData': {'project_code': args.project,
-                         'proprietary_duration': args.duration,
-                         'ingestion_type': 'evla_sdm'
-                         },
-             'message': 'proprietary period updated',
-             'request': 're-index please'}
-    SendNRAOEvent(profile=capo_config.profile, application=_APPLICATION_NAME) \
-        .send(routing_key='ingestion-complete.metadata', event=event)
-    sys.exit(0)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/apps/cli/utilities/proprietary_setter/src/proprietary_setter/project_finders.py b/apps/cli/utilities/proprietary_setter/src/proprietary_setter/project_finders.py
new file mode 100644
index 0000000000000000000000000000000000000000..642b992c76e97f4b93b0550005e1f26a47e14cc6
--- /dev/null
+++ b/apps/cli/utilities/proprietary_setter/src/proprietary_setter/project_finders.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+
+''' Project finders pull project metadata
+    from the "new" and legacy archive DBs.
+'''
+
+import logging
+import re
+import warnings
+from pprint import pformat
+from sqlalchemy import exc as sa_exc
+
+from schema import create_session, Project
+from schema.legacy_model import LegacyProject
+from support.logging import get_console_logger
+
+# pylint: disable=logging-format-interpolation
+
+class NewArchiveProjectFinder:
+    ''' Pulls specified project from archive DB '''
+
+    def __init__(self, profile: str):
+        self.profile = profile
+        self._LOG = get_console_logger(self.__class__.__name__)
+
+    def find_project(self, project_code: str):
+        ''' get the specified project '''
+        session = create_session('SDM', profile=self.profile)
+        try:
+            with warnings.catch_warnings():
+                # suppress SQLAlchemy warnings
+                warnings.simplefilter("ignore", category=sa_exc.SAWarning)
+
+                # In the archive, the project code is a PK;
+                # we should only ever get one
+                project = session.query(Project) \
+                    .filter(Project.project_code == project_code) \
+                    .first()
+
+            return project
+
+        except Exception as ex:
+            self._LOG.error(f'{ex}')
+            raise NewArchiveProjectException(ex)
+
+        finally:
+            session.close()
+
+
+LEGACY_QUERY = """select PROJECT_CODE,
+OBSERVER,
+STARTTIME,
+STOPTIME,
+PROPRIETARY_DURATION,
+STOPTIME + PROPRIETARY_DURATION,
+proprietary,
+UNLOCK_EXPIRE,
+PROJECT_LOCK
+from PROJECT
+where PROJECT_CODE = :project_code
+order by PROPRIETARY desc"""
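+# :project_code above is a named bind parameter, supplied by cursor.execute()
+# in LegacyProjectFinder.find_proprietary_metadata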
+
+
+class LegacyProjectFinder:
+    ''' Pulls specified project from legacy DB '''
+
+    def __init__(self, profile: str):
+        self.profile = profile
+        self._LOG = get_console_logger(self.__class__.__name__)
+
+    def find_projects(self, project_code: str) -> list:
+        ''' project codes are not unique in the legacy archive '''
+
+        legacy_project_code = make_legacy_project_code(project_code)
+        session = create_session('LEGACY',
+                                 profile=self.profile)
+        try:
+
+            with warnings.catch_warnings():
+                # suppress SQLAlchemy warnings
+                warnings.simplefilter("ignore", category=sa_exc.SAWarning)
+
+                # the session is closed exactly once, in the finally below
+                projects = session.query(LegacyProject) \
+                    .filter(LegacyProject.project_code
+                            .in_((project_code, legacy_project_code))) \
+                    .all()
+            return projects
+
+        except Exception as ex:
+            self._LOG.error(f'{ex}')
+            raise LegacyProjectException(ex)
+
+        finally:
+            session.close()
+
+    def find_proprietary_metadata(self, project_code: str):
+        ''' Get proprietary, lock, prop duration from legacy archive '''
+        session = create_session('LEGACY',
+                                 profile=self.profile)
+        try:
+
+            with warnings.catch_warnings():
+                # suppress SQLAlchemy warnings
+                warnings.simplefilter("ignore", category=sa_exc.SAWarning)
+
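+                # drop to a raw DB-API cursor to run the hand-written
+                # LEGACY_QUERY with its named bind parameter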
+                cursor = session.connection().engine.raw_connection().cursor()
+                cursor.execute(LEGACY_QUERY, project_code=project_code)
+
+            results = list()
+            for row in cursor.fetchall():
+                (proj_code, observer, starttime, stoptime, prop_dur, stop_dur,
+                 proprietary, unlock_expire, proj_lock) = row
+                proj_metadata = {'project_code': proj_code,
+                                 'observer': observer,
+                                 'starttime': starttime,
+                                 'stoptime': stoptime,
+                                 'prop_duration': prop_dur,
+                                 'stop_dur': stop_dur,
+                                 'proprietary': proprietary,
+                                 'unlock_expire': unlock_expire,
+                                 'project_lock': proj_lock,
+                                 }
+                self._LOG.info(pformat(proj_metadata))
+                results.append(proj_metadata)
+
+            return results
+
+        except Exception as ex:
+            self._LOG.error(f'{ex}')
+            raise LegacyProjectException(ex)
+
+        finally:
+            session.close()
+
+
+def make_legacy_project_code(project_code: str):
+    ''' Some project codes in the legacy DB have an extra 0 in them.
+        Create such a project code from a given project code
+        for use in searches.
+    '''
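+    # e.g. 'AB103' -> 'AB1003': double the first '0' if the code contains one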
+    legacy_project_code = project_code.replace('0', '00', 1)
+    if legacy_project_code != project_code:
+        return legacy_project_code
+
+    # project code didn't have a zero in it
+    match = re.match(r"(([A-Za-z]+)([-0-9]+))", project_code)
+    if match is None:
+        return project_code
+    parts = match.groups()
+    if len(parts) == 3:
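+        # no '0' present: insert one between the letters and the digits,
+        # e.g. 'AB123' -> 'AB0123'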
+        return parts[1] + '0' + parts[2]
+
+    logging.error(f"don't know what to do with {project_code}")
+    return project_code
+
+
+class NewArchiveProjectException(Exception):
+    ''' raise this when something goes wrong in querying or updating
+        the archive DB
+    '''
+
+
+class LegacyProjectException(Exception):
+    ''' raise this when something goes wrong in querying or updating
+        the legacy DB
+    '''
diff --git a/apps/cli/utilities/proprietary_setter/src/proprietary_setter/prop_setter.py b/apps/cli/utilities/proprietary_setter/src/proprietary_setter/prop_setter.py
new file mode 100644
index 0000000000000000000000000000000000000000..aea8d2fc815f9f1f2c7b32148046ed976ebf669b
--- /dev/null
+++ b/apps/cli/utilities/proprietary_setter/src/proprietary_setter/prop_setter.py
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+
+"""
+Updates the proprietary duration of a project in both the "new" and legacy
+archives.
+
+This is a rewrite of the original proprietary-duration setter
+(commands.py) by Richard Falardeau <rfalarde@nrao.edu>
+
+"""
+import argparse
+import logging
+import sys
+import warnings
+from argparse import Namespace
+from enum import Enum
+
+from sqlalchemy import exc as sa_exc
+from astropy.time import Time
+from pymygdala import LogHandler, SendNRAOEvent
+from schema import create_session, Project, ExecutionBlock
+from support.logging import get_console_logger, LOG_MESSAGE_FORMATTER
+
+from ._version import ___version___
+from .project_finders import NewArchiveProjectFinder, LegacyProjectFinder
+
+# pylint: disable=logging-format-interpolation
+
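+# maximum proprietary period, in days (two years)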
+MAX_DURATION = 730
+
+_APPLICATION_NAME = 'proprietary_setter'
+_LOG = get_console_logger(_APPLICATION_NAME)
+_MISSING_PROFILE = """unable to derive the capo profile from the environment, """ + \
+                   """provide the capo profile through the -P argument or set the """ + \
+                   """CAPO_PROFILE environment variable."""
+_DISALLOWED_DURATION = f'invalid proprietary duration; value must be between 0 ' \
+                       f'and {MAX_DURATION}'
+
+_DESCRIPTION = """A tool for setting the proprietary duration of a project"""
+_EPILOG = """Return values:
+0: everything worked,
+1: can't deduce which profile to use
+2: invalid proprietary duration
+3: DB update failed"""
+
+class ProprietaryPeriodSetter:
+    ''' Sets the proprietary duration of a project to a new value
+        in both the "new" and legacy archive DBs.
+    '''
+
+    def __init__(self, usage: str, args: Namespace):
+        try:
+            self.new_duration_days = args.duration
+        except AttributeError as err:
+            _LOG.error(f'{err}')
+            _LOG.error(_DISALLOWED_DURATION)
+            sys.exit(2)
+        if not 0 <= self.new_duration_days <= MAX_DURATION:
+            _LOG.error(_DISALLOWED_DURATION)
+            _LOG.error(usage)
+            sys.exit(2)
+
+        try:
+            self.project = args.project
+        except AttributeError:
+            _LOG.error('Unable to determine project code')
+            _LOG.error(usage)
+            sys.exit(3)
+
+        try:
+            assert args.profile is not None
+            self.profile = args.profile
+        except (AttributeError, AssertionError):
+            _LOG.error(_MISSING_PROFILE)
+            sys.exit(1)
+
+    def set_project_proprietary_state(self):
+        ''' Where the magic happens:
+            sets proprietary duration and status of the specified project
+        '''
+        project_code = self.project
+        new_arch_proj = NewArchiveProjectFinder(self.profile) \
+            .find_project(self.project)
+        if new_arch_proj is None:
+            _LOG.error(f'project not found in new archive: {project_code}')
+            sys.exit(3)
+
+        if not self.is_science_project(project_code):
+            _LOG.error('This script can be used only on science projects.')
+            sys.exit(3)
+
+        legacy_proj = LegacyProjectFinder(self.profile).find_projects(self.project)
+        if not legacy_proj:
+            # guard: announce_update() below uses legacy_proj[0].stoptime
+            _LOG.error(f'project not found in legacy archive: {project_code}')
+            sys.exit(3)
+
+        with warnings.catch_warnings():
+            # suppress SQLAlchemy warnings
+            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
+
+            new_arch_session = create_session('SDM', profile=self.profile)
+            legacy_session = create_session('LEGACY', profile=self.profile)
+
+            # the finder sessions are closed by now, so the fetched rows are
+            # detached; merge them into these sessions so the changes below
+            # are tracked and committed
+            new_arch_proj = new_arch_session.merge(new_arch_proj)
+            legacy_proj = [legacy_session.merge(proj) for proj in legacy_proj]
+
+            _LOG.debug(f'>>> profile: {self.profile}')
+            try:
+                _LOG.debug('>>> updating new archive')
+                new_updated = self.update_new_archive(new_arch_proj)
+                _LOG.debug('>>> updating legacy archive')
+                legacy_updated = self.update_legacy_archive(legacy_proj)
+                if new_updated and legacy_updated:
+                    new_arch_session.commit()
+                    if 'prod' in self.profile:
+                        legacy_session.commit()
+                    else:
+                        _LOG.info('(This is where we would have updated the '
+                                  'legacy archive if we were in production.)')
+                        legacy_session.rollback()
+                    self.announce_update(legacy_proj[0].stoptime)
+
+            except UpdateException as exc:
+                _LOG.error(f'update failure: {exc}')
+                new_arch_session.rollback()
+                legacy_session.rollback()
+                sys.exit(3)
+            except Exception as exc:
+                _LOG.error(f'>>> SOME OTHER KIND OF FAILURE: {exc}')
+                new_arch_session.rollback()
+                legacy_session.rollback()
+                sys.exit(3)
+
+            finally:
+                new_arch_session.close()
+                legacy_session.close()
+
+        return {'new': new_arch_proj, 'legacy': legacy_proj}
+
+    def is_science_project(self, project_code: str):
+        ''' Is this project a "science" project (i.e., VLA/EVLA)? '''
+        session = None
+        try:
+            with warnings.catch_warnings():
+                # suppress SQLAlchemy warnings
+                warnings.simplefilter("ignore", category=sa_exc.SAWarning)
+
+                session = create_session('SDM', profile=self.profile)
+                project = session.query(ExecutionBlock) \
+                    .filter(ExecutionBlock.project_code == project_code) \
+                    .filter(ExecutionBlock.telescope.in_(
+                        [Telescope.EVLA.value, Telescope.VLA.value])) \
+                    .first()
+                return project is not None
+        except Exception as exc:
+            _LOG.error(f'Problem checking telescope for {project_code}: {exc}')
+            sys.exit(3)
+        finally:
+            if session is not None:
+                session.close()
+
+    def update_new_archive(self, project: Project):
+        ''' Update the "new" archive DB with new project duration '''
+        try:
+            project.proprietary_duration = self.new_duration_days
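+            # commit (or rollback) happens in the caller,
+            # set_project_proprietary_state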
+            return True
+        except Exception as ex:
+            _LOG.error(f'{ex}')
+            sys.exit(3)
+
+    def update_legacy_archive(self, projects: list):
+        ''' Update the legacy archive DB with new project duration
+
+            PROPRIETARY_DURATION: input new_duration_days
+            PROPRIETARY: STOPTIME + input new_duration_days
+            PROJECT_LOCK: if proprietary > today then 'LTIME' else 'UTIME'
+        '''
+        try:
+            for project_instance in projects:
+                project_instance.proprietary_duration = self.new_duration_days
+                project_instance.proprietary = project_instance.stoptime + self.new_duration_days
+                new_mjd_endtime = Time.now().mjd
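+                # MJD values are in days, so stoptime + duration_days and
+                # "now" are directly comparable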
+                if project_instance.proprietary > new_mjd_endtime:
+                    project_instance.project_lock = ProjectLock.LTIME.value
+                else:
+                    project_instance.project_lock = ProjectLock.UTIME.value
+
+            return True
+        except Exception as ex:
+            _LOG.error(f'{ex}')
+            sys.exit(3)
+
+    def announce_update(self, stoptime):
+        '''
+        Set up a LogHandler to record the fact we just made a change to
+        this project.
+
+        We're adding it here, instead of earlier, because nothing we log earlier
+        should be presented to anyone but the command line user and would
+        only add useless clutter to our system logging.
+
+        We only really want the completed task to make a record in our system.
+
+        :return:
+        '''
+
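+        # route non-production broadcasts to nmtest -- presumably to keep
+        # test runs out of the production event stream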
+        profile = self.profile if 'prod' in self.profile else 'nmtest'
+        try:
+            broadcast = LogHandler(profile=profile,
+                                   application=_APPLICATION_NAME)
+            broadcast.setLevel(logging.DEBUG)
+            broadcast.setFormatter(LOG_MESSAGE_FORMATTER)
+            _LOG.addHandler(broadcast)
+        except Exception as exc:
+            _LOG.error(f'{exc}')
+            raise
+
+        _LOG.info(f'Attempting to update proprietary period for '
+                  f'{self.project}.')
+        if self.new_duration_days != 0:
+            _LOG.info(f'Locking for {self.new_duration_days} days from {stoptime}')
+        else:
+            _LOG.info('Unlocking')
+
+        event = {'logData': {'project_code': self.project,
+                             'proprietary_duration': self.new_duration_days,
+                             'ingestion_type': 'evla_sdm'
+                             },
+                 'message': 'proprietary period updated',
+                 'request': 're-index please'}
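+        # publish the event through pymygdala; its 'request' field asks
+        # the archive to re-index this project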
+        SendNRAOEvent(profile=profile,
+                      application=_APPLICATION_NAME) \
+            .send(routing_key='ingestion-complete.metadata', event=event)
+        return 0
+
+
+def main(**kwargs):
+    r"""
+    The main entry point for this script.  Builds the parser, checks the
+    parameters, gets a profile, and then attempts to update the DB.  If that
+    succeeds, we kick off an amygdala event to tell the system to re-index
+    the project so the archive will reflect the new status.
+    :param kwargs: command line arguments to be passed to our parser builder
+    :return: nothing; on success the script exits normally, otherwise we set
+    a nonzero system exit code.
+    """
+    parser = make_parser()
+    try:
+        args = parser.parse_args(**kwargs)
+    except AttributeError as err:
+        _LOG.error(f'{err}')
+        msg = str(err)
+        if 'duration' in msg:
+            _LOG.error(_DISALLOWED_DURATION)
+            parser.print_help()
+            sys.exit(2)
+        elif 'project' in msg:
+            _LOG.error('Unable to determine project code')
+            sys.exit(3)
+        else:
+            # the error was already logged above
+            sys.exit(1)
+
+    # bounds are inclusive: 0 (public now) through MAX_DURATION days
+    if not 0 <= args.duration <= MAX_DURATION:
+        _LOG.error(_DISALLOWED_DURATION)
+        parser.print_help()
+        sys.exit(2)
+
+    try:
+        ProprietaryPeriodSetter(parser.format_usage(), args) \
+            .set_project_proprietary_state()
+        return 0
+    except UpdateException as update_exception:
+        _LOG.error(update_exception)
+        parser.print_help()
+        sys.exit(3)
+    except ValueError as err:
+        _LOG.error(f'{err}')
+        if str(err) == 'could not determine profile':
+            parser.print_help()
+        # any ValueError here is fatal; don't fall through and exit 0
+        sys.exit(1)
+    except KeyError as err:
+        # invalid profile => can't connect to new-archive DB
+        if err.args[0].startswith('METADATADATABASE'):
+            _LOG.error(f'Update failed: {err}')
+            parser.print_help()
+            sys.exit(3)
+        raise
+
+def make_parser():
+    r"""
+    Build a command line parser to take the parameters for setting a projects
+    proprietary period.
+
+    :return result: an argparse object with the profile, project and
+    duration in its namespace"""
+    result = argparse.ArgumentParser(
+        description=_DESCRIPTION.format(___version___),
+        formatter_class=argparse.RawTextHelpFormatter,
+        epilog=_EPILOG)
+    result.add_argument('-P', '--profile', action='store',
+                        help='profile name to use, e.g. nmtest, dsoc-test, or nmprod')
+    result.add_argument('project', action='store',
+                        help='project_code to update proprietary duration')
+    result.add_argument('duration', action='store',
+                        type=int,
+                        help='an integer duration to apply to the project; '
+                             '0 (immediately public) to 730 (private for two years from today)')
+    return result
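+
+# example invocation (illustrative values; '13B-014' is a VLA project code):
+#   proj_prop_period -P nmtest 13B-014 365
+# locks 13B-014 for 365 days past its stop time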
+
+class UpdateException(Exception):
+    """ Wrapper for exceptions caught during our attempt to update the
+    proprietary period.
+
+    Since we aren't passing the argparse object to helper functions,
+    we want to catch some library exceptions in sqlalchemy and also use it
+    for some error trapping and 'handling'
+
+    """
+
+
+class Telescope(Enum):
+    ''' all values we can expect in execution_block.telescope '''
+    GBT  = 'GBT'
+    VLA  = 'VLA'
+    ALMA = 'ALMA'
+    EVLA = 'EVLA'
+    VLBA = 'VLBA'
+
+class ProjectLock(Enum):
+    ''' valid values for project.project_lock in legacy archive '''
+    PUBLIC = 'PUBLIC'
+    LTIME  = 'LTIME'
+    UTIME  = 'UTIME'
+
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/cli/utilities/proprietary_setter/test/test_prop_setter.py b/apps/cli/utilities/proprietary_setter/test/test_prop_setter.py
new file mode 100644
index 0000000000000000000000000000000000000000..8acb511288eaab2adb2346a50f6c77879d8bcd9e
--- /dev/null
+++ b/apps/cli/utilities/proprietary_setter/test/test_prop_setter.py
@@ -0,0 +1,301 @@
+''' Tests for ProprietaryPeriodSetter '''
+
+import logging
+import random
+import subprocess
+
+import pytest
+from astropy.time import Time
+from proprietary_setter.project_finders import \
+    LegacyProjectFinder, NewArchiveProjectFinder
+from proprietary_setter.prop_setter import \
+    MAX_DURATION, make_parser, ProprietaryPeriodSetter, ProjectLock
+from schema import Project, create_session
+from schema.legacy_model import LegacyProject
+from support.logging import get_console_logger
+
+# pylint: disable=logging-format-interpolation
+
+_PROFILE = 'local'
+
+_LOG = get_console_logger('proprietary_setter_tests')
+
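+# each test project's canonical DB values, used by _restore_original_values()
+# to put both archives back after a test mutates them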
+canonical_values = {'13B-014': {'duration': 365,
+                                'lock': ProjectLock.UTIME.value,
+                                'expire': 365},
+                    'BG266': {'duration': 365,
+                              'lock': ProjectLock.LTIME.value,
+                              'expire': 0},
+                    'BZ076': {'duration': 548,
+                              'lock': ProjectLock.LTIME.value,
+                              'expire': 0},
+                    }
+
+def test_setter_updates_vla_project():
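+    ''' happy path: the setter updates a VLA project in both archive DBs '''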
+    project_code = '13B-014'
+    legacy_before = LegacyProjectFinder(_PROFILE).find_projects(project_code)[0]
+    new_duration = random.randint(0, MAX_DURATION)
+
+    expected_proprietary = legacy_before.stoptime + new_duration
+    expected_lock = compute_lock(expected_proprietary)
+
+    args = ['-P', _PROFILE, project_code, str(new_duration)]
+    parser = make_parser()
+    namespace = parser.parse_args(args)
+    usage = parser.format_usage()
+
+    # call the setter
+    setter = ProprietaryPeriodSetter(usage, namespace)
+    projects = setter.set_project_proprietary_state()
+    assert len(projects) == 2
+    assert 'legacy' in projects
+
+    # check that we got what we expected from the legacy DB
+    legacy_project = projects['legacy'][0]
+    assert legacy_project is not None
+    assert isinstance(legacy_project, LegacyProject)
+    assert legacy_project.project_lock == expected_lock
+    assert legacy_project.proprietary == expected_proprietary
+    assert new_duration == legacy_project.proprietary_duration
+
+    # check that we got what we expected from the new-archive DB
+    assert 'new' in projects
+    project_after = projects['new']
+    assert project_after is not None
+    assert isinstance(project_after, Project)
+
+    assert project_after.proprietary_duration == new_duration
+    _restore_original_values(project_code)
+
+def test_invalid_duration_is_rejected():
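+    ''' durations outside [0, MAX_DURATION] should exit with code 2 '''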
+    project_code = '13B-014'
+    new_duration = MAX_DURATION + 1
+    args = ['-P', _PROFILE, project_code, str(new_duration)]
+    parser = make_parser()
+
+    namespace = parser.parse_args(args)
+    usage = parser.format_usage()
+
+    with pytest.raises(SystemExit) as exc:
+        ProprietaryPeriodSetter(usage, namespace)
+    # nothing after the raising call executes inside the 'with' block,
+    # so assertions belong outside it
+    assert exc.value.code == 2
+
+    new_duration = -1
+    args = ['-P', _PROFILE, project_code, str(new_duration)]
+    parser = make_parser()
+    namespace = parser.parse_args(args)
+    with pytest.raises(SystemExit) as exc:
+        ProprietaryPeriodSetter(usage, namespace) \
+            .set_project_proprietary_state()
+    assert exc.value.code == 2
+
+def test_invalid_project_type_is_rejected():
+    ''' script should complain and exit if project is not VLA/EVLA '''
+    project_codes = ['BZ076', 'AGBT18A_314', '2019.1.01832.S']
+    new_duration = MAX_DURATION - 10
+
+    for project_code in project_codes:
+        args = ['-P', _PROFILE, project_code, str(new_duration)]
+        parser = make_parser()
+        namespace = parser.parse_args(args)
+        with pytest.raises(SystemExit) as exc:
+            ProprietaryPeriodSetter(parser.format_usage(), namespace) \
+                .set_project_proprietary_state()
+        assert exc.value.code == 3
+
+def test_bad_project_code_causes_update_failure():
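+    ''' a nonexistent project code should exit with code 3 '''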
+    project_code = '15B-99C'
+    new_duration = MAX_DURATION - 10
+    args = ['-P', _PROFILE, project_code, str(new_duration)]
+    parser = make_parser()
+    namespace = parser.parse_args(args)
+    with pytest.raises(SystemExit) as exc:
+        ProprietaryPeriodSetter(parser.format_usage(), namespace) \
+            .set_project_proprietary_state()
+    assert exc.value.code == 3
+
+def test_cli_returns_expected_codes():
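+    ''' launch the CLI in a subprocess and verify exit codes
+        for good and bad arguments '''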
+    project_code = '13B-014'
+    new_duration = str(random.randint(0, MAX_DURATION))
+
+    # the "control": good arguments
+    args = ['-P', _PROFILE, project_code, new_duration]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 0
+
+    # profile value omitted; parser will think project_code is profile
+    args = ['-P', project_code, new_duration]
+    _LOG.info('>>> profile value omitted')
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 2
+
+    # profile argument omitted entirely
+    _LOG.info('>>> profile argument omitted')
+    args = [project_code, new_duration]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 1
+
+    # invalid profile: prop_setter will be unable to connect to new archive DB
+    _LOG.info('>>> invalid profile')
+    args = ['-P', 'foo', project_code, new_duration]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 3
+
+    # project code omitted
+    _LOG.info('>>> project code omitted')
+    args = ['-P', _PROFILE, new_duration]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 2
+
+    # invalid proprietary duration
+    _LOG.info('>>> invalid duration')
+    args = ['-P', _PROFILE, project_code, '999']
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 2
+
+    # proprietary duration omitted
+    _LOG.info('>>> duration omitted')
+    args = ['-P', _PROFILE, project_code]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 2
+
+    # nonexistent project code
+    _LOG.info('>>> bad project code')
+    args = ['-P', _PROFILE, 'foo', new_duration]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 3
+
+    # VLBA project
+    _LOG.info('>>> VLBA project (not permitted)')
+    args = ['-P', _PROFILE, 'BG266', new_duration]
+    return_code = CommandLineLauncher().run(args)
+    if not return_code:
+        _restore_original_values(project_code)
+    assert return_code == 3
+
+
+### UTILITIES ###
+
+def compute_lock(proprietary):
+    ''' For the legacy archive only:
+        decide the value of project.project_lock
+        based on value of project.proprietary
+        and the current datetime.
+    '''
+    return ProjectLock.LTIME.value if proprietary > Time.now().mjd \
+        else ProjectLock.UTIME.value
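+# e.g., compute_lock(Time.now().mjd + 10) -> 'LTIME' (still proprietary);
+# compute_lock(Time.now().mjd - 10) -> 'UTIME' (no longer locked)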
+
+def _restore_original_values(project_code: str) -> None:
+    values_dict = canonical_values[project_code]
+
+    # set canonical values for this project....
+
+    # ...in the legacy archive
+    legacy_projs = LegacyProjectFinder(_PROFILE) \
+        .find_projects(project_code)
+
+    legacy_session = create_session('LEGACY', profile=_PROFILE)
+    try:
+        for found_proj in legacy_projs:
+            # re-query the row in this session so the commit below applies
+            legacy_proj = legacy_session.query(LegacyProject) \
+                .filter(LegacyProject.project_code
+                        .in_((project_code, found_proj.project_code))) \
+                .first()
+            assert legacy_proj is not None
+            legacy_proj.proprietary_duration = values_dict['duration']
+            legacy_proj.unlock_expire = values_dict['expire']
+            legacy_proj.project_lock = values_dict['lock']
+        legacy_session.commit()
+
+    except Exception as exc:
+        pytest.fail(f'problem updating {project_code} in legacy archive:'
+                    f' {exc}')
+
+    finally:
+        legacy_session.close()
+
+    # ...in the new archive
+    session = create_session('SDM', profile=_PROFILE)
+    try:
+        new_proj = session.query(Project) \
+            .filter(Project.project_code == project_code) \
+            .first()
+        new_proj.proprietary_duration = values_dict['duration']
+        session.commit()
+    except Exception as exc:
+        pytest.fail(f'problem updating {project_code} in new archive:'
+                    f' {exc}')
+    finally:
+        session.close()
+
+    # confirm values have been reset in the legacy archive....
+    legacies = LegacyProjectFinder(_PROFILE).find_projects(project_code)
+    for legacy_proj in legacies:
+        assert values_dict['duration'] == legacy_proj.proprietary_duration
+        assert values_dict['lock'] == legacy_proj.project_lock
+        assert values_dict['expire'] == legacy_proj.unlock_expire
+
+    # ... and in the "new" archive
+    newbie = NewArchiveProjectFinder(_PROFILE).find_project(project_code)
+    assert values_dict['duration'] == newbie.proprietary_duration
+
+
+class CommandLineLauncher:
+    ''' Simulates execution of the prop setter CLI'''
+
+    def run(self, args_in):
+        ''' Launch prop setter with args_in '''
+
+        args = ['proj_prop_period'] + list(args_in)
+
+        try:
+            # subprocess.run() waits for the process to finish, so a
+            # returncode is always available on the result
+            proc = subprocess.run(args,
+                                  stdout=subprocess.PIPE,
+                                  stderr=subprocess.STDOUT,
+                                  check=False,
+                                  bufsize=1,
+                                  universal_newlines=True)
+
+            if proc.returncode == 0:
+                return 0
+            _LOG.error(proc.stdout)
+            return proc.returncode
+
+        except subprocess.CalledProcessError as exc:
+            _LOG.error(f'{exc}')
+            return exc.returncode
diff --git a/apps/cli/utilities/qa_results/__init__.py b/apps/cli/utilities/qa_results/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/s_code_project_updater/__init__.py b/apps/cli/utilities/s_code_project_updater/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/s_code_project_updater/setup.py b/apps/cli/utilities/s_code_project_updater/setup.py
index 2427961859e8a06eaee55fb0771025e1f8cdaecb..3fb55970f3eb2930f568d43280c4847b6a7cf0d5 100644
--- a/apps/cli/utilities/s_code_project_updater/setup.py
+++ b/apps/cli/utilities/s_code_project_updater/setup.py
@@ -16,7 +16,7 @@ setup(
     author_email='dms-ssa@nrao.edu',
     url='TBD',
     license="GPL",
-    install_requires=['pycapo', 'pymygdala', 'schema', 'sqlalchemy', 'support'],
+    install_requires=['pycapo', 'pymygdala', 'schema', 'sqlalchemy'],
     tests_require=['pytest-mock'],
     requires=['sqlalchemy', 'mysqldb'],
     keywords=[],
diff --git a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py
index 1b0be6cad27bdb4003a7bd0c923df4046fd5458b..d7f5f3ed53bef0da2a3cfeab98a7449a34e5ad83 100644
--- a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py
+++ b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py
@@ -15,12 +15,10 @@ from sqlalchemy import exc as sa_exc, asc, desc
 
 from pymygdala import LogHandler, SendNRAOEvent
 from s_code_project_updater import Telescope
-from shared.schema.src.schema import Author, Project, ExecutionBlock, \
-    create_session
-from shared.schema.src.schema.pstmodel import Person, UserAuthentication
-from shared.support.src.support.capo import get_my_capo_config
-from shared.support.src.support.logging import LOG_MESSAGE_FORMATTER, \
-    get_console_logger
+from schema import Author, Project, ExecutionBlock, create_session
+from schema.pstmodel import Person, UserAuthentication
+from support.capo import get_my_capo_config
+from support.logging import LOG_MESSAGE_FORMATTER, get_console_logger
 
 from ._version import ___version___ as version
 from .project_fetcher import ArchiveProjectFetcher
diff --git a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py
index 4c4737ddb040f5242940bf2d57c39c7d4b809f36..639c0c3040f2febc42f49316462262b70113f318 100644
--- a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py
+++ b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py
@@ -4,11 +4,10 @@ import warnings
 
 from sqlalchemy import exc as sa_exc, asc, desc
 
-from shared.schema.src.schema import ArchiveDBSession, create_session, \
-    ExecutionBlock
-from shared.schema.src.schema.model import Project, Author
-from shared.support.src.support.capo import get_my_capo_config
-from shared.support.src.support.logging import get_console_logger
+from schema import ArchiveDBSession, create_session, ExecutionBlock
+from schema.model import Project, Author
+from support.capo import get_my_capo_config
+from support.logging import get_console_logger
 
 from . import Telescope
 
diff --git a/apps/cli/utilities/s_code_project_updater/test/test_projects.py b/apps/cli/utilities/s_code_project_updater/test/test_projects.py
index c6b8e9e60db5111f15cf8f4240431f83da636f3f..afb5d065a995a25176c735b4bbf40e62326cd7ba 100644
--- a/apps/cli/utilities/s_code_project_updater/test/test_projects.py
+++ b/apps/cli/utilities/s_code_project_updater/test/test_projects.py
@@ -3,7 +3,7 @@ import warnings
 
 from sqlalchemy import exc as sa_exc
 
-from shared.schema.src.schema import Author, Project
+from schema.model import Author, Project
 
 
 class ScodeTestProject():
diff --git a/apps/cli/utilities/s_code_project_updater/test/test_updater.py b/apps/cli/utilities/s_code_project_updater/test/test_updater.py
index 6f161f685b73d42f22c9e5d96223b99e15a241de..83ea0a3b4c6bec2a040fdc4059679f3682e1d34c 100755
--- a/apps/cli/utilities/s_code_project_updater/test/test_updater.py
+++ b/apps/cli/utilities/s_code_project_updater/test/test_updater.py
@@ -1,15 +1,14 @@
 import logging
 import os
 import subprocess
-import unittest
 import warnings
 
 from sqlalchemy import exc as sa_exc
 
 import pytest
 from s_code_project_updater.commands import ScodeProjectUpdater
-from shared.schema.src.schema import create_session, Project
-from shared.support.src.support.logging import get_console_logger
+from schema import create_session, Project
+from support.logging import get_console_logger
 
 from .test_projects import \
     ScodeTestProject, ScienceTestProject, AlmaTestProject, get_author_pst_ids
@@ -18,30 +17,21 @@ _LOG = get_console_logger("scode_project_updater_tests", logging.DEBUG)
 _UPDATE_COMMAND = 'update_sproj'
 PROFILE = 'local'
 
-class UpdaterTestCase(unittest.TestCase):
+class TestUpdater:
     ''' Exercises ScodeProjectUpdater '''
-
-    @classmethod
-    def setUpClass(cls) -> None:
+    @pytest.fixture(autouse=True, scope='function')
+    def install_test_data(self):
         os.environ['CAPO_PROFILE'] = PROFILE
-        cls.return_values = build_updater_return_values()
-
-    @classmethod
-    def setUp(cls) -> None:
-        cls.initialize_test_data(cls)
-
-    @classmethod
-    def tearDownClass(cls) -> None:
-        cls.remove_test_data(cls)
+        self.initialize_test_data()
+        yield
+        self.remove_test_data()
 
     def test_dry_run_does_not_update(self):
         fake_project = ScodeTestProject().project
         project_code = fake_project.project_code
         try:
             new_title = 'this is the new title'
-            self.assertNotEqual(fake_project.title, new_title,
-                                f'new title should be {new_title}; got '
-                                f'{fake_project.title}')
+            assert fake_project.title != new_title
             args = [
                 '-C', project_code,
                 '-P', PROFILE,
@@ -50,17 +40,9 @@ class UpdaterTestCase(unittest.TestCase):
                 ]
             updated = ScodeProjectUpdater(args=args).update_project()
             # nothing should have been updated
-            self.assertEqual(fake_project.title, updated.title,
-                             f'expecting same title, but before is '
-                             f'{fake_project.title} and after is {updated.title}')
-            self.assertEqual(fake_project.abstract, updated.abstract,
-                             f'expecting same abstract, but before is '
-                             f'{fake_project.abstract} and updated is {updated.abstract}')
-            self.assertEqual(len(fake_project.authors),
-                             len(updated.authors),
-                             f'expecting same number of authors, '
-                             f'but before has {len(fake_project.authors)} '
-                             f'and after has {len(updated.authors)}')
+            assert fake_project.title == updated.title
+            assert fake_project.abstract == updated.abstract
+            assert len(fake_project.authors) == len(updated.authors)
         except SystemExit as exc:
             pytest.fail(f'unexpected failure with return code {exc.code}')
             raise
@@ -83,36 +65,24 @@ class UpdaterTestCase(unittest.TestCase):
             pytest.fail(f'unexpected failure with return code {exc.code}')
             raise
 
-        self.assertIsNotNone(updated, 'we should have gotten a project back')
-
-        self.assertEqual(fake_project.title, updated.title,
-                         f'expecting same title, but before is '
-                         f'{fake_project.title} and after is {updated.title}')
-        self.assertEqual(fake_project.abstract, updated.abstract,
-                         f'expecting same abstract, but before is '
-                         f'{fake_project.abstract} and updated is {updated.abstract}')
-        self.assertEqual(len(fake_project.authors),
-                         len(updated.authors),
-                         f'expecting same number of authors, '
-                         f'but before has {len(fake_project.authors)} '
-                         f'and after has {len(updated.authors)}')
+        assert updated is not None
+
+        assert fake_project.title == updated.title
+        assert fake_project.abstract == updated.abstract
+        assert len(fake_project.authors) == len(updated.authors)
         count = 0
         for orig_author in fake_project.authors:
             for author in updated.authors:
                 if author.username == orig_author.username:
                     count += 1
                     break
-        self.assertEqual(len(fake_project.authors), count,
-                         'before and after projects should have '
-                         'same authors')
+        assert len(fake_project.authors) == count
 
     def test_updates_abstract_only(self):
         fake_project = ScodeTestProject().project
         project_code = fake_project.project_code
         new_abstract = "Well, here's another nice mess you've gotten us into, Ollie"
-        self.assertNotEqual(fake_project.abstract, new_abstract,
-                            f'expecting new abstract {new_abstract} '
-                            f'but got {fake_project.abstract}')
+        assert fake_project.abstract != new_abstract
         args = [
             '-C', project_code,
             '-P', PROFILE,
@@ -122,14 +92,9 @@ class UpdaterTestCase(unittest.TestCase):
             updated = ScodeProjectUpdater(args=args).update_project()
             # only abstract should have been updated;
             # all else should be same
-            self.assertEqual(fake_project.title, updated.title,
-                             f'expecting same title, but before is '
-                             f'{fake_project.title} and after is {updated.title}')
-            self.assertEqual(new_abstract, updated.abstract,
-                             f'expecting same abstract, but before is '
-                             f'{fake_project.abstract} and updated is {updated.abstract}')
-            self.assertEqual(len(fake_project.authors),
-                             len(updated.authors))
+            assert fake_project.title == updated.title
+            assert new_abstract == updated.abstract
+            assert len(fake_project.authors) == len(updated.authors)
         except SystemExit as exc:
             pytest.fail(f'unexpected failure; return code = {exc.code}')
             raise
@@ -139,12 +104,8 @@ class UpdaterTestCase(unittest.TestCase):
         project_code = fake_project.project_code
         new_abstract = "I think you ought to know I'm feeling very depressed"
         new_title = 'A Survey of the Mattresses of Sqornshellous Zeta'
-        self.assertNotEqual(fake_project.abstract, new_abstract,
-                            f'expecting new abstract {new_abstract}, '
-                            f'but abstract was not changed from {fake_project.abstract}')
-        self.assertNotEqual(fake_project.title, new_title,
-                            f'expecting new title {new_title}, '
-                            f'but abstract was not changed from {fake_project.title}')
+        assert fake_project.abstract != new_abstract
+        assert fake_project.title != new_title
         args = [
             '-C', project_code,
             '-P', PROFILE,
@@ -153,13 +114,9 @@ class UpdaterTestCase(unittest.TestCase):
         ]
         try:
             updated = ScodeProjectUpdater(args=args).update_project()
-            self.assertEqual(new_title, updated.title,
-                             'title should  not have changed')
-            self.assertEqual(new_abstract, updated.abstract,
-                             'abstract should not have changed')
-            self.assertEqual(len(fake_project.authors),
-                             len(updated.authors),
-                             'authors should not have changed')
+            assert new_title == updated.title
+            assert new_abstract == updated.abstract
+            assert len(fake_project.authors) == len(updated.authors)
         except SystemExit as exc:
             pytest.fail(f'unexpected failure; exit code = {exc.code}')
             raise
@@ -172,16 +129,13 @@ class UpdaterTestCase(unittest.TestCase):
                               abstract=fake_project.abstract)
         new_abstract = "First there is a mountain, then there is no " \
                         "mountain, then there is"
-        self.assertNotEqual(new_abstract, fake_project.abstract)
+        assert new_abstract != fake_project.abstract
         new_project.abstract = new_abstract
         original_authors = fake_project.authors.copy()
-        self.assertEqual(4, len(original_authors),
-                         'expected 4 authors before update')
+        assert 4 == len(original_authors)
         last_author = original_authors[3]
         new_authors = original_authors[:3]
-        self.assertEqual(len(original_authors) - 1, len(new_authors),
-                         f'expecting {len(original_authors) - 1} new authors, '
-                         f'but there are {len(new_authors)}')
+        assert len(original_authors) - 1 == len(new_authors)
         new_project.authors = new_authors
         args = [
             '-C', project_code,
@@ -195,30 +149,23 @@ class UpdaterTestCase(unittest.TestCase):
         updated = None
         try:
             updated = ScodeProjectUpdater(args=args).update_project()
-            self.assertIsNotNone(updated, 'project should have been returned')
+            assert updated is not None
         except SystemExit as exc:
             pytest.fail(f'unexpected failure; return code = {exc.code}')
             raise
 
-        self.assertNotEqual(fake_project.abstract, updated.abstract,
-                            'abstract should have changed')
-        self.assertEqual(fake_project.title, updated.title,
-                         'title should not have changed')
-        expected = len(original_authors) - 1
-        actual = len(updated.authors)
-        self.assertEqual(expected, actual,
-                         'one author should have been removed')
+        assert fake_project.abstract != updated.abstract
+        assert fake_project.title == updated.title
+        assert len(original_authors) - 1 == len(updated.authors)
         authors_updated = last_author in updated.authors
-        self.assertFalse(authors_updated, 'THIS IS THE MESSAGE')
+        assert not authors_updated
         count = 0
         for orig_author in original_authors[:3]:
             for new_author in updated.authors:
                 if new_author.username == orig_author.username:
                     count += 1
                     break
-        self.assertEqual(len(new_authors), count,
-                         f'expected {len(new_authors)} authors in '
-                         f'updated project; there were {count}')
+        assert len(new_authors) == count
 
     def test_output_is_as_expected(self):
         fake_project = ScodeTestProject().project
@@ -230,22 +177,19 @@ class UpdaterTestCase(unittest.TestCase):
         updater = ScodeProjectUpdater(args=args)
         updater.update_project()
         output = updater.get_project_info()
-        self.assertIsNotNone(output, 'program output is expected')
-        self.assertTrue('Title: ' + fake_project.title in output,
-                        'title should be in output')
-        self.assertTrue('Abstract: ' + fake_project.abstract in output,
-                        'abstract should be in output')
+        assert output is not None
+        assert ('Title: ' + fake_project.title) in output
+        assert ('Abstract: ' + fake_project.abstract) in output
         pst_ids = [str(id) for id in get_author_pst_ids(fake_project)]
         pst_id_str = ' '.join(pst_ids)
-        self.assertTrue('Authors: ' + pst_id_str in output,
-                        f'output should have PST IDs {pst_ids}')
+        assert 'Authors: ' + pst_id_str in output
 
     def test_copes_with_single_pi(self):
         project = ScodeTestProject().project
         args = ['-P', PROFILE, '-C', project.project_code, '-I', '4686']
         try:
             updated = ScodeProjectUpdater(args=args).update_project()
-            self.assertEqual(1, len(updated.authors))
+            assert 1 == len(updated.authors)
         except SystemExit as ex:
             pytest.fail(f'update failed with exit code {ex.code}')
             raise
@@ -258,13 +202,12 @@ class UpdaterTestCase(unittest.TestCase):
 
         with pytest.raises(SystemExit) as exc:
             ScodeProjectUpdater(args=args).update_project()
-            self.assertEqual(2, exc.code, 'ALMA project should be rejected')
+        assert 2 == exc.value.code
 
     def test_update_failure_returns_expected_code(self):
         result = FailingUpdater().update_project()
-        self.assertIsInstance(result, SystemExit)
-        self.assertEqual(5, result.code,
-                         'expecting return code 5 for update failure')
+        assert isinstance(result, SystemExit)
+        assert 5 == result.code
 
     """ The following test should be moved to another test case, 
         where we'll use a bash script, via subprocess.call(), to create an 
@@ -282,57 +225,47 @@ class UpdaterTestCase(unittest.TestCase):
 
         # minimum required arguments -- profile & project -- omitted
         return_code = CommandLineUpdaterLauncher([]).run()
-        self.assertEqual(return_code, 2,
-                         'expected return code 2 for no args')
+        assert return_code == 2
 
         project_code = ScodeTestProject().project.project_code
 
         # profile not specified
         args = ['-C', project_code,]
         return_code = CommandLineUpdaterLauncher(args).run()
-        self.assertEqual(return_code, 2,
-                         'expecting return code 2 when profile not specified')
+        assert return_code == 2
 
         # project code not specified
         args = ['-P', PROFILE]
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 when project not specified')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # profile value missing
         args = ['-P', '-C', project_code]
         return_code = CommandLineUpdaterLauncher(args).run()
-        self.assertEqual(return_code, 2,
-                         'expecting return code 2 for missing profile')
+        assert return_code == 2
 
         # project code missing
         args = ['-P', PROFILE, '-C']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 for missing project code')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # bad project code
         args = ['-P', PROFILE, '-C', 'bogus']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 3,
-                         'expecting return code 3 for invalid project code')
+        assert CommandLineUpdaterLauncher(args).run() == 3
 
         # bad profile
         args = ['-P', 'not_a_profile', '-C', project_code]
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 1,
-                         'expecting return code 1 for invalid Capo profile')
+        assert CommandLineUpdaterLauncher(args).run() == 1
 
         # missing title as last argument
         args = ['-P', PROFILE, '-C', project_code, '-T']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 for missing title')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # missing title as first argument
         args = ['-T', '-P', PROFILE, '-C', project_code,]
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 for missing title')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # nonexistent investigator
         args = ['-P', PROFILE, '-C', project_code, '-I', '-22']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 4,
-                         'expecting return code 4 for invalid investigator')
+        assert CommandLineUpdaterLauncher(args).run() == 4
 
 
     ### UTILITIES ###
@@ -454,16 +387,3 @@ class CommandLineUpdaterLauncher:
         except Exception as exc:
             _LOG.error(f'{exc}')
             return exc.returncode
-
-def build_updater_return_values():
-    ''' return codes and messages in the updater's "usage" string '''
-    return {
-        1: 'error with capo configuration',
-        2: 'error with input parameters',
-        3: 'project not found',
-        4: 'investigator not found',
-        5: 'update failed',
-    }
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 8b6d8921529a657ded635d61ecdd0d11aec7f695..0000000000000000000000000000000000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pika==1.1.0
-pycapo==0.3.1
diff --git a/schema/README b/schema/README
deleted file mode 100644
index 98e4f9c44effe479ed38c66ba922e7bcc672916f..0000000000000000000000000000000000000000
--- a/schema/README
+++ /dev/null
@@ -1 +0,0 @@
-Generic single-database configuration.
\ No newline at end of file
diff --git a/schema/__init__.py b/schema/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/schema/alembic.ini b/schema/alembic.ini
deleted file mode 100644
index ab882257362ff96b9ca34232e0c9ddfcaf229330..0000000000000000000000000000000000000000
--- a/schema/alembic.ini
+++ /dev/null
@@ -1,85 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = .
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# timezone to use when rendering the date
-# within the migration file as well as the filename.
-# string value is passed to dateutil.tz.gettz()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the
-# "slug" field
-# truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; this defaults
-# to ./versions.  When using multiple version
-# directories, initial revisions must be specified with --version-path
-# version_locations = %(here)s/bar %(here)s/bat ./versions
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-sqlalchemy.url = driver://user:pass@localhost/dbname
-
-
-[post_write_hooks]
-# post_write_hooks defines scripts or Python functions that are run
-# on newly generated revision scripts.  See the documentation for further
-# detail and examples
-
-# format using "black" - use the console_scripts runner, against the "black" entrypoint
-# hooks=black
-# black.type=console_scripts
-# black.entrypoint=black
-# black.options=-l 79
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/schema/env.py b/schema/env.py
deleted file mode 100644
index 70518a2eef734a8fffcd787cfa397309469f8e76..0000000000000000000000000000000000000000
--- a/schema/env.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from logging.config import fileConfig
-
-from sqlalchemy import engine_from_config
-from sqlalchemy import pool
-
-from alembic import context
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-fileConfig(config.config_file_name)
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-# target_metadata = mymodel.Base.metadata
-target_metadata = None
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
-    """Run migrations in 'offline' mode.
-
-    This configures the context with just a URL
-    and not an Engine, though an Engine is acceptable
-    here as well.  By skipping the Engine creation
-    we don't even need a DBAPI to be available.
-
-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
-    url = config.get_main_option("sqlalchemy.url")
-    context.configure(
-        url=url,
-        target_metadata=target_metadata,
-        literal_binds=True,
-        dialect_opts={"paramstyle": "named"},
-    )
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-
-def run_migrations_online():
-    """Run migrations in 'online' mode.
-
-    In this scenario we need to create an Engine
-    and associate a connection with the context.
-
-    """
-    connectable = engine_from_config(
-        config.get_section(config.config_ini_section),
-        prefix="sqlalchemy.",
-        poolclass=pool.NullPool,
-    )
-
-    with connectable.connect() as connection:
-        context.configure(
-            connection=connection, target_metadata=target_metadata
-        )
-
-        with context.begin_transaction():
-            context.run_migrations()
-
-
-if context.is_offline_mode():
-    run_migrations_offline()
-else:
-    run_migrations_online()
diff --git a/schema/script.py.mako b/schema/script.py.mako
deleted file mode 100644
index 2c0156303a8df3ffdc9de87765bf801bf6bea4a5..0000000000000000000000000000000000000000
--- a/schema/script.py.mako
+++ /dev/null
@@ -1,24 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision | comma,n}
-Create Date: ${create_date}
-
-"""
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-branch_labels = ${repr(branch_labels)}
-depends_on = ${repr(depends_on)}
-
-
-def upgrade():
-    ${upgrades if upgrades else "pass"}
-
-
-def downgrade():
-    ${downgrades if downgrades else "pass"}
diff --git a/schema/versions/.keep b/schema/versions/.keep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/services/archive/__init__.py b/services/archive/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/set_up_project.sh b/set_up_project.sh
index 13f15491681a014bed2059e91746c16815f6da7a..32b21dd032552af4b921a8e026d85a741832f458 100755
--- a/set_up_project.sh
+++ b/set_up_project.sh
@@ -6,19 +6,18 @@
 TOP_LEVEL=/Users/jgoldste/Projects/data/
 cd $TOP_LEVEL
 
-current_env=$CONDA_DEFAULT_ENV
-if [ -z $current_env ]
+if [ -z "$CONDA_DEFAULT_ENV" ]
 then
   echo '>>> Updating conda environment....'
   conda env update
   echo '>>> Activating....'
   conda activate data
 else
-  echo "conda environment 'data' is active"
+  echo "conda environment '${CONDA_DEFAULT_ENV}' is active"
 fi
 
 SETUP=setup.py
-echo '>>> finding all $SETUP'
+echo ">>> looking for ${SETUP} in all modules...."
 SETUPS=$(find . -name $SETUP -exec echo '{}' \;)
 for item in $SETUPS
 do
@@ -27,8 +26,6 @@ do
   cd ${dir}
 
   python setup.py develop
-#    to_keep=$(basename -s $item)
-#    echo $to_keep
 done
 
 
diff --git a/shared/messaging/events/__init__.py b/shared/messaging/events/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/shared/schema/__init__.py b/shared/schema/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/shared/support/__init__.py b/shared/support/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/test/test_all.sh b/test/test_all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6d2b001bef07c51f8d26b7d16fa71a3cf86c84b3
--- /dev/null
+++ b/test/test_all.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Set up conda environment, if necessary,
+# then in each module:
+# 1. run setup
+# 2. if there is a test/ subdirectory, execute pytest therein
+
+TOP_LEVEL=/Users/jgoldste/Projects/data/
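+# NB: hard-coded to a developer checkout; adjust TOP_LEVEL for your machine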
+cd $TOP_LEVEL
+
+if [ -z "$CONDA_DEFAULT_ENV" ]
+then
+  echo '>>> Updating conda environment....'
+  conda env update
+  echo '>>> Activating....'
+  conda activate data
+else
+  echo "conda environment '${CONDA_DEFAULT_ENV}' is active"
+fi
+
+SETUP=setup.py
+echo ">>> looking for ${SETUP} in all modules...."
+SETUPS=$(find . -name $SETUP -exec echo '{}' \;)
+for item in $SETUPS
+do
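+  # strip find's leading '.' from the relative path, then re-root at TOP_LEVEL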
+  dir=$(dirname "$item")
+  dir=${TOP_LEVEL}${dir:1}
+  cd "${dir}"
+  # bail out if the cd failed
+  if [ $? -ne 0 ]
+  then
+    exit 1
+  fi
+  echo ">>> running ${SETUP} in ${dir}...."
+  python setup.py develop
+  if [ -d "${dir}/test" ]
+  then
+    cd test
+    pytest --log-level=DEBUG --showlocals
+  fi
+done