diff --git a/apps/__init__.py b/apps/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/alma_product_fetch/__init__.py b/apps/cli/executables/alma_product_fetch/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/alma_reingester/__init__.py b/apps/cli/executables/alma_reingester/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/datafetcher/__init__.py b/apps/cli/executables/datafetcher/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/datafetcher/test/test_datafetcher.py b/apps/cli/executables/datafetcher/test/test_datafetcher.py
index e53f1887f8979424130d8a6175299b4bbea3eebc..73cb4e551b8072fc848aa0137b8efa018d3e3d07 100644
--- a/apps/cli/executables/datafetcher/test/test_datafetcher.py
+++ b/apps/cli/executables/datafetcher/test/test_datafetcher.py
@@ -3,17 +3,16 @@
 import os
 import subprocess
 import tempfile
-import unittest
 from pathlib import Path
 from typing import List
 from unittest.mock import MagicMock
 
 import pytest
 
-from ..src.datafetcher.commands import DataFetcher
-from ..src.datafetcher.errors import Errors
-from ..src.datafetcher.locations_report import LocationsReport
-from ..src.datafetcher.utilities import get_arg_parser, ExecutionSite, \
+from datafetcher.commands import DataFetcher
+from datafetcher.errors import Errors
+from datafetcher.locations_report import LocationsReport
+from datafetcher.utilities import get_arg_parser, ExecutionSite, \
     RetrievalMode, FlexLogger, ProductLocatorLookup
 
 from .testing_utils import TEST_PROFILE, LOCATION_REPORTS, \
@@ -34,7 +33,7 @@ TO EXECUTE THESE TESTS: from apps/cli/executables/datafetcher,
 
 '''
 
-class DataFetcherTestCase(unittest.TestCase):
+class TestDataFetcher:
     """ IMPORTANT NOTE: we CANNOT retrieve by copy if we don't have access to a
         location to which NGAS can write, e.g, lustre. Therefore, any test
         that involves -actual- retrieval of files must be by streaming, to
@@ -62,22 +61,21 @@ class DataFetcherTestCase(unittest.TestCase):
 
     """
 
-    @classmethod
-    def setUpClass(cls) -> None:
-        cls.profile = TEST_PROFILE
-        cls.settings = get_test_capo_settings()
-        cls.db_settings = get_metadata_db_settings(cls.profile)
-        cls.test_data = cls._initialize_test_data(cls)
-        cls.DATA_DIR = get_test_data_dir()
-        if cls.DATA_DIR is None:
+    @pytest.fixture(autouse=True, scope='function')
+    def setup_settings_datadir(self) -> None:
+        self.settings = get_test_capo_settings()
+        self.db_settings = get_metadata_db_settings(TEST_PROFILE)
+        self.test_data = self._initialize_test_data()
+        self.DATA_DIR = get_test_data_dir()
+        if self.DATA_DIR is None:
             pytest.fail(f'test data directory not found under {os.getcwd()}')
 
-    @classmethod
-    def setUp(cls) -> None:
+    @pytest.fixture(autouse=True, scope='function')
+    def make_tempdir(self) -> None:
         umask = os.umask(0o000)
-        cls.top_level = tempfile.mkdtemp()
+        self.top_level = tempfile.mkdtemp()
         os.umask(umask)
-        cls._LOG = FlexLogger(cls.__name__, cls.top_level)
+        self._LOG = FlexLogger(__name__, self.top_level)
 
     def test_bad_command_line(self):
 
@@ -96,10 +94,8 @@ class DataFetcherTestCase(unittest.TestCase):
         terminal_exception_thrown = False
 
         bad_locator_logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertIsNotNone(bad_locator_logfile,
-                             f'we should have gotten a log file in {self.top_level}')
-        self.assertNotEqual(0, os.path.getsize(bad_locator_logfile),
-                            f'expecting a non-empty log file in {self.top_level}')
+        assert bad_locator_logfile is not None
+        assert 0 != os.path.getsize(bad_locator_logfile)
         with open(bad_locator_logfile) as log:
             log_contents = log.readlines()
 
@@ -110,8 +106,8 @@ class DataFetcherTestCase(unittest.TestCase):
                 terminal_exception_thrown = True
             if exception_found and terminal_exception_thrown:
                 break
-        self.assertTrue(exception_found, 'expecting NoLocatorException')
-        self.assertTrue(terminal_exception_thrown, 'terminal_exception should be thrown')
+        assert exception_found
+        assert terminal_exception_thrown
         bad_locator_logfile.unlink()
 
         # nonexistent locations file
@@ -133,7 +129,7 @@ class DataFetcherTestCase(unittest.TestCase):
                 terminal_exception_thrown = True
             if exception_found and terminal_exception_thrown:
                 break
-        self.assertTrue(exception_found, 'expecting FileNotFoundError')
+        assert exception_found
 
     def test_nothing_retrieved_if_dry_on_cmd_line(self):
         toplevel = Path(self.top_level)
@@ -146,16 +142,15 @@ class DataFetcherTestCase(unittest.TestCase):
         fetcher = CommandLineFetchLauncher(args, self._LOG)
         output = fetcher.run()
         logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertEqual([], output, 'expecting no files for dry run')
-        self.assertNotEqual(0, os.path.getsize(logfile),
-                            'log file should be non-empty because verbose')
+        assert [] == output
+        assert 0 != os.path.getsize(logfile)
         Path.unlink(location_file)
 
         # make sure none of these files written
         file_count = 0
         for _ in os.walk(location_file):
             file_count += 1
-        self.assertEqual(0, file_count, 'no files should have been retrieved')
+        assert 0 == file_count
 
     def test_force_overwrite_from_cmd_line(self):
         toplevel = Path(self.top_level)
@@ -179,11 +174,9 @@ class DataFetcherTestCase(unittest.TestCase):
             for fname in fnames:
                 path = dest_dir / fname
                 sizes[path] = os.path.getsize(path)
-        self.assertEqual(37, len(sizes), 'expecting 37 files to be fetched')
+        assert 37 == len(sizes)
         fake_size = os.path.getsize(fake_file)
-        self.assertEqual(9339, fake_size, f'expecting '
-                                          f'{fake_file} to '
-                                          f'be 9339 bytes')
+        assert 9339 == fake_size
 
     def test_no_overwrite_from_cmd_line(self):
         toplevel = Path(self.top_level)
@@ -214,8 +207,7 @@ class DataFetcherTestCase(unittest.TestCase):
             if term_except_found and file_exists_found:
                 break
 
-        self.assertTrue(term_except_found and file_exists_found,
-                        'expecting terminal_exception for FileExistsError')
+        assert term_except_found and file_exists_found
 
     def test_cmd_line_more_output_when_verbose(self):
         report_file = get_mini_locations_file(
@@ -227,12 +219,10 @@ class DataFetcherTestCase(unittest.TestCase):
         fetcher = CommandLineFetchLauncher(args, self._LOG)
         retrieved = fetcher.run()
         num_files_expected = 37
-        self.assertEqual(num_files_expected, len(retrieved),
-                         f'expecting {num_files_expected} files')
+        assert num_files_expected == len(retrieved)
 
         verbose_logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertNotEqual(0,  os.path.getsize(verbose_logfile),
-                            'log should contain debug messages')
+        assert 0 != os.path.getsize(verbose_logfile)
 
         [file.unlink() for file in retrieved]
         verbose_logfile.unlink()
@@ -243,11 +233,9 @@ class DataFetcherTestCase(unittest.TestCase):
                 '--profile', TEST_PROFILE, '--output-dir', self.top_level]
         fetcher = CommandLineFetchLauncher(args, self._LOG)
         retrieved = fetcher.run()
-        self.assertEqual(num_files_expected, len(retrieved),
-                         f'expecting {num_files_expected} files')
+        assert num_files_expected == len(retrieved)
         logfile = find_newest_fetch_log_file(self.top_level)
-        self.assertEqual(0,  os.path.getsize(logfile),
-                         f'{logfile} should be empty')
+        assert 0 == os.path.getsize(logfile)
 
     def test_can_stream_from_mini_locations_file(self):
         """ gin up a location report with just a few small files in it
@@ -258,44 +246,40 @@ class DataFetcherTestCase(unittest.TestCase):
         report_file = get_mini_locations_file(location_file)
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         retrieved = fetch.run()
         file_count = len(retrieved)
-        self.assertEqual(37, file_count)
+        assert 37 == file_count
 
     def test_verbose_writes_stuff_to_log(self):
         path = Path(self.top_level, _LOCATION_FILENAME)
         report_file = get_mini_locations_file(path)
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile, '--verbose']
+                '--profile', TEST_PROFILE, '--verbose']
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         fetch.run()
 
         logfile = fetch.logfile
-        self.assertTrue(logfile.is_file(),
-                        f'expecting log file {logfile}')
-        self.assertNotEqual(0, os.path.getsize(logfile),
-                            'there should be entries in the log file')
+        assert logfile.is_file()
+        assert 0 != os.path.getsize(logfile)
 
     def test_empty_log_if_not_verbose(self):
         path = Path(self.top_level, _LOCATION_FILENAME)
         report_file = get_mini_locations_file(path)
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         fetch.run()
 
         logfile = fetch.logfile
-        self.assertTrue(logfile.is_file(),
-                        f'expecting log file {logfile}')
-        self.assertEqual(0, os.path.getsize(logfile),
-                         'log file should be empty')
+        assert logfile.is_file()
+        assert 0 == os.path.getsize(logfile)
 
     def test_copy_attempt_throws_sys_exit_service_error(self):
         product_locator = self.test_data['13B-014']['product_locator']
@@ -314,8 +298,7 @@ class DataFetcherTestCase(unittest.TestCase):
         servers_report = fetch.servers_report
         for server in servers_report:
             entry = servers_report[server]
-            self.assertTrue(
-                entry['retrieve_method'].value == RetrievalMode.COPY.value)
+            assert entry['retrieve_method'].value == RetrievalMode.COPY.value
 
         # let's try just one file so we're not sitting here all day
         for server in servers_report:
@@ -323,14 +306,14 @@ class DataFetcherTestCase(unittest.TestCase):
             servers_report = {server: entry}
             break
         fetch.servers_report = servers_report
-        self.assertIsNotNone(fetch.servers_report[server])
+        assert fetch.servers_report[server] is not None
         files = fetch.servers_report[server]['files']
         fetch.servers_report[server]['files'] = [files[0]]
 
         try:
             with pytest.raises(SystemExit) as s_ex:
                 fetch.run()
-            self.assertEqual(Errors.NGAS_SERVICE_ERROR.value, s_ex.value.code)
+            assert Errors.NGAS_SERVICE_ERROR.value == s_ex.value.code
         finally:
             self.settings['execution_site'] = local_exec_site
 
@@ -338,31 +321,30 @@ class DataFetcherTestCase(unittest.TestCase):
         report_file = get_locations_file('VLA_BAD_SERVER')
         args = ['--location-file', str(report_file),
                 '--output-dir', self.top_level,
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         with pytest.raises(SystemExit) as s_ex:
             fetch.run()
         exc_code = s_ex.value.code
         expected = Errors.NGAS_SERVICE_ERROR.value
-        self.assertEqual(expected, exc_code)
+        assert expected == exc_code
 
     def test_throws_sys_exit_file_exists_if_overwrite_not_forced(self):
         toplevel = Path(self.top_level)
         location_file = get_mini_locations_file(
             Path(self.top_level, _LOCATION_FILENAME))
-        self.assertTrue(Path.exists(location_file),
-                        f'{location_file}')
+        assert Path.exists(location_file)
         destination = Path(toplevel, _EB_EXTERNAL_NAME)
         Path(destination).mkdir(parents=True, exist_ok=True)
-        self.assertTrue(destination.is_dir(), f'{destination}')
+        assert destination.is_dir()
 
         # stick a fake SDM in there so it will fall over
         fake_file = Path(destination, _ASDM_XML)
         with open(fake_file, 'w') as to_write:
             to_write.write('lalalalalala')
-        self.assertTrue(fake_file.exists(), f'expecting fake file: {fake_file}')
-        self.assertFalse(os.path.getsize(fake_file) == 0)
+        assert fake_file.exists()
+        assert os.path.getsize(fake_file) != 0
 
         args = ['--location-file', str(location_file),
                 '--output-dir', self.top_level,
@@ -375,14 +357,14 @@ class DataFetcherTestCase(unittest.TestCase):
             DataFetcher(namespace, self.settings).run()
         exc_code = exc.value.code
         expected = Errors.FILE_EXISTS_ERROR.value
-        self.assertEqual(expected, exc_code)
+        assert expected == exc_code
 
     def test_overwrites_when_forced(self):
         external_name = LOCATION_REPORTS[_VLA_SMALL_KEY]['external_name']
         toplevel = Path(self.top_level)
         destination = toplevel / external_name
         destination.mkdir(parents=True, exist_ok=True)
-        self.assertTrue(destination.is_dir(), f'{destination}')
+        assert destination.is_dir()
 
         # stick a fake SDM in there to see if overwrite really happens
         to_overwrite = _ASDM_XML
@@ -390,11 +372,8 @@ class DataFetcherTestCase(unittest.TestCase):
         text = '"Bother!" said Pooh. "Lock phasers on that heffalump!"'
         with open(fake_file, 'w') as to_write:
             to_write.write(text)
-        self.assertTrue(fake_file.exists(),
-                        f'{to_overwrite} should have been created')
-        self.assertEqual(len(text), os.path.getsize(fake_file),
-                         f'before overwrite, {to_overwrite} should be'
-                         f' {len(text)} bytes')
+        assert fake_file.exists()
+        assert len(text) == os.path.getsize(fake_file)
         report_metadata = LOCATION_REPORTS['VLA_SMALL_EB']
         external_name = report_metadata['external_name']
         destination = toplevel / external_name
@@ -414,13 +393,11 @@ class DataFetcherTestCase(unittest.TestCase):
         sizes = [file['size'] for file in files]
         total_size_expected = sum(sizes)
         num_files_expected = 37
-        self.assertEqual(num_files_expected, len(files),
-                         f"expecting {report_metadata['file_count']} files in report")
+        assert num_files_expected == len(files)
 
         fetch = DataFetcher(namespace, self.settings)
         retrieved = fetch.run()
-        self.assertEqual(num_files_expected, len(retrieved),
-                         f'expected {num_files_expected} files but got {len(retrieved)}')
+        assert num_files_expected == len(retrieved)
 
         # delete the .json so it doesn't mess up our total size computation
         Path.unlink(report_file)
@@ -431,49 +408,44 @@ class DataFetcherTestCase(unittest.TestCase):
             for fname in filenames:
                 path = Path(dirpath, fname)
                 total_size_actual += os.path.getsize(path)
-        self.assertEqual(total_size_expected, total_size_actual,
-                         f'expected total size={total_size_expected}; got {total_size_actual}')
+        assert total_size_expected == total_size_actual
 
     def test_sys_exit_file_error_on_bad_destination(self):
         file_spec = self.test_data['13B-014']
         args = ['--product-locator', file_spec['product_locator'],
                 '--output-dir', '/foo',
-                '--profile', self.profile]
+                '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         with pytest.raises(SystemExit) as s_ex:
             DataFetcher(namespace, self.settings)
-        self.assertEqual(Errors.FILE_NOT_FOUND_ERROR.value, s_ex.value.code,
-                         'should throw FILE_NOT_FOUND_ERROR')
+        assert Errors.FILE_NOT_FOUND_ERROR.value == s_ex.value.code
 
     def test_sys_exit_no_locator_for_bad_product_locator(self):
         args = ['--product-locator', '/foo',
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
 
         with pytest.raises(SystemExit) as s_ex:
             fetch = DataFetcher(namespace, self.settings)
             fetch.run()
-        self.assertEqual(Errors.NO_LOCATOR.value, s_ex.value.code,
-                         'should throw NO_LOCATOR')
+        assert Errors.NO_LOCATOR.value == s_ex.value.code
 
     def test_gets_expected_test_data(self):
-        self.assertIsNotNone(self.test_data['13B-014'])
+        assert self.test_data['13B-014'] is not None
         file_spec = self.test_data['13B-014']
-        self.assertEqual('13B-014.sb28862036.eb29155786.56782.5720116088',
-                         file_spec['external_name'])
+        assert '13B-014.sb28862036.eb29155786.56782.5720116088' == file_spec['external_name']
         locator = file_spec['product_locator']
-        self.assertTrue(locator.startswith('uid://evla/execblock/'))
+        assert locator.startswith('uid://evla/execblock/')
 
     def test_gets_vlbas_from_report_file(self):
         report_file = get_locations_file('VLBA_EB')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
 
-        self.assertEqual(16, len(report_files),
-                         f'expecting 16 report files in {self.top_level}')
+        assert 16 == len(report_files)
         expected_files = [Path(self.top_level, item['relative_path'])
                           for item in report_files]
 
@@ -482,8 +454,7 @@ class DataFetcherTestCase(unittest.TestCase):
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files),
-                         f'expecting {num_expected} VLBA files in {self.top_level}')
+        assert num_expected == len(actual_files)
 
         match_count = 0
         for exp_file in expected_files:
@@ -492,37 +463,32 @@ class DataFetcherTestCase(unittest.TestCase):
                 if act_parent == exp_file.name:
                     match_count += 1
                     break
-        self.assertEqual(num_expected, match_count,
-                         f'{num_expected - match_count} file(s) are '
-                         f'unaccounted for')
+        assert num_expected == match_count
 
     def test_gets_large_vla_ebs_from_report_file(self):
         report_file = get_locations_file('VLA_LARGE_EB')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(46, len(report_files), 'expecting 46 files')
+        assert 46 == len(report_files)
         toplevel = Path(self.top_level)
         expected_files = [toplevel / item['relative_path']
                           for item in report_files]
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'VLBA files')
+        assert num_expected == len(actual_files)
 
     def test_gets_images_from_report_file(self):
         report_file = get_locations_file('IMG')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(2, len(report_files),
-                         f'expecting 2 report files in {self.top_level}')
+        assert 2 == len(report_files)
         toplevel = Path(self.top_level)
         expected_files = [toplevel / item['relative_path']
                           for item in report_files]
@@ -530,62 +496,52 @@ class DataFetcherTestCase(unittest.TestCase):
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'image files')
+        assert num_expected == len(actual_files)
 
     def test_gets_calibration_from_report_file(self):
         report_file = get_locations_file('CALIBRATION')
         args = ['--location-file', str(report_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(1, len(report_files),
-                         f'expecting 1 report file in {self.top_level}')
+        assert 1 == len(report_files)
         file_spec = report_files[0]
 
         # calibration will have external name = relative path = subdirectory
         relative_path = file_spec['relative_path']
-        self.assertEqual(relative_path, file_spec['subdirectory'],
-                         'expecting relative_path same as subdirectory')
+        assert relative_path == file_spec['subdirectory']
 
         expected_files = [Path(self.top_level, relative_path)]
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'calibration')
+        assert num_expected == len(actual_files)
 
     def test_gets_calibration_from_locator(self):
         external_name = LOCATION_REPORTS['CALIBRATION']['external_name']
         product_locator = ProductLocatorLookup(
             self.db_settings).look_up_locator_for_ext_name(external_name)
         args = ['--product-locator', product_locator,
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch = DataFetcher(namespace, self.settings)
         report_files = fetch.locations_report.files_report['files']
-        self.assertEqual(1, len(report_files),
-                         f'{external_name} should be 1 file in {self.top_level}')
+        assert 1 == len(report_files)
 
         file_spec = report_files[0]
 
         # calibration will have external name = relative path = subdirectory
         relative_path = file_spec['relative_path']
-        self.assertEqual(external_name, relative_path,
-                         'expecting external_name same as relative path')
-        self.assertEqual(relative_path, file_spec['subdirectory'],
-                         'expecting relative_path same as subdirectory')
+        assert external_name == relative_path
+        assert relative_path == file_spec['subdirectory']
 
         expected_files = [Path(self.top_level) / relative_path]
         fetch.run = MagicMock(return_value=expected_files)
         actual_files = fetch.run()
         num_expected = len(expected_files)
-        self.assertEqual(num_expected, len(actual_files), f'expecting '
-                                                          f'{num_expected} '
-                                                          f'calibration')
+        assert num_expected == len(actual_files)
+
     def test_retrieval_finds_size_mismatch(self):
         report_spec = LOCATION_REPORTS[_VLA_SMALL_KEY]
         external_name = report_spec['external_name']
@@ -593,59 +549,52 @@ class DataFetcherTestCase(unittest.TestCase):
         data_dir = Path(self.DATA_DIR)
         locations_file = data_dir / 'VLA_SMALL_EB_BUSTED.json'
         args = ['--location-file', str(locations_file),
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch1 = DataFetcher(namespace, self.settings)
         report_files = fetch1.locations_report.files_report['files']
-        self.assertEqual(44, len(report_files),
-                         f'{locations_file.name} should have 44 files')
+        assert 44 == len(report_files)
 
         filename = 'Weather.xml'
         for file in report_files:
             if filename == file['relative_path']:
-                self.assertEqual(165100, file['size'])
+                assert 165100 == file['size']
                 break
 
         product_locator = ProductLocatorLookup(self.db_settings) \
             .look_up_locator_for_ext_name(external_name)
         args = ['--product-locator', product_locator,
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
         fetch2 = DataFetcher(namespace, self.settings)
 
         locations_report = get_locations_report(_VLA_SMALL_KEY)
         fetch2.run = MagicMock(return_value=locations_report['files'])
         locator_files = fetch2.run()
-        self.assertEqual(len(report_files), len(locator_files),
-                         'should get same no. files from locator as from '
-                         'report file')
+        assert len(report_files) == len(locator_files)
         for file1 in report_files:
             for file2 in locator_files:
                 if file2['relative_path'] == file1['relative_path']:
                     if filename != file1['relative_path']:
-                        self.assertEqual(file2['size'], file1['size'],
-                                         'sizes should match')
+                        assert file2['size'] == file1['size']
                     else:
-                        self.assertNotEqual(file2['size'], file1['size'],
-                                            'sizes should match')
+                        assert file2['size'] != file1['size']
                     break
 
     def test_throws_sys_exit_missing_setting_if_no_args(self):
         args = []
         with pytest.raises(SystemExit) as s_ex:
             get_arg_parser().parse_args(args)
-        self.assertEqual(Errors.MISSING_SETTING.value, s_ex.value.code,
-                         'should throw MISSING_SETTING error')
+        assert Errors.MISSING_SETTING.value == s_ex.value.code
 
     def test_throws_sys_exit_no_locator_if_no_product_locator(self):
         args = ['--product-locator', '',
-                '--output-dir', self.top_level, '--profile', self.profile]
+                '--output-dir', self.top_level, '--profile', TEST_PROFILE]
         namespace = get_arg_parser().parse_args(args)
 
         with pytest.raises(SystemExit) as s_ex:
             DataFetcher(namespace, self.settings)
-        self.assertEqual(Errors.NO_LOCATOR.value, s_ex.value.code,
-                         'should throw NO_LOCATOR error')
+        assert Errors.NO_LOCATOR.value == s_ex.value.code
 
     # --------------------------------------------------------------------------
     #
diff --git a/apps/cli/executables/datafetcher/test/testing_utils.py b/apps/cli/executables/datafetcher/test/testing_utils.py
index 61fc3a33a3199fbb7e23a048ece9cbee9311b519..54794189d83406193ef57977440b2979446219de 100644
--- a/apps/cli/executables/datafetcher/test/testing_utils.py
+++ b/apps/cli/executables/datafetcher/test/testing_utils.py
@@ -9,13 +9,9 @@ from pathlib import Path
 
 from pycapo import CapoConfig
 
-from ..src.datafetcher.errors import \
-    MissingSettingsException, NoProfileException
-from ..src.datafetcher.locations_report import \
-    LocationsReport
-from ..src.datafetcher.utilities import \
-    REQUIRED_SETTINGS, get_arg_parser, \
-    ExecutionSite
+from datafetcher.errors import MissingSettingsException, NoProfileException
+from datafetcher.locations_report import LocationsReport
+from datafetcher.utilities import REQUIRED_SETTINGS, get_arg_parser, ExecutionSite
 
 TEST_PROFILE = 'local'
 
diff --git a/apps/cli/executables/epilogue/__init__.py b/apps/cli/executables/epilogue/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/ingestion/__init__.py b/apps/cli/executables/ingestion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/vlba_grabber/__init__.py b/apps/cli/executables/vlba_grabber/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/executables/weblog_thumbs/__init__.py b/apps/cli/executables/weblog_thumbs/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/launchers/pymygdala/__init__.py b/apps/cli/launchers/pymygdala/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/launchers/wf/__init__.py b/apps/cli/launchers/wf/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/__init__.py b/apps/cli/utilities/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/datafinder/__init__.py b/apps/cli/utilities/datafinder/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/dumplogs/__init__.py b/apps/cli/utilities/dumplogs/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/faultchecker/__init__.py b/apps/cli/utilities/faultchecker/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/mr_books/__init__.py b/apps/cli/utilities/mr_books/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/mr_clean/__init__.py b/apps/cli/utilities/mr_clean/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/proprietary_setter/__init__.py b/apps/cli/utilities/proprietary_setter/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/qa_results/__init__.py b/apps/cli/utilities/qa_results/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/s_code_project_updater/__init__.py b/apps/cli/utilities/s_code_project_updater/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/apps/cli/utilities/s_code_project_updater/setup.py b/apps/cli/utilities/s_code_project_updater/setup.py
index 2427961859e8a06eaee55fb0771025e1f8cdaecb..3fb55970f3eb2930f568d43280c4847b6a7cf0d5 100644
--- a/apps/cli/utilities/s_code_project_updater/setup.py
+++ b/apps/cli/utilities/s_code_project_updater/setup.py
@@ -16,7 +16,7 @@ setup(
     author_email='dms-ssa@nrao.edu',
     url='TBD',
     license="GPL",
-    install_requires=['pycapo', 'pymygdala', 'schema', 'sqlalchemy', 'support'],
+    install_requires=['pycapo', 'pymygdala', 'schema', 'sqlalchemy'],
     tests_require=['pytest-mock'],
     requires=['sqlalchemy', 'mysqldb'],
     keywords=[],
diff --git a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py
index 1b0be6cad27bdb4003a7bd0c923df4046fd5458b..d7f5f3ed53bef0da2a3cfeab98a7449a34e5ad83 100644
--- a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py
+++ b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/commands.py
@@ -15,12 +15,10 @@ from sqlalchemy import exc as sa_exc, asc, desc
 
 from pymygdala import LogHandler, SendNRAOEvent
 from s_code_project_updater import Telescope
-from shared.schema.src.schema import Author, Project, ExecutionBlock, \
-    create_session
-from shared.schema.src.schema.pstmodel import Person, UserAuthentication
-from shared.support.src.support.capo import get_my_capo_config
-from shared.support.src.support.logging import LOG_MESSAGE_FORMATTER, \
-    get_console_logger
+from schema import Author, Project, ExecutionBlock, create_session
+from schema.pstmodel import Person, UserAuthentication
+from support.capo import get_my_capo_config
+from support.logging import LOG_MESSAGE_FORMATTER, get_console_logger
 
 from ._version import ___version___ as version
 from .project_fetcher import ArchiveProjectFetcher
diff --git a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py
index 4c4737ddb040f5242940bf2d57c39c7d4b809f36..639c0c3040f2febc42f49316462262b70113f318 100644
--- a/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py
+++ b/apps/cli/utilities/s_code_project_updater/src/s_code_project_updater/project_fetcher.py
@@ -4,11 +4,10 @@ import warnings
 
 from sqlalchemy import exc as sa_exc, asc, desc
 
-from shared.schema.src.schema import ArchiveDBSession, create_session, \
-    ExecutionBlock
-from shared.schema.src.schema.model import Project, Author
-from shared.support.src.support.capo import get_my_capo_config
-from shared.support.src.support.logging import get_console_logger
+from schema import ArchiveDBSession, create_session, ExecutionBlock
+from schema.model import Project, Author
+from support.capo import get_my_capo_config
+from support.logging import get_console_logger
 
 from . import Telescope
 
diff --git a/apps/cli/utilities/s_code_project_updater/test/test_projects.py b/apps/cli/utilities/s_code_project_updater/test/test_projects.py
index c6b8e9e60db5111f15cf8f4240431f83da636f3f..afb5d065a995a25176c735b4bbf40e62326cd7ba 100644
--- a/apps/cli/utilities/s_code_project_updater/test/test_projects.py
+++ b/apps/cli/utilities/s_code_project_updater/test/test_projects.py
@@ -3,7 +3,7 @@ import warnings
 
 from sqlalchemy import exc as sa_exc
 
-from shared.schema.src.schema import Author, Project
+from schema.model import Author, Project
 
 
 class ScodeTestProject():
diff --git a/apps/cli/utilities/s_code_project_updater/test/test_updater.py b/apps/cli/utilities/s_code_project_updater/test/test_updater.py
index 6f161f685b73d42f22c9e5d96223b99e15a241de..83ea0a3b4c6bec2a040fdc4059679f3682e1d34c 100755
--- a/apps/cli/utilities/s_code_project_updater/test/test_updater.py
+++ b/apps/cli/utilities/s_code_project_updater/test/test_updater.py
@@ -1,15 +1,14 @@
 import logging
 import os
 import subprocess
-import unittest
 import warnings
 
 from sqlalchemy import exc as sa_exc
 
 import pytest
 from s_code_project_updater.commands import ScodeProjectUpdater
-from shared.schema.src.schema import create_session, Project
-from shared.support.src.support.logging import get_console_logger
+from schema import create_session, Project
+from support.logging import get_console_logger
 
 from .test_projects import \
     ScodeTestProject, ScienceTestProject, AlmaTestProject, get_author_pst_ids
@@ -18,30 +17,21 @@ _LOG = get_console_logger("scode_project_updater_tests", logging.DEBUG)
 _UPDATE_COMMAND = 'update_sproj'
 PROFILE = 'local'
 
-class UpdaterTestCase(unittest.TestCase):
+class TestUpdater:
     ''' Exercises ScodeProjectUpdater '''
-
-    @classmethod
-    def setUpClass(cls) -> None:
+    @pytest.fixture(autouse=True, scope='function')
+    def install_test_data(self):
         os.environ['CAPO_PROFILE'] = PROFILE
-        cls.return_values = build_updater_return_values()
-
-    @classmethod
-    def setUp(cls) -> None:
-        cls.initialize_test_data(cls)
-
-    @classmethod
-    def tearDownClass(cls) -> None:
-        cls.remove_test_data(cls)
+        self.initialize_test_data()
+        yield
+        self.remove_test_data()
 
     def test_dry_run_does_not_update(self):
         fake_project = ScodeTestProject().project
         project_code = fake_project.project_code
         try:
             new_title = 'this is the new title'
-            self.assertNotEqual(fake_project.title, new_title,
-                                f'new title should be {new_title}; got '
-                                f'{fake_project.title}')
+            assert fake_project.title != new_title
             args = [
                 '-C', project_code,
                 '-P', PROFILE,
@@ -50,17 +40,9 @@ class UpdaterTestCase(unittest.TestCase):
                 ]
             updated = ScodeProjectUpdater(args=args).update_project()
             # nothing should have been updated
-            self.assertEqual(fake_project.title, updated.title,
-                             f'expecting same title, but before is '
-                             f'{fake_project.title} and after is {updated.title}')
-            self.assertEqual(fake_project.abstract, updated.abstract,
-                             f'expecting same abstract, but before is '
-                             f'{fake_project.abstract} and updated is {updated.abstract}')
-            self.assertEqual(len(fake_project.authors),
-                             len(updated.authors),
-                             f'expecting same number of authors, '
-                             f'but before has {len(fake_project.authors)} '
-                             f'and after has {len(updated.authors)}')
+            assert fake_project.title == updated.title
+            assert fake_project.abstract == updated.abstract
+            assert len(fake_project.authors) == len(updated.authors)
         except SystemExit as exc:
             pytest.fail(f'unexpected failure with return code {exc.code}')
             raise
@@ -83,36 +65,24 @@ class UpdaterTestCase(unittest.TestCase):
             pytest.fail(f'unexpected failure with return code {exc.code}')
             raise
 
-        self.assertIsNotNone(updated, 'we should have gotten a project back')
-
-        self.assertEqual(fake_project.title, updated.title,
-                         f'expecting same title, but before is '
-                         f'{fake_project.title} and after is {updated.title}')
-        self.assertEqual(fake_project.abstract, updated.abstract,
-                         f'expecting same abstract, but before is '
-                         f'{fake_project.abstract} and updated is {updated.abstract}')
-        self.assertEqual(len(fake_project.authors),
-                         len(updated.authors),
-                         f'expecting same number of authors, '
-                         f'but before has {len(fake_project.authors)} '
-                         f'and after has {len(updated.authors)}')
+        assert updated is not None
+
+        assert fake_project.title == updated.title
+        assert fake_project.abstract == updated.abstract
+        assert len(fake_project.authors) == len(updated.authors)
         count = 0
         for orig_author in fake_project.authors:
             for author in updated.authors:
                 if author.username == orig_author.username:
                     count += 1
                     break
-        self.assertEqual(len(fake_project.authors), count,
-                         'before and after projects should have '
-                         'same authors')
+        assert len(fake_project.authors) == count
 
     def test_updates_abstract_only(self):
         fake_project = ScodeTestProject().project
         project_code = fake_project.project_code
         new_abstract = "Well, here's another nice mess you've gotten us into, Ollie"
-        self.assertNotEqual(fake_project.abstract, new_abstract,
-                            f'expecting new abstract {new_abstract} '
-                            f'but got {fake_project.abstract}')
+        assert fake_project.abstract != new_abstract
         args = [
             '-C', project_code,
             '-P', PROFILE,
@@ -122,14 +92,9 @@ class UpdaterTestCase(unittest.TestCase):
             updated = ScodeProjectUpdater(args=args).update_project()
             # only abstract should have been updated;
             # all else should be same
-            self.assertEqual(fake_project.title, updated.title,
-                             f'expecting same title, but before is '
-                             f'{fake_project.title} and after is {updated.title}')
-            self.assertEqual(new_abstract, updated.abstract,
-                             f'expecting same abstract, but before is '
-                             f'{fake_project.abstract} and updated is {updated.abstract}')
-            self.assertEqual(len(fake_project.authors),
-                             len(updated.authors))
+            assert fake_project.title == updated.title
+            assert new_abstract == updated.abstract
+            assert len(fake_project.authors) == len(updated.authors)
         except SystemExit as exc:
             pytest.fail(f'unexpected failure; return code = {exc.code}')
             raise
@@ -139,12 +104,8 @@ class UpdaterTestCase(unittest.TestCase):
         project_code = fake_project.project_code
         new_abstract = "I think you ought to know I'm feeling very depressed"
         new_title = 'A Survey of the Mattresses of Sqornshellous Zeta'
-        self.assertNotEqual(fake_project.abstract, new_abstract,
-                            f'expecting new abstract {new_abstract}, '
-                            f'but abstract was not changed from {fake_project.abstract}')
-        self.assertNotEqual(fake_project.title, new_title,
-                            f'expecting new title {new_title}, '
-                            f'but abstract was not changed from {fake_project.title}')
+        assert fake_project.abstract != new_abstract
+        assert fake_project.title != new_title
         args = [
             '-C', project_code,
             '-P', PROFILE,
@@ -153,13 +114,9 @@ class UpdaterTestCase(unittest.TestCase):
         ]
         try:
             updated = ScodeProjectUpdater(args=args).update_project()
-            self.assertEqual(new_title, updated.title,
-                             'title should  not have changed')
-            self.assertEqual(new_abstract, updated.abstract,
-                             'abstract should not have changed')
-            self.assertEqual(len(fake_project.authors),
-                             len(updated.authors),
-                             'authors should not have changed')
+            assert new_title == updated.title
+            assert new_abstract == updated.abstract
+            assert len(fake_project.authors) == len(updated.authors)
         except SystemExit as exc:
             pytest.fail(f'unexpected failure; exit code = {exc.code}')
             raise
@@ -172,16 +129,13 @@ class UpdaterTestCase(unittest.TestCase):
                               abstract=fake_project.abstract)
         new_abstract = "First there is a mountain, then there is no " \
                         "mountain, then there is"
-        self.assertNotEqual(new_abstract, fake_project.abstract)
+        assert new_abstract != fake_project.abstract
         new_project.abstract = new_abstract
         original_authors = fake_project.authors.copy()
-        self.assertEqual(4, len(original_authors),
-                         'expected 4 authors before update')
+        assert 4 == len(original_authors)
         last_author = original_authors[3]
         new_authors = original_authors[:3]
-        self.assertEqual(len(original_authors) - 1, len(new_authors),
-                         f'expecting {len(original_authors) - 1} new authors, '
-                         f'but there are {len(new_authors)}')
+        assert len(original_authors) - 1 == len(new_authors)
         new_project.authors = new_authors
         args = [
             '-C', project_code,
@@ -195,30 +149,23 @@ class UpdaterTestCase(unittest.TestCase):
         updated = None
         try:
             updated = ScodeProjectUpdater(args=args).update_project()
-            self.assertIsNotNone(updated, 'project should have been returned')
+            assert updated is not None
         except SystemExit as exc:
             pytest.fail(f'unexpected failure; return code = {exc.code}')
             raise
 
-        self.assertNotEqual(fake_project.abstract, updated.abstract,
-                            'abstract should have changed')
-        self.assertEqual(fake_project.title, updated.title,
-                         'title should not have changed')
-        expected = len(original_authors) - 1
-        actual = len(updated.authors)
-        self.assertEqual(expected, actual,
-                         'one author should have been removed')
+        assert fake_project.abstract != updated.abstract
+        assert fake_project.title == updated.title
+        assert len(original_authors) - 1 == len(updated.authors)
         authors_updated = last_author in updated.authors
-        self.assertFalse(authors_updated, 'THIS IS THE MESSAGE')
+        assert not authors_updated
         count = 0
         for orig_author in original_authors[:3]:
             for new_author in updated.authors:
                 if new_author.username == orig_author.username:
                     count += 1
                     break
-        self.assertEqual(len(new_authors), count,
-                         f'expected {len(new_authors)} authors in '
-                         f'updated project; there were {count}')
+        assert len(new_authors) == count
 
     def test_output_is_as_expected(self):
         fake_project = ScodeTestProject().project
@@ -230,22 +177,19 @@ class UpdaterTestCase(unittest.TestCase):
         updater = ScodeProjectUpdater(args=args)
         updater.update_project()
         output = updater.get_project_info()
-        self.assertIsNotNone(output, 'program output is expected')
-        self.assertTrue('Title: ' + fake_project.title in output,
-                        'title should be in output')
-        self.assertTrue('Abstract: ' + fake_project.abstract in output,
-                        'abstract should be in output')
+        assert output is not None
+        assert ('Title: ' + fake_project.title) in output
+        assert ('Abstract: ' + fake_project.abstract) in output
         pst_ids = [str(id) for id in get_author_pst_ids(fake_project)]
         pst_id_str = ' '.join(pst_ids)
-        self.assertTrue('Authors: ' + pst_id_str in output,
-                        f'output should have PST IDs {pst_ids}')
+        assert 'Authors: ' + pst_id_str in output
 
     def test_copes_with_single_pi(self):
         project = ScodeTestProject().project
         args = ['-P', PROFILE, '-C', project.project_code, '-I', '4686']
         try:
             updated = ScodeProjectUpdater(args=args).update_project()
-            self.assertEqual(1, len(updated.authors))
+            assert 1 == len(updated.authors)
         except SystemExit as ex:
             pytest.fail(f'update failed with exit code {ex.code}')
             raise
@@ -258,13 +202,12 @@ class UpdaterTestCase(unittest.TestCase):
 
         with pytest.raises(SystemExit) as exc:
             ScodeProjectUpdater(args=args).update_project()
-            self.assertEqual(2, exc.code, 'ALMA project should be rejected')
+        assert 2 == exc.value.code
 
     def test_update_failure_returns_expected_code(self):
         result = FailingUpdater().update_project()
-        self.assertIsInstance(result, SystemExit)
-        self.assertEqual(5, result.code,
-                         'expecting return code 5 for update failure')
+        assert isinstance(result, SystemExit)
+        assert 5 == result.code
 
     """ The following test should be moved to another test case, 
         where we'll use a bash script, via subprocess.call(), to create an 
@@ -282,57 +225,47 @@ class UpdaterTestCase(unittest.TestCase):
 
         # minimum required arguments -- profile & project -- omitted
         return_code = CommandLineUpdaterLauncher([]).run()
-        self.assertEqual(return_code, 2,
-                         'expected return code 2 for no args')
+        assert return_code == 2
 
         project_code = ScodeTestProject().project.project_code
 
         # profile not specified
         args = ['-C', project_code,]
         return_code = CommandLineUpdaterLauncher(args).run()
-        self.assertEqual(return_code, 2,
-                         'expecting return code 2 when profile not specified')
+        assert return_code == 2
 
         # project code not specified
         args = ['-P', PROFILE]
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 when project not specified')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # profile value missing
         args = ['-P', '-C', project_code]
         return_code = CommandLineUpdaterLauncher(args).run()
-        self.assertEqual(return_code, 2,
-                         'expecting return code 2 for missing profile')
+        assert return_code == 2
 
         # project code missing
         args = ['-P', PROFILE, '-C']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 for missing project code')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # bad project code
         args = ['-P', PROFILE, '-C', 'bogus']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 3,
-                         'expecting return code 3 for invalid project code')
+        assert CommandLineUpdaterLauncher(args).run() == 3
 
         # bad profile
         args = ['-P', 'not_a_profile', '-C', project_code]
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 1,
-                         'expecting return code 1 for invalid Capo profile')
+        assert CommandLineUpdaterLauncher(args).run() == 1
 
         # missing title as last argument
         args = ['-P', PROFILE, '-C', project_code, '-T']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 for missing title')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # missing title as first argument
         args = ['-T', '-P', PROFILE, '-C', project_code,]
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 2,
-                         'expecting return code 2 for missing title')
+        assert CommandLineUpdaterLauncher(args).run() == 2
 
         # nonexistent investigator
         args = ['-P', PROFILE, '-C', project_code, '-I', '-22']
-        self.assertEqual(CommandLineUpdaterLauncher(args).run(), 4,
-                         'expecting return code 4 for invalid investigator')
+        assert CommandLineUpdaterLauncher(args).run() == 4
 
 
     ### UTILITIES ###
@@ -454,16 +387,3 @@ class CommandLineUpdaterLauncher:
         except Exception as exc:
             _LOG.error(f'{exc}')
             return exc.returncode
-
-def build_updater_return_values():
-    ''' return codes and messages in the updater's "usage" string '''
-    return {
-        1: 'error with capo configuration',
-        2: 'error with input parameters',
-        3: 'project not found',
-        4: 'investigator not found',
-        5: 'update failed',
-    }
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 8b6d8921529a657ded635d61ecdd0d11aec7f695..0000000000000000000000000000000000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pika==1.1.0
-pycapo==0.3.1
diff --git a/schema/README b/schema/README
deleted file mode 100644
index 98e4f9c44effe479ed38c66ba922e7bcc672916f..0000000000000000000000000000000000000000
--- a/schema/README
+++ /dev/null
@@ -1 +0,0 @@
-Generic single-database configuration.
\ No newline at end of file
diff --git a/schema/__init__.py b/schema/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/schema/alembic.ini b/schema/alembic.ini
deleted file mode 100644
index ab882257362ff96b9ca34232e0c9ddfcaf229330..0000000000000000000000000000000000000000
--- a/schema/alembic.ini
+++ /dev/null
@@ -1,85 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = .
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# timezone to use when rendering the date
-# within the migration file as well as the filename.
-# string value is passed to dateutil.tz.gettz()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the
-# "slug" field
-# truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; this defaults
-# to ./versions.  When using multiple version
-# directories, initial revisions must be specified with --version-path
-# version_locations = %(here)s/bar %(here)s/bat ./versions
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-sqlalchemy.url = driver://user:pass@localhost/dbname
-
-
-[post_write_hooks]
-# post_write_hooks defines scripts or Python functions that are run
-# on newly generated revision scripts.  See the documentation for further
-# detail and examples
-
-# format using "black" - use the console_scripts runner, against the "black" entrypoint
-# hooks=black
-# black.type=console_scripts
-# black.entrypoint=black
-# black.options=-l 79
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/schema/env.py b/schema/env.py
deleted file mode 100644
index 70518a2eef734a8fffcd787cfa397309469f8e76..0000000000000000000000000000000000000000
--- a/schema/env.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from logging.config import fileConfig
-
-from sqlalchemy import engine_from_config
-from sqlalchemy import pool
-
-from alembic import context
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-fileConfig(config.config_file_name)
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-# target_metadata = mymodel.Base.metadata
-target_metadata = None
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
-    """Run migrations in 'offline' mode.
-
-    This configures the context with just a URL
-    and not an Engine, though an Engine is acceptable
-    here as well.  By skipping the Engine creation
-    we don't even need a DBAPI to be available.
-
-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
-    url = config.get_main_option("sqlalchemy.url")
-    context.configure(
-        url=url,
-        target_metadata=target_metadata,
-        literal_binds=True,
-        dialect_opts={"paramstyle": "named"},
-    )
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-
-def run_migrations_online():
-    """Run migrations in 'online' mode.
-
-    In this scenario we need to create an Engine
-    and associate a connection with the context.
-
-    """
-    connectable = engine_from_config(
-        config.get_section(config.config_ini_section),
-        prefix="sqlalchemy.",
-        poolclass=pool.NullPool,
-    )
-
-    with connectable.connect() as connection:
-        context.configure(
-            connection=connection, target_metadata=target_metadata
-        )
-
-        with context.begin_transaction():
-            context.run_migrations()
-
-
-if context.is_offline_mode():
-    run_migrations_offline()
-else:
-    run_migrations_online()
diff --git a/schema/script.py.mako b/schema/script.py.mako
deleted file mode 100644
index 2c0156303a8df3ffdc9de87765bf801bf6bea4a5..0000000000000000000000000000000000000000
--- a/schema/script.py.mako
+++ /dev/null
@@ -1,24 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision | comma,n}
-Create Date: ${create_date}
-
-"""
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-branch_labels = ${repr(branch_labels)}
-depends_on = ${repr(depends_on)}
-
-
-def upgrade():
-    ${upgrades if upgrades else "pass"}
-
-
-def downgrade():
-    ${downgrades if downgrades else "pass"}
diff --git a/schema/versions/.keep b/schema/versions/.keep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/services/archive/__init__.py b/services/archive/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/shared/messaging/events/__init__.py b/shared/messaging/events/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/shared/schema/__init__.py b/shared/schema/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/shared/support/__init__.py b/shared/support/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000