diff --git a/client/src/components/Applications/AppBrowser/AppBrowser.jsx b/client/src/components/Applications/AppBrowser/AppBrowser.jsx index fe6c4f6b7..80125b74f 100644 --- a/client/src/components/Applications/AppBrowser/AppBrowser.jsx +++ b/client/src/components/Applications/AppBrowser/AppBrowser.jsx @@ -1,7 +1,11 @@ import React, { useState } from 'react'; -import { NavLink as RRNavLink, useRouteMatch } from 'react-router-dom'; +import { + NavLink as RRNavLink, + useRouteMatch, + useLocation, +} from 'react-router-dom'; +import queryString from 'query-string'; import { useSelector, shallowEqual } from 'react-redux'; -// import PropTypes from 'prop-types'; import { Nav, NavItem, NavLink, TabContent, TabPane } from 'reactstrap'; import { AppIcon, Icon, Message } from '_common'; import './AppBrowser.scss'; @@ -14,6 +18,8 @@ const findAppTab = (categoryDict, appId) => { }; const AppBrowser = () => { + const location = useLocation(); + const { appVersion } = queryString.parse(location.search); const { params } = useRouteMatch(); const [activeTab, setActiveTab] = useState(); @@ -80,6 +86,10 @@ const AppBrowser = () => { (app.version ? `?appVersion=${app.version}` : '') } activeClassName="active" + isActive={() => + `${params.appId}-${appVersion}` === + `${app.appId}-${app.version}` + } > diff --git a/client/src/components/Applications/AppForm/AppForm.jsx b/client/src/components/Applications/AppForm/AppForm.jsx index 3594d5f50..ef1cec206 100644 --- a/client/src/components/Applications/AppForm/AppForm.jsx +++ b/client/src/components/Applications/AppForm/AppForm.jsx @@ -42,7 +42,7 @@ const appShape = PropTypes.shape({ maxMinutes: PropTypes.number, tags: PropTypes.arrayOf(PropTypes.string), }), - systemHasKeys: PropTypes.bool, + systemNeedsKeys: PropTypes.bool, pushKeysSystem: PropTypes.shape({}), exec_sys: PropTypes.shape({ host: PropTypes.string, @@ -186,17 +186,21 @@ export const AppSchemaForm = ({ app }) => { hasStorageSystems, downSystems, execSystem, + defaultSystem, } = useSelector((state) => { const matchingExecutionHost = Object.keys(state.allocations.hosts).find( (host) => app.exec_sys.host === host || app.exec_sys.host.endsWith(`.${host}`) ); - const { defaultHost, configuration } = state.systems.storage; + const { defaultHost, configuration, defaultSystem } = state.systems.storage; + const hasCorral = configuration.length && - ['cloud.corral.tacc.utexas.edu', 'data.tacc.utexas.edu'].some((s) => - defaultHost.endsWith(s) - ); + [ + 'cloud.corral.tacc.utexas.edu', + 'data.tacc.utexas.edu', + 'cloud.data.tacc.utexas.edu', + ].some((s) => defaultHost.endsWith(s)); return { allocations: matchingExecutionHost ? state.allocations.hosts[matchingExecutionHost] @@ -216,14 +220,14 @@ export const AppSchemaForm = ({ app }) => { .map((downSys) => downSys.hostname) : [], execSystem: state.app ? state.app.exec_sys.host : '', + defaultSystem, }; }, shallowEqual); - const hideManageAccount = useSelector( (state) => state.workbench.config.hideManageAccount ); - const { systemHasKeys, pushKeysSystem } = app; + const { systemNeedsKeys, pushKeysSystem } = app; const missingLicense = app.license.type && !app.license.enabled; const pushKeys = (e) => { @@ -258,9 +262,12 @@ export const AppSchemaForm = ({ app }) => { nodeCount: app.definition.jobAttributes.nodeCount, coresPerNode: app.definition.jobAttributes.coresPerNode, maxMinutes: app.definition.jobAttributes.maxMinutes, - archiveSystemDir: '', + archiveSystemId: + defaultSystem || app.definition.jobAttributes.archiveSystemId, + archiveSystemDir: app.definition.jobAttributes.archiveSystemDir, archiveOnAppError: true, appId: app.definition.id, + execSystemId: app.definition.jobAttributes.execSystemId, }; let missingAllocation = false; if (app.exec_sys.batchScheduler === 'SLURM') { @@ -293,11 +300,11 @@ export const AppSchemaForm = ({ app }) => {
{/* The !! is needed because the second value of this shorthand is interpreted as a literal 0 if not. */} - {!!(!systemHasKeys && hasStorageSystems) && ( + {!!(systemNeedsKeys && hasStorageSystems) && (
There was a problem accessing your default My Data file system. If - this is your first time logging in, you may need to   + this is your first time logging in, you may need to  { nodeCount: getNodeCountValidation(queue, app), coresPerNode: getCoresPerNodeValidation(queue), maxMinutes: getMaxMinutesValidation(queue).required('Required'), + archiveSystemId: Yup.string(), archiveSystemDir: Yup.string(), allocation: Yup.string() .required('Required') @@ -497,51 +505,57 @@ export const AppSchemaForm = ({ app }) => { return (
- -
-
- Inputs + + {Object.keys(appFields.fileInputs).length > 0 && ( +
+
+ Inputs +
+ {Object.entries(appFields.fileInputs).map( + ([name, field]) => { + // TODOv3 handle fileInputArrays https://jira.tacc.utexas.edu/browse/TV3-8 + return ( + + ); + } + )} + {Object.entries(appFields.appArgs).map(([name, field]) => { + return ( + + {field.options + ? field.options.map((item) => { + let val = item; + if (val instanceof String) { + const tmp = {}; + tmp[val] = val; + val = tmp; + } + return Object.entries(val).map( + ([key, value]) => ( + + ) + ); + }) + : null} + + ); + })} + {/* TODOv3 handle parameterSet.envVariables */}
- {Object.entries(appFields.fileInputs).map(([name, field]) => { - // TODOv3 handle fileInputArrays https://jira.tacc.utexas.edu/browse/TV3-8 - return ( - - ); - })} - {Object.entries(appFields.appArgs).map(([name, field]) => { - return ( - - {field.options - ? field.options.map((item) => { - let val = item; - if (val instanceof String) { - const tmp = {}; - tmp[val] = val; - val = tmp; - } - return Object.entries(val).map(([key, value]) => ( - - )); - }) - : null} - - ); - })} - {/* TODOv3 handle parameterSet.envVariables */} -
+ )}
Configuration @@ -631,15 +645,22 @@ export const AppSchemaForm = ({ app }) => { required /> {!app.definition.notes.isInteractive ? ( - archive/jobs/${YYYY-MM-DD}/${JOB_NAME}-${JOB_ID}.' // eslint-disable-line no-template-curly-in-string - )} - name="archivePath" - type="text" - placeholder="archive/jobs/${YYYY-MM-DD}/${JOB_NAME}-${JOB_ID}" // eslint-disable-line no-template-curly-in-string - /> + <> + + + ) : null}
- - - - - ); -}; - -export default DataFilesPushKeysModal; diff --git a/client/src/components/DataFiles/fixtures/DataFiles.systems.fixture.js b/client/src/components/DataFiles/fixtures/DataFiles.systems.fixture.js index 12f8024d3..83566f512 100644 --- a/client/src/components/DataFiles/fixtures/DataFiles.systems.fixture.js +++ b/client/src/components/DataFiles/fixtures/DataFiles.systems.fixture.js @@ -61,6 +61,7 @@ const systemsFixture = { errorMessage: null, loading: false, defaultHost: 'frontera.tacc.utexas.edu', + defaultSystem: 'frontera', }, definitions: { list: [ diff --git a/client/src/components/DataFiles/tests/DataFiles.test.js b/client/src/components/DataFiles/tests/DataFiles.test.js index 98627ebf1..0223d7c22 100644 --- a/client/src/components/DataFiles/tests/DataFiles.test.js +++ b/client/src/components/DataFiles/tests/DataFiles.test.js @@ -77,6 +77,7 @@ describe('DataFiles', () => { errorMessage: null, loading: false, defaultHost: 'cloud.corral.tacc.utexas.edu', + defaultSystem: 'cloud.data.community', }, definitions: { list: [], diff --git a/client/src/components/History/History.jsx b/client/src/components/History/History.jsx index a70e2e666..dad6ac537 100644 --- a/client/src/components/History/History.jsx +++ b/client/src/components/History/History.jsx @@ -1,13 +1,6 @@ import React from 'react'; -import { - Route, - Switch, - Redirect, - useRouteMatch, - NavLink as RRNavLink, -} from 'react-router-dom'; +import { Route, Switch, Redirect, useRouteMatch } from 'react-router-dom'; import { useSelector, useDispatch } from 'react-redux'; -import { Nav, NavItem, NavLink } from 'reactstrap'; import queryString from 'query-string'; import { Button, Section } from '_common'; diff --git a/client/src/components/_common/SystemsPushKeysModal/SystemsPushKeysModal.jsx b/client/src/components/_common/SystemsPushKeysModal/SystemsPushKeysModal.jsx index 8448c0e3b..68d771f88 100644 --- a/client/src/components/_common/SystemsPushKeysModal/SystemsPushKeysModal.jsx +++ b/client/src/components/_common/SystemsPushKeysModal/SystemsPushKeysModal.jsx @@ -35,22 +35,16 @@ const SystemsPushKeysModal = () => { }; const pushKeys = ({ password, token }) => { - const hostnames = system.login - ? [system.login.host, system.storage.host] - : [system.storage.host]; - [...new Set(hostnames)].forEach((hostname) => { - dispatch({ - type: 'SYSTEMS_PUSH_KEYS', - payload: { - systemId: system.id, - hostname, - password, - token, - type: system.type, - reloadCallback: reloadPage, - onSuccess, - }, - }); + dispatch({ + type: 'SYSTEMS_PUSH_KEYS', + payload: { + systemId: system.id, + hostname: system.host, + password, + token, + reloadCallback: reloadPage, + onSuccess, + }, }); }; @@ -93,30 +87,16 @@ const SystemsPushKeysModal = () => { will allow you to access this system from this portal.{' '}

- {system.login && ( - - )} - { label="TACC Token" required disabled={submitting} + autocomplete="off" /> diff --git a/client/src/redux/reducers/apps.reducers.js b/client/src/redux/reducers/apps.reducers.js index e61a59c68..ddcc33225 100644 --- a/client/src/redux/reducers/apps.reducers.js +++ b/client/src/redux/reducers/apps.reducers.js @@ -63,7 +63,7 @@ export const initialAppState = { definition: {}, error: { isError: false }, loading: false, - systemHasKeys: true, + systemNeedsKeys: false, pushKeysSystem: {}, exec_sys: {}, license: {}, @@ -75,7 +75,7 @@ export function app(state = initialAppState, action) { return { ...state, definition: action.payload.definition, - systemHasKeys: action.payload.systemHasKeys, + systemNeedsKeys: action.payload.systemNeedsKeys, pushKeysSystem: action.payload.pushKeysSystem, exec_sys: action.payload.exec_sys, license: action.payload.license, @@ -88,7 +88,7 @@ export function app(state = initialAppState, action) { loading: true, error: { isError: false }, definition: {}, - systemHasKeys: true, + systemNeedsKeys: false, pushKeysSystem: {}, exec_sys: {}, license: {}, diff --git a/client/src/redux/reducers/datafiles.reducers.js b/client/src/redux/reducers/datafiles.reducers.js index bd507493e..dc862bee3 100644 --- a/client/src/redux/reducers/datafiles.reducers.js +++ b/client/src/redux/reducers/datafiles.reducers.js @@ -5,6 +5,7 @@ export const initialSystemState = { errorMessage: null, loading: false, defaultHost: '', + defaultSystem: '', }, definitions: { list: [], @@ -40,6 +41,7 @@ export function systems(state = initialSystemState, action) { ...state.storage, configuration: action.payload.system_list, defaultHost: action.payload.default_host, + defaultSystem: action.payload.default_system, loading: false, }, }; @@ -151,7 +153,6 @@ export const initialFilesState = { mkdir: false, rename: false, link: false, - pushKeys: false, trash: false, empty: false, compress: false, @@ -169,7 +170,6 @@ export const initialFilesState = { upload: {}, mkdir: {}, rename: {}, - pushKeys: {}, link: {}, showpath: {}, makePublic: {}, diff --git a/client/src/redux/sagas/apps.sagas.js b/client/src/redux/sagas/apps.sagas.js index f9001ad9b..3574808d4 100644 --- a/client/src/redux/sagas/apps.sagas.js +++ b/client/src/redux/sagas/apps.sagas.js @@ -23,7 +23,7 @@ function* getApp(action) { if ( currentApp.definition.id === appId && currentApp.definition.version === appVersion && - currentApp.systemHasKeys + !currentApp.systemNeedsKeys ) { return; } diff --git a/client/src/redux/sagas/datafiles.sagas.js b/client/src/redux/sagas/datafiles.sagas.js index 7eee696ef..5ac248e2f 100644 --- a/client/src/redux/sagas/datafiles.sagas.js +++ b/client/src/redux/sagas/datafiles.sagas.js @@ -79,41 +79,6 @@ export function* watchFetchSystems() { yield takeLeading('FETCH_SYSTEM_DEFINITION', fetchSystemDefinition); } -export async function pushKeysUtil(system, form) { - const url = `/api/accounts/systems/${system}/keys/`; - const request = await fetch(url, { - method: 'PUT', - headers: { 'X-CSRFToken': Cookies.get('csrftoken') }, - credentials: 'same-origin', - body: JSON.stringify({ form, action: 'push' }), - }); - return request; -} - -export function* watchPushKeys() { - yield takeLeading('DATA_FILES_PUSH_KEYS', pushKeys); -} - -export function* pushKeys(action) { - const form = { - password: action.payload.password, - token: action.payload.token, - type: action.payload.type, - hostname: null, - }; - - yield call(pushKeysUtil, action.payload.system, form); - - yield call(action.payload.reloadCallback); - yield put({ - type: 'DATA_FILES_TOGGLE_MODAL', - payload: { - operation: 'pushKeys', - props: {}, - }, - }); -} - export function* watchFetchFiles() { yield takeLatest('FETCH_FILES', fetchFiles); } diff --git a/client/src/redux/sagas/fixtures/notificationsJobsEvents.fixture.js b/client/src/redux/sagas/fixtures/notificationsJobsEvents.fixture.js index 38624a110..787f20869 100644 --- a/client/src/redux/sagas/fixtures/notificationsJobsEvents.fixture.js +++ b/client/src/redux/sagas/fixtures/notificationsJobsEvents.fixture.js @@ -24,7 +24,7 @@ export const jobStatusUpdatePending = { archive: true, archiveOnAppError: true, // eslint-disable-next-line no-template-curly-in-string - archivePath: 'archive/jobs/2020-09-20/${JOB_NAME}-${JOB_ID}', + archivePath: 'archive/jobs/2020-09-20/${JobName}-${JobUUID}', archiveSystem: 'frontera.home.username', nodeCount: 1, processorsPerNode: 20, diff --git a/client/src/redux/sagas/fixtures/notificationsList.fixture.js b/client/src/redux/sagas/fixtures/notificationsList.fixture.js index efa5d9b7d..1e695a1f5 100644 --- a/client/src/redux/sagas/fixtures/notificationsList.fixture.js +++ b/client/src/redux/sagas/fixtures/notificationsList.fixture.js @@ -440,7 +440,7 @@ export const notificationsListFixture = { archive: true, archiveOnAppError: true, // eslint-disable-next-line no-template-curly-in-string - archivePath: 'archive/jobs/2020-09-20/${JOB_NAME}-${JOB_ID}', + archivePath: 'archive/jobs/2020-09-20/${JobName}-${JobUUID}', archiveSystem: 'frontera.home.username', nodeCount: 1, processorsPerNode: 20, diff --git a/client/src/redux/sagas/index.js b/client/src/redux/sagas/index.js index c93b968b6..6a879a905 100644 --- a/client/src/redux/sagas/index.js +++ b/client/src/redux/sagas/index.js @@ -7,7 +7,6 @@ import { watchFetchSystems, watchFetchFiles, watchFetchFilesModal, - watchPushKeys, watchScrollFiles, watchRename, watchMove, @@ -59,7 +58,6 @@ export default function* rootSaga() { watchJobs(), watchJobDetails(), watchFetchSystems(), - watchPushKeys(), watchFetchFiles(), watchFetchFilesModal(), watchScrollFiles(), diff --git a/client/src/redux/sagas/jobs.sagas.js b/client/src/redux/sagas/jobs.sagas.js index 0ac816852..481e9516e 100644 --- a/client/src/redux/sagas/jobs.sagas.js +++ b/client/src/redux/sagas/jobs.sagas.js @@ -106,13 +106,6 @@ export async function fetchJobDetailsUtil(jobUuid) { return result.response; } -export async function fetchSystemUtil(system) { - const result = await fetchUtil({ - url: `/api/accounts/systems/${system}/`, - }); - return result.response; -} - export function* getJobDetails(action) { const { jobUuid } = action.payload; yield put({ diff --git a/client/src/redux/sagas/systems.sagas.js b/client/src/redux/sagas/systems.sagas.js index eb05ac082..e39127788 100644 --- a/client/src/redux/sagas/systems.sagas.js +++ b/client/src/redux/sagas/systems.sagas.js @@ -6,7 +6,6 @@ function* pushSystemKeys(action) { const form = { password: action.payload.password, token: action.payload.token, - type: action.payload.type, hostname: action.payload.hostname, }; yield put({ diff --git a/server/conftest.py b/server/conftest.py index 964002077..e962605a0 100644 --- a/server/conftest.py +++ b/server/conftest.py @@ -136,6 +136,11 @@ def agave_file_listing_mock(): yield json.load(open(os.path.join(settings.BASE_DIR, 'fixtures/agave/files/file-listing.json'))) +@pytest.fixture +def tapis_file_listing_mock(): + yield json.load(open(os.path.join(settings.BASE_DIR, 'fixtures/agave/files/tapis-file-listing.json'))) + + @pytest.fixture def agave_listing_mock(): yield json.load(open(os.path.join(settings.BASE_DIR, 'fixtures/agave/files/listing.json'))) diff --git a/server/portal/apps/accounts/api/urls.py b/server/portal/apps/accounts/api/urls.py index d0b360ece..4f9f075dd 100644 --- a/server/portal/apps/accounts/api/urls.py +++ b/server/portal/apps/accounts/api/urls.py @@ -3,23 +3,10 @@ :synopsis: Manager handling anything pertaining to accounts """ from django.conf.urls import url -from portal.apps.accounts.api.views.systems import ( - SystemsListView, - SystemView, - SystemTestView, - SystemRolesView, - SystemKeysView -) +from portal.apps.accounts.api.views.systems import SystemKeysView app_name = 'portal_accounts_api' urlpatterns = [ - url(r'^systems/list/?$', SystemsListView.as_view()), - url(r'^systems/(?P[\w.\-\/]+)/test/?$', - SystemTestView.as_view()), url(r'^systems/(?P[\w.\-\/]+)/keys/?$', SystemKeysView.as_view()), - url(r'^systems/(?P[\w.\-\/]+)/roles/?$', - SystemRolesView.as_view()), - url(r'^systems/(?P[\w.\-\/]+)/?$', - SystemView.as_view()), ] diff --git a/server/portal/apps/accounts/api/views/systems.py b/server/portal/apps/accounts/api/views/systems.py index 719447b3c..79434075f 100644 --- a/server/portal/apps/accounts/api/views/systems.py +++ b/server/portal/apps/accounts/api/views/systems.py @@ -9,99 +9,10 @@ from django.utils.decorators import method_decorator from portal.views.base import BaseApiView from portal.apps.accounts.managers import accounts as AccountsManager -from django.conf import settings - logger = logging.getLogger(__name__) -@method_decorator(login_required, name='dispatch') -class SystemsListView(BaseApiView): - """Systems View - - Main view for anything involving multiple systems - """ - - def get(self, request): - """ GET """ - offset = int(request.GET.get('offset', 0)) - limit = int(request.GET.get('limit', 100)) - response = {} - - storage_systems = AccountsManager.storage_systems( - request.user, - offset=offset, - limit=limit - ) - - storage_systems = [system for system in storage_systems if not system.id.startswith(settings.PORTAL_PROJECTS_SYSTEM_PREFIX)] - - response['storage'] = storage_systems - - exec_systems = AccountsManager.execution_systems( - request.user, - offset=offset, - limit=limit - ) - response['execution'] = exec_systems - - return JsonResponse( - { - 'response': response, - 'status': 200 - }, - encoder=AccountsManager.agave_system_serializer_cls - ) - - -@method_decorator(login_required, name='dispatch') -class SystemView(BaseApiView): - """Systems View - - Main view for anything involving one single system - """ - - def get(self, request, system_id): - """GET""" - system = AccountsManager.get_system(request.user, system_id) - return JsonResponse( - { - 'response': system, - 'status': 200 - }, - encoder=AccountsManager.agave_system_serializer_cls - ) - - -@method_decorator(login_required, name='dispatch') -class SystemTestView(BaseApiView): - """Systems View - - Main view for anything involving a system test - """ - - def put(self, request, system_id): # pylint: disable=no-self-use - """PUT""" - success, result = AccountsManager.test_system( - request.user, system_id - ) - if success: - return JsonResponse( - { - 'response': result, - 'status': 200 - } - ) - - return JsonResponse( - { - 'response': result, - 'status': 500 - }, - status=500 - ) - - @method_decorator(login_required, name='dispatch') class SystemKeysView(BaseApiView): """Systems View @@ -117,10 +28,9 @@ def put(self, request, system_id): """ body = json.loads(request.body) action = body['action'] - op = getattr(self, action) # pylint: disable=invalid-name + op = getattr(self, action) return op(request, system_id, body) - # pylint: disable=no-self-use, unused-argument def reset(self, request, system_id, body): """Resets a system's set of keys @@ -128,7 +38,7 @@ def reset(self, request, system_id, body): :param str system_id: System id """ pub_key = AccountsManager.reset_system_keys( - request.user.username, + request.user, system_id ) return JsonResponse({ @@ -144,26 +54,18 @@ def push(self, request, system_id, body): """ AccountsManager.reset_system_keys( - request.user.username, - system_id + request.user, + system_id, + hostname=body['form']['hostname'] ) - success, result, http_status = AccountsManager.add_pub_key_to_resource( - request.user.username, + _, result, http_status = AccountsManager.add_pub_key_to_resource( + request.user, password=body['form']['password'], token=body['form']['token'], system_id=system_id, hostname=body['form']['hostname'] ) - # if success and body['form']['type'] == 'STORAGE': - # # Index the user's home directory once keys are successfully pushed. - # # Schedule indexing for 11:59:59 today. - # index_time = datetime.now().replace(hour=11, minute=59, second=59) - # agave_indexer.apply_async(args=[system_id], eta=index_time) - # return JsonResponse({ - # 'systemId': system_id, - # 'message': 'OK' - # }) return JsonResponse( { @@ -172,16 +74,3 @@ def push(self, request, system_id, body): }, status=http_status ) - - -@method_decorator(login_required, name='dispatch') -class SystemRolesView(BaseApiView): - """Systems Roles View - - View for system roles inspection - """ - - def get(self, request, system_id): - client = request.user.tapis_oauth.client - data = client.systems.listRoles(systemId=system_id) - return JsonResponse({"status": 200, "response": data}) diff --git a/server/portal/apps/accounts/api/views/unit_test.py b/server/portal/apps/accounts/api/views/unit_test.py deleted file mode 100644 index 17b6087dc..000000000 --- a/server/portal/apps/accounts/api/views/unit_test.py +++ /dev/null @@ -1,59 +0,0 @@ -from django.test import TestCase, RequestFactory, override_settings -from mock import patch, MagicMock -import pytest -from django.contrib.auth import get_user_model -from portal.apps.accounts.api.views.systems import SystemsListView -from portal.libs.agave.models.systems.storage import StorageSystem - - -@pytest.mark.django_db(transaction=True) -class TestSystemsListView(TestCase): - fixtures = ['users', 'auth'] - - @classmethod - def setUpClass(cls): - super(TestSystemsListView, cls).setUpClass() - # Mock AccountsManager class - cls.mock_AccountsManager_patcher = patch('portal.apps.accounts.api.views.systems.AccountsManager') - cls.mock_AccountsManager = cls.mock_AccountsManager_patcher.start() - - cls.mock_client = MagicMock() - cls.mock_client.systems.get.return_value = {} - - cls.rf = RequestFactory() - cls.view = SystemsListView() - - @classmethod - def tearDownClass(cls): - super(TestSystemsListView, cls).tearDownClass() - cls.mock_AccountsManager_patcher.stop() - - def setUp(self): - super(TestSystemsListView, self).setUp() - - def tearDown(self): - super(TestSystemsListView, self).tearDown() - - @override_settings(PORTAL_PROJECTS_SYSTEM_PREFIX='project.prefix') - @patch('portal.apps.accounts.api.views.systems.JsonResponse') - def test_project_filter(self, mock_JsonResponse): - # Make some mock systems to be filtered - self.mock_AccountsManager.storage_systems.return_value = [ - StorageSystem(self.mock_client, id=id) for id in [ - "project.prefix.system1", - "project.prefix.system2", - "not.a.project.system1", - "not.a.project.system2" - ] - ] - self.mock_AccountsManager.execution_systems.return_value = [] - request = self.rf.get("/api/accounts/systems/list") - request.user = get_user_model().objects.get(username="username") - self.view.get(request) - - # See what got passed to the JsonResponse object - # (serialization doesn't work on the mocked StorageSystems) - args, kwargs = mock_JsonResponse.call_args_list[0] - - # There should be two storage systems returned - self.assertEqual(len(args[0]["response"]["storage"]), 2) diff --git a/server/portal/apps/accounts/managers/accounts.py b/server/portal/apps/accounts/managers/accounts.py index b802be0bc..aab95a9f6 100644 --- a/server/portal/apps/accounts/managers/accounts.py +++ b/server/portal/apps/accounts/managers/accounts.py @@ -12,13 +12,10 @@ ChannelException, SSHException ) -from portal.utils import encryption as EncryptionUtil -from portal.libs.agave.models.systems.storage import StorageSystem -from portal.libs.agave.models.systems.execution import ExecutionSystem -from portal.libs.agave.serializers import BaseAgaveSystemSerializer +from portal.utils.encryption import createKeyPair from portal.apps.accounts.models import SSHKeys from portal.apps.accounts.managers.ssh_keys import KeyCannotBeAdded -from portal.apps.accounts.managers.user_systems import UserSystemsManager +from portal.apps.onboarding.steps.system_access_v3 import create_system_credentials logger = logging.getLogger(__name__) @@ -60,103 +57,29 @@ def _lookup_keys_manager(user, password, token): return cls(user.username, password, token) -def setup(username, system): - """Fires necessary steps for setup - - Called asynchronously from portal.apps.auth.tasks.setup_user - - As of 03/2018 a new account setup means creating a home directory - (optional), creating an Agave system for that home directory - and saving the newly created keys in the database. - The private key will be encrypted using AES. - - :param str username: Account's username to setup - - :return: home_dir - - .. note:: - The django setting `PORTAL_USER_ACCOUNT_SETUP_STEPS` can be used to - add any additional steps after the default setup. - """ - - user = check_user(username) - mgr = UserSystemsManager(user, system) - home_dir = mgr.get_private_directory(user) - systemId = mgr.get_system_id() - system = StorageSystem(user.tapis_oauth.client, id=systemId) - success, result = system.test() - if success: - logger.info( - "{username} has valid configuration for {systemId}, skipping creation".format( - username=username, systemId=systemId - ) - ) - return home_dir - mgr.setup_private_system(user) - - return home_dir - - -def reset_system_keys(username, system_id): +def reset_system_keys(user, system_id, hostname=None): """Reset system's Keys - Creates a new set of keys, saves the set of key to the DB - and updates the Agave System. - - :param str username: Username + Creates a new set of keys, saves the set of keys to the DB + and updates the Tapis System. - .. note:: - If :param:`system_id` is a home system then the Home Manager - class will be used to reset the keys. - This because there might be some specific actions to do - when managing home directories + :param user: Django User object """ - user = check_user(username) + logger.info(f"Resetting credentials for user {user.username} on system {system_id}") + (priv_key_str, publ_key_str) = createKeyPair() + create_system_credentials(user, publ_key_str, priv_key_str, system_id, skipCredentialCheck=True) - sys_dict = user.tapis_oauth.client.systems.get(systemId=system_id) - if sys_dict['type'] == StorageSystem.TYPES.STORAGE: - sys = StorageSystem.from_dict(user.tapis_oauth.client, sys_dict) - elif sys_dict['type'] == ExecutionSystem.TYPES.EXECUTION: - sys = ExecutionSystem.from_dict(user.tapis_oauth.client, sys_dict) - - private_key = EncryptionUtil.create_private_key() - priv_key_str = EncryptionUtil.export_key(private_key, 'PEM') - public_key = EncryptionUtil.create_public_key(private_key) - publ_key_str = EncryptionUtil.export_key(public_key, 'OpenSSH') - - sys.set_storage_keys( - username, - priv_key_str, - publ_key_str, - update=(sys.type == StorageSystem.TYPES.STORAGE) - ) - SSHKeys.objects.update_keys( - user, - system_id=system_id, - priv_key=priv_key_str, - pub_key=publ_key_str - ) + if hostname is None: + sys = user.tapis_oauth.client.systems.getSystem(systemId=system_id) + hostname = sys.host - # Update keys for hostname too + # Update keys for hostname SSHKeys.objects.update_hostname_keys( user, - hostname=sys.storage.host, + hostname=hostname, priv_key=priv_key_str, pub_key=publ_key_str ) - if sys.type == ExecutionSystem.TYPES.EXECUTION: - sys.set_login_keys( - username, - priv_key_str, - publ_key_str - ) - - SSHKeys.objects.update_hostname_keys( - user, - hostname=sys.login.host, - priv_key=priv_key_str, - pub_key=publ_key_str - ) return publ_key_str @@ -168,18 +91,20 @@ def queue_pub_key_setup( system_id, hostname, port=22 -): # pylint: disable=too-many-arguments +): """Queue Public Key Setup Convenient function to queue a specific celery task """ + user = check_user(username) + from portal.apps.accounts.tasks import ( setup_pub_key, monitor_setup_pub_key ) res = setup_pub_key.apply_async( kwargs={ - 'username': username, + 'user': user, 'password': password, 'token': token, 'system_id': system_id, @@ -196,43 +121,40 @@ def queue_pub_key_setup( def add_pub_key_to_resource( - username, + user, password, token, system_id, hostname=None, port=22 -): # pylint: disable=too-many-arguments +): """Add Public Key to Remote Resource - :param str username: Username + :param user: Django User object :param str password: Username's pasword to remote resource :param str token: TACC's token - :param str system_id: Agave system's id + :param str system_id: Tapis system's id :param str hostname: Resource's hostname :param int port: Port to use for ssh connection :raises: :class:`~portal.apps.accounts.managers.` """ - if hostname is None: - user = check_user(username) - sys = get_system(user, system_id) - hostname = sys.storage.host - success = True - user = check_user(username) mgr = _lookup_keys_manager(user, password, token) message = "add_pub_key_to_resource" + + logger.info(f"Adding public key for user {user.username} on system {system_id}") try: + if hostname is None: + sys = user.tapis_oauth.client.systems.getSystem(systemId=system_id) + hostname = sys.host + transport = mgr.get_transport(hostname, port) try: pub_key = user.ssh_keys.for_hostname(hostname).public except ObjectDoesNotExist: - try: - pub_key = user.ssh_keys.for_system(system_id).public - except Exception: - raise + raise message = mgr.add_public_key( system_id, hostname, @@ -270,74 +192,13 @@ def add_pub_key_to_resource( return success, message, status -def storage_systems(user, offset=0, limit=100): - """Return all storage systems for a user. - - :param user: Django user's instance - :param int offset: Offset. - :param int limit: Limit. - """ - systems = [] - res = StorageSystem.list( - user.tapis_oauth.client, - type=StorageSystem.TYPES.STORAGE, - offset=offset, - limit=limit - ) - systems = list(res) - return systems - - -def execution_systems(user, offset=0, limit=100): - """Return all execution systems for a user. - - :param user: Django user's instance - :param int offset: Offset. - :param int limit: Limit. - """ - systems = [] - res = ExecutionSystem.list( - user.tapis_oauth.client, - type=ExecutionSystem.TYPES.EXECUTION, - offset=offset, - limit=limit - ) - systems = list(res) - return systems - - def get_system(user, system_id): """Returns system - :param user: Django's user object + :param user: Django User object :param str system_id: System id :returns: System object :rtype: :class:`StorageSystem` or :class:`ExecutionSystem` """ - system = user.tapis_oauth.client.systems.get(systemId=system_id) - if system.type == StorageSystem.TYPES.STORAGE: - sys = StorageSystem.from_dict(user.tapis_oauth.client, system) - elif system.type == ExecutionSystem.TYPES.EXECUTION: - sys = ExecutionSystem.from_dict(user.tapis_oauth.client, system) - - sys.test() - return sys - - -def test_system(user, system_id): - """Test system - - :param user: Django's user object - :param str system_id: System id - :returns: message - :rtype: str - """ - system = get_system(user, system_id) - success, result = system.test() - return success, { - 'message': result, - 'systemId': system_id - } - - -agave_system_serializer_cls = BaseAgaveSystemSerializer # pylint:disable=C0103 + system = user.tapis_oauth.client.systems.getSystem(systemId=system_id) + return system diff --git a/server/portal/apps/accounts/managers/ssh_keys.py b/server/portal/apps/accounts/managers/ssh_keys.py index 214d9102b..6c09b53ce 100644 --- a/server/portal/apps/accounts/managers/ssh_keys.py +++ b/server/portal/apps/accounts/managers/ssh_keys.py @@ -59,7 +59,8 @@ def _tacc_prompt_handler( """ answers = { 'password': self.password, - 'tacc_token_code': self.token + 'tacc_token_code': self.token, + 'tacc_token': self.token } resp = [] logger.debug('title: %s', title) diff --git a/server/portal/apps/accounts/managers/unit_test.py b/server/portal/apps/accounts/managers/unit_test.py index 1d4af42fe..75292c528 100644 --- a/server/portal/apps/accounts/managers/unit_test.py +++ b/server/portal/apps/accounts/managers/unit_test.py @@ -1,13 +1,9 @@ -from django.test import ( - TestCase, - override_settings -) +from django.test import TestCase from mock import MagicMock, patch -from django.core.exceptions import ObjectDoesNotExist -from portal.apps.accounts.managers.accounts import add_pub_key_to_resource, setup +from portal.apps.accounts.managers.accounts import add_pub_key_to_resource from portal.apps.accounts.managers.ssh_keys import KeysManager +from portal.apps.accounts.models import SSHKeys from django.contrib.auth import get_user_model -from django.contrib.auth.models import User from portal.apps.accounts.managers.ssh_keys import KeyCannotBeAdded from paramiko.ssh_exception import ( AuthenticationException, @@ -17,97 +13,86 @@ import pytest -@pytest.mark.django_db(transaction=True) -class AddPubKeyTests(TestCase): +@pytest.fixture +def user_with_keys(regular_user): + SSHKeys.objects.update_hostname_keys( + regular_user, + hostname='data.tacc.utexas.edu', + priv_key='privkey', + pub_key='pubkey' + ) + yield regular_user - # Setup fresh mocks before each test - def setUp(self): - # Patch _lookup_keys_manager function to return a mocked KeysManager class - self.patch_lookup_keys_manager = patch( - 'portal.apps.accounts.managers.accounts._lookup_keys_manager', - return_value=MagicMock( - spec=KeysManager - ) - ) - self.mock_lookup_keys_manager = self.patch_lookup_keys_manager.start() - - # Patch check_user to return a mocked User model - self.patch_check_user = patch('portal.apps.accounts.managers.accounts.check_user', - return_value=MagicMock( - spec=User - ) - ) - self.mock_check_user = self.patch_check_user.start() - - # Teardown mocks after each test - self.addCleanup(self.patch_lookup_keys_manager.stop) - self.addCleanup(self.patch_check_user.stop) - - def run_add_pub_key_to_resource(self): - username = "testuser" - password = "testpassword" - token = "123456" - system_id = "portal-home.testuser" - hostname = "data.tacc.utexas.edu" - return add_pub_key_to_resource(username, password, token, system_id, hostname) - - # Patch KeysManager.ssh_keys.for_system function to throw Exception. - # In reality, accessing ssh_keys attribute would throw RelatedOjbectDoesNotExist, - # but this is a dynamically created Exception class that is difficult to simulate/catch - # - # Exception occurs when ssh_keys are not tracked by user model and the user must reset keys - def test_user_model_exception(self): - self.mock_check_user.return_value.ssh_keys = MagicMock( - for_system=MagicMock( - side_effect=ObjectDoesNotExist("Mock Exception while setting pub_key") - ), - for_hostname=MagicMock( - side_effect=ObjectDoesNotExist("Mock Exception while setting pub_key") - ) + +@pytest.fixture +def mock_lookup_keys_manager(mocker): + yield mocker.patch( + 'portal.apps.accounts.managers.accounts._lookup_keys_manager', + return_value=MagicMock( + spec=KeysManager ) - result, message, status = self.run_add_pub_key_to_resource() - self.assertFalse(result) - self.assertEqual(status, 409) - - # Patch returned KeysManager mock so that its add_public_key function throws the desired exception - # - # AuthenticationException occurs with bad password/token when trying to push keys - def test_authentication_exception(self): - self.mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=AuthenticationException()) - result, message, status = self.run_add_pub_key_to_resource() - self.assertFalse(result) - self.assertEqual(status, 403) - - # Channel exception occurs when server is reachable but returns an error while paramiko is attempting to open a channel - def test_channel_exception(self): - self.mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=ChannelException(999, "Mock Channel Exception")) - result, message, status = self.run_add_pub_key_to_resource() - self.assertFalse(result) - self.assertEqual(status, 502) - - # SSHException occurs when paramiko is unable to open SSH connection to server - def test_ssh_exception(self): - self.mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=SSHException()) - result, message, status = self.run_add_pub_key_to_resource() - self.assertFalse(result) - self.assertEqual(status, 502) - - # KeyCannotBeAdded exception occurs when authorized_keys file cannot be modified - def test_KeyCannotBeAdded_exception(self): - self.mock_lookup_keys_manager.return_value.add_public_key = MagicMock( - side_effect=KeyCannotBeAdded("MockKeyCannotBeAdded", "MockOutput", "MockErrorOutput")) - result, message, status = self.run_add_pub_key_to_resource() - self.assertFalse(result) - self.assertEqual(status, 503) - - # Catch all for unknown exception types - def test_unknown_exception(self): - exception_message = "Mock unknown exception" - self.mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=Exception(exception_message)) - try: - result, message, status = self.run_add_pub_key_to_resource() - except Exception as exc: - self.assertEqual(str(exc), exception_message) + ) + + +def _run_add_pub_key_to_resource(user): + password = "testpassword" + token = "123456" + system_id = "portal-home.testuser" + hostname = "data.tacc.utexas.edu" + return add_pub_key_to_resource(user, password, token, system_id, hostname) + + +# AuthenticationException occurs with bad password/token when trying to push keys +def test_authentication_exception(user_with_keys, mock_lookup_keys_manager): + mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=AuthenticationException("Authentication failed.")) + result, message, status = _run_add_pub_key_to_resource(user_with_keys) + assert result is False + assert status == 403 + assert message == "Authentication failed." + + +# Channel exception occurs when server is reachable but returns an error while paramiko is attempting to open a channel +def test_channel_exception(user_with_keys, mock_lookup_keys_manager): + mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=ChannelException(999, "Mock Channel Exception")) + result, message, status = _run_add_pub_key_to_resource(user_with_keys) + assert result is False + assert status == 502 + + +# SSHException occurs when paramiko is unable to open SSH connection to server +def test_ssh_exception(user_with_keys, mock_lookup_keys_manager): + mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=SSHException()) + result, message, status = _run_add_pub_key_to_resource(user_with_keys) + assert result is False + assert status == 502 + + +# KeyCannotBeAdded exception occurs when authorized_keys file cannot be modified +def test_KeyCannotBeAdded_exception(user_with_keys, mock_lookup_keys_manager): + mock_lookup_keys_manager.return_value.add_public_key = MagicMock( + side_effect=KeyCannotBeAdded("MockKeyCannotBeAdded", "MockOutput", "MockErrorOutput")) + result, message, status = _run_add_pub_key_to_resource(user_with_keys) + assert result is False + assert status == 503 + assert message == "KeyCannotBeAdded" + + +# Catch all for unknown exception types +def test_unknown_exception(user_with_keys, mock_lookup_keys_manager): + exception_message = "Mock unknown exception" + mock_lookup_keys_manager.return_value.add_public_key = MagicMock(side_effect=Exception(exception_message)) + try: + result, message, status = _run_add_pub_key_to_resource(user_with_keys) + except Exception as exc: + assert str(exc) == exception_message + + +# Exception occurs when ssh_keys are not tracked by user model and the user must reset keys +def test_user_model_exception(regular_user, mock_lookup_keys_manager): + result, message, status = _run_add_pub_key_to_resource(regular_user) + assert result is False + assert status == 409 + assert message == "User has no ssh_keys." @pytest.mark.django_db(transaction=True) @@ -139,17 +124,3 @@ def setUp(self): self.addCleanup(self.mock_systems_manager_patcher.stop) self.addCleanup(self.mock_storage_system_patcher.stop) self.addCleanup(self.mock_client_patcher.stop) - - @override_settings(PORTAL_USER_ACCOUNT_SETUP_STEPS=[]) - def test_setup_no_preexisting(self): - self.mock_storage_system.return_value.test.return_value = (False, None) - setup("username", "system") - self.mock_systems_manager.return_value.get_private_directory.assert_called_with(self.mock_user) - self.mock_systems_manager.return_value.setup_private_system.assert_called_with(self.mock_user) - - @override_settings(PORTAL_USER_ACCOUNT_SETUP_STEPS=[]) - def test_setup_preexisting(self): - self.mock_storage_system.return_value.test.return_value = (True, None) - setup("username", "system") - self.mock_systems_manager.return_value.get_private_directory.assert_called_with(self.mock_user) - self.mock_systems_manager.return_value.setup_private_system.assert_not_called() diff --git a/server/portal/apps/accounts/tasks/ssh.py b/server/portal/apps/accounts/tasks/ssh.py index 032f089ff..b595c6512 100644 --- a/server/portal/apps/accounts/tasks/ssh.py +++ b/server/portal/apps/accounts/tasks/ssh.py @@ -18,7 +18,7 @@ track_started=True ) # pylint: disable=too-many-arguments def setup_pub_key( - username, + user, password, token, system_id, @@ -28,7 +28,7 @@ def setup_pub_key( """Setup public keys for user""" from portal.apps.accounts.managers import accounts as AccountsManager output = AccountsManager.add_pub_key_to_resource( - username, + user, password, token, system_id, diff --git a/server/portal/apps/auth/tasks.py b/server/portal/apps/auth/tasks.py index 133eb5171..f28317570 100644 --- a/server/portal/apps/auth/tasks.py +++ b/server/portal/apps/auth/tasks.py @@ -1,4 +1,3 @@ -from celery import shared_task from portal.apps.users.utils import get_allocations, get_tas_allocations from django.conf import settings import logging @@ -6,19 +5,6 @@ logger = logging.getLogger(__name__) -@shared_task(bind=True, max_retries=None) -def setup_user(self, username, system): - """Setup workflow for each user - - Called asynchronously from portal.apps.auth.views.tapis_oauth_callback - :param str username: string username to setup systems for - :param dict systems: dict of systems from settings - """ - from portal.apps.accounts.managers.accounts import setup - logger.info(f"Setup task for {username} launched on {system}") - setup(username, system) - - def get_user_storage_systems(username, systems): """Create list of accessible storage system names for a user diff --git a/server/portal/apps/datafiles/views.py b/server/portal/apps/datafiles/views.py index 55c00b7a7..3b402612d 100644 --- a/server/portal/apps/datafiles/views.py +++ b/server/portal/apps/datafiles/views.py @@ -35,6 +35,7 @@ def get(self, request): # compare available storage systems to the systems a user can access response = {'system_list': []} if request.user.is_authenticated: + # TODOv3: simplify this if local_systems: user_systems = get_user_storage_systems(request.user.username, local_systems) for system_name, details in user_systems.items(): @@ -51,6 +52,7 @@ def get(self, request): ) default_system = user_systems[settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT] response['default_host'] = default_system['host'] + response['default_system'] = default_system['systemId'] if portal_systems: response['system_list'] += portal_systems return JsonResponse(response) diff --git a/server/portal/apps/datafiles/views_unit_test.py b/server/portal/apps/datafiles/views_unit_test.py index f13096d99..7da57286a 100644 --- a/server/portal/apps/datafiles/views_unit_test.py +++ b/server/portal/apps/datafiles/views_unit_test.py @@ -6,6 +6,7 @@ from requests.exceptions import HTTPError from portal.apps.datafiles.models import Link from django.conf import settings +from tapipy.tapis import TapisResult pytestmark = pytest.mark.django_db @@ -184,11 +185,31 @@ def logging_metric_mock(mocker): def test_tapis_file_view_get_is_logged_for_metrics(client, authenticated_user, mock_tapis_client, - agave_file_listing_mock, agave_listing_indexer, logging_metric_mock): - mock_tapis_client.files.list.return_value = agave_file_listing_mock + tapis_file_listing_mock, logging_metric_mock): + tapis_listing_result = [TapisResult(**f) for f in tapis_file_listing_mock] + mock_tapis_client.files.listFiles.return_value = tapis_listing_result response = client.get("/api/datafiles/tapis/listing/private/frontera.home.username/test.txt/?length=1234") assert response.status_code == 200 - assert response.json() == {"data": {"listing": agave_file_listing_mock, "reachedEnd": True}} + assert response.json() == { + "data": { + "listing": [ + { + 'system': 'frontera.home.username', + 'type': 'dir' if f.type == 'dir' else 'file', + 'format': 'folder' if f.type == 'dir' else 'raw', + 'mimeType': f.mimeType, + 'path': f.path, + 'name': f.name, + 'length': f.size, + 'lastModified': f.lastModified, + '_links': { + 'self': {'href': f.url} + } + } for f in tapis_listing_result + ], + "reachedEnd": True + } + } # Ensure metric-related logging is being performed logging_metric_mock.assert_called_with( @@ -213,7 +234,7 @@ def test_tapis_file_view_put_is_logged_for_metrics(client, authenticated_user, m def test_tapis_file_view_post_is_logged_for_metrics(client, authenticated_user, mock_tapis_client, - agave_indexer, agave_listing_indexer, logging_metric_mock, + agave_indexer, logging_metric_mock, agave_file_mock, text_file_fixture): mock_tapis_client.files.importData.return_value = agave_file_mock response = client.post("/api/datafiles/tapis/upload/private/frontera.home.username/", @@ -304,6 +325,7 @@ def test_systems_list(client, authenticated_user, mocker, get_user_data): response = client.get('/api/datafiles/systems/list/') assert response.json() == { "default_host": "frontera.tacc.utexas.edu", + "default_system": "frontera.home.{username}", "system_list": [ { "name": "My Data (Frontera)", diff --git a/server/portal/apps/onboarding/steps/system_access_v3.py b/server/portal/apps/onboarding/steps/system_access_v3.py index 72c204ec7..38e189cf0 100644 --- a/server/portal/apps/onboarding/steps/system_access_v3.py +++ b/server/portal/apps/onboarding/steps/system_access_v3.py @@ -4,21 +4,46 @@ from portal.apps.onboarding.steps.abstract import AbstractStep from portal.apps.onboarding.state import SetupState from django.conf import settings -from portal.utils.encryption import create_private_key, create_public_key, export_key +from portal.utils.encryption import createKeyPair +from portal.libs.agave.utils import service_account from tapipy.errors import BaseTapyException -def createKeyPair(): - private_key = create_private_key() - priv_key_str = export_key(private_key, 'PEM') - public_key = create_public_key(private_key) - publ_key_str = export_key(public_key, 'OpenSSH') +logger = logging.getLogger(__name__) - return priv_key_str, publ_key_str + +def create_system_credentials(user, public_key, private_key, system_id, skipCredentialCheck=False) -> int: + """ + Set an RSA key pair as the user's auth credential on a Tapis system. + """ + logger.info(f"Creating user credential for {user.username} on Tapis system {system_id}") + data = {'privateKey': private_key, 'publicKey': public_key} + client = user.tapis_oauth.client + client.systems.createUserCredential( + systemId=system_id, + userName=user.username, + skipCredentialCheck=skipCredentialCheck, + **data + ) + + +def set_user_permissions(user, system_id): + """Apply read/write/execute permissions to a user on a system.""" + logger.info(f"Adding {user.username} permissions to Tapis system {system_id}") + client = service_account() + client.systems.grantUserPerms( + systemId=system_id, + userName=user.username, + permissions=['READ', 'MODIFY', 'EXECUTE']) + client.files.grantPermissions( + systemId=system_id, + path="/", + username=user.username, + permission='MODIFY' + ) class SystemAccessStepV3(AbstractStep): - logger = logging.getLogger(__name__) def __init__(self, user): """ @@ -47,41 +72,45 @@ def register_public_key(self, publicKey, system_id) -> int: response.raise_for_status return response.status_code - def push_system_credentials(self, public_key, private_key, system_id) -> int: - """ - Set an RSA key pair as the user's auth credential on a Tapis system. - """ - data = {'privateKey': private_key, 'publicKey': public_key} - self.user.tapis_oauth.client.systems.createUserCredential( - systemId=system_id, - userName=self.user.username, - **data - ) - def check_system(self, system_id) -> None: """ Check whether a user already has access to a storage system by attempting a listing. """ self.user.tapis_oauth.client.files.listFiles(systemId=system_id, path="/") - def generate_and_push_credentials(self, system_id): - (priv, pub) = createKeyPair() - try: - self.register_public_key(pub, system_id) - self.push_system_credentials(pub, priv, system_id) - self.log(f"Access granted for system: {system_id}") - except (HTTPError, BaseTapyException) as e: - self.logger.error(e) - self.fail(f"Failed to push credentials to system: {system_id}") - def process(self): - self.log("Processing system access for user") - for system in self.settings.get('tapis_systems') or []: + self.log(f"Processing system access for user {self.user.username}") + for system in self.settings.get('access_systems') or []: + try: + set_user_permissions(self.user, system) + self.log(f"Successfully granted permissions for system: {system}") + except BaseTapyException as e: + logger.error(e) + self.fail(f"Failed to grant permissions for system: {system}") + + for system in self.settings.get('credentials_systems') or []: try: self.check_system(system) - self.log(f"Access already granted for system: {system}") + self.log(f"Credentials already created for system: {system}") + continue except BaseTapyException: - self.generate_and_push_credentials(system) + self.log(f"Creating credentials for system: {system}") + + (priv, pub) = createKeyPair() + + try: + self.register_public_key(pub, system) + self.log(f"Successfully registered public key for system: {system}") + except HTTPError as e: + logger.error(e) + self.fail(f"Failed to register public key with key service for system: {system}") + + try: + create_system_credentials(self.user, pub, priv, system) + self.log(f"Successfully created credentials for system: {system}") + except BaseTapyException as e: + logger.error(e) + self.fail(f"Failed to create credentials for system: {system}") if self.state != SetupState.FAILED: self.complete("User is processed.") diff --git a/server/portal/apps/onboarding/steps/system_creation.py b/server/portal/apps/onboarding/steps/system_creation.py deleted file mode 100644 index c57c390a3..000000000 --- a/server/portal/apps/onboarding/steps/system_creation.py +++ /dev/null @@ -1,37 +0,0 @@ -from portal.apps.onboarding.steps.abstract import AbstractStep -from portal.apps.onboarding.state import SetupState -from portal.apps.search.tasks import index_allocations -from portal.apps.auth.tasks import setup_user, get_user_storage_systems -from django.conf import settings -import logging - - -class SystemCreationStep(AbstractStep): - logger = logging.getLogger(__name__) - - def __init__(self, user): - """ - Call super class constructor - """ - super(SystemCreationStep, self).__init__(user) - - def display_name(self): - return "Storage" - - def description(self): - return """Setting up access to data files on the storage systems. If unsuccessful, please submit a ticket via our Get Help page.""" - - def prepare(self): - self.state = SetupState.PENDING - self.log("Awaiting storage system creation") - - def process(self): - self.log("Setting up storage systems") - index_allocations(self.user.username) - system_names = get_user_storage_systems( - self.user.username, settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEMS - ) - for system in system_names: - self.log("Setting up system {}".format(system)) - setup_user(self.user.username, system) - self.complete("Finished setting up storage systems") diff --git a/server/portal/apps/onboarding/steps/system_creation_unit_test.py b/server/portal/apps/onboarding/steps/system_creation_unit_test.py deleted file mode 100644 index aca60d2a5..000000000 --- a/server/portal/apps/onboarding/steps/system_creation_unit_test.py +++ /dev/null @@ -1,38 +0,0 @@ -from portal.apps.onboarding.steps.system_creation import SystemCreationStep -from mock import call -import pytest - - -@pytest.fixture(autouse=True) -def mock_get_user_storage_systems(mocker, settings): - mock = mocker.patch('portal.apps.onboarding.steps.system_creation.get_user_storage_systems') - mock.return_value = settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEMS - yield mock - - -@pytest.fixture(autouse=True) -def mock_index_allocations(mocker): - yield mocker.patch('portal.apps.onboarding.steps.system_creation.index_allocations') - - -@pytest.fixture(autouse=True) -def mock_log(mocker): - yield mocker.patch.object(SystemCreationStep, 'log') - - -@pytest.fixture -def mock_setup_user(mocker): - yield mocker.patch('portal.apps.onboarding.steps.system_creation.setup_user') - - -@pytest.fixture -def mock_complete(mocker): - yield mocker.patch.object(SystemCreationStep, 'complete') - - -def test_process(mock_setup_user, regular_user): - step = SystemCreationStep(regular_user) - step.process() - frontera_call = call("username", "frontera") - longhorn_call = call("username", "longhorn") - mock_setup_user.assert_has_calls([frontera_call, longhorn_call], any_order=True) diff --git a/server/portal/apps/workspace/api/tests.py b/server/portal/apps/workspace/api/tests.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/server/portal/apps/workspace/api/views.py b/server/portal/apps/workspace/api/views.py index 25090c3db..0989132a0 100644 --- a/server/portal/apps/workspace/api/views.py +++ b/server/portal/apps/workspace/api/views.py @@ -4,23 +4,22 @@ """ import logging import json -from urllib.parse import urlparse -from django.utils import timezone from django.http import JsonResponse from django.conf import settings from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator # from django.urls import reverse # TODOv3 from django.db.models.functions import Coalesce +from django.core.exceptions import ObjectDoesNotExist +from tapipy.errors import BaseTapyException, InternalServerError from portal.views.base import BaseApiView from portal.exceptions.api import ApiException from portal.apps.licenses.models import LICENSE_TYPES, get_license_info from portal.libs.agave.utils import service_account from portal.libs.agave.serializers import BaseTapisResultSerializer -from portal.apps.workspace.managers.user_applications import UserApplicationsManager # TODOv3 -# from portal.utils.translations import url_parse_inputs # TODOv3 from portal.apps.accounts.managers.user_systems import UserSystemsManager from portal.apps.workspace.models import AppTrayCategory, AppTrayEntry +from portal.apps.onboarding.steps.system_access_v3 import create_system_credentials from .handlers.tapis_handlers import tapis_get_handler logger = logging.getLogger(__name__) @@ -64,6 +63,28 @@ def _get_app(app_id, app_version, user): return data +def _test_listing_with_existing_keypair(system, user): + # TODOv3: Add Tapis system test utility method with proper error handling + tapis = user.tapis_oauth.client + + # Check for existing keypair stored for this hostname + try: + keys = user.ssh_keys.for_hostname(hostname=system.host) + priv_key_str = keys.private_key() + publ_key_str = keys.public + except ObjectDoesNotExist: + return False + + # Attempt listing a second time after credentials are added to system + try: + create_system_credentials(user, publ_key_str, priv_key_str, system.id) + tapis.files.listFiles(systemId=system.id, path="/") + except BaseTapyException: + return False + + return True + + @method_decorator(login_required, name='dispatch') class AppsView(BaseApiView): def get(self, request, *args, **kwargs): @@ -74,18 +95,21 @@ def get(self, request, *args, **kwargs): METRICS.debug("user:{} is requesting app id:{} version:{}".format(request.user.username, app_id, app_version)) data = _get_app(app_id, app_version, request.user) - # TODOv3: Test user default storage system (for archiving) https://jira.tacc.utexas.edu/browse/TV3-94 - data['systemHasKeys'] = True - # if settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEMS: - # # check if default system needs keys pushed - # default_sys = UserSystemsManager( - # request.user, - # settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT - # ) - # storage_sys = StorageSystem(tapis, default_sys.get_system_id()) - # success, _ = storage_sys.test() - # data['systemHasKeys'] = success - # data['pushKeysSystem'] = storage_sys.to_dict() + if settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEMS and settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT: + # check if default system needs keys pushed + default_sys = UserSystemsManager( + request.user, + settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT + ) + system_id = default_sys.get_system_id() + system_def = tapis.systems.getSystem(systemId=system_id) + + try: + tapis.files.listFiles(systemId=system_id, path="/") + except InternalServerError: + success = _test_listing_with_existing_keypair(system_def, request.user) + data['systemNeedsKeys'] = not success + data['pushKeysSystem'] = system_def else: METRICS.debug("user:{} is requesting all apps".format(request.user.username)) data = {'appListing': tapis.apps.getApps()} @@ -119,7 +143,8 @@ def get(self, request, *args, **kwargs): limit=limit, startAfter=offset, orderBy='lastUpdated(desc),name(asc)', - _tapis_query_parameters={'tags.contains': portal_name} + _tapis_query_parameters={'tags.contains': portal_name}, + select='allAttributes' ) return JsonResponse( @@ -173,42 +198,19 @@ def post(self, request, *args, **kwargs): ) # submit job elif job_post: - METRICS.info("user:{} is submitting job:{}".format(request.user.username, job_post)) - default_sys = UserSystemsManager( - request.user, - settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT - ) - if True: # TODOv3 ignoring archiving for the moment (https://jira.tacc.utexas.edu/browse/TV3-94) - if job_post.get('archiveSystemDir'): - del job_post['archiveSystemDir'] - if job_post.get('archiveOnAppError'): - job_post['archiveOnAppError'] = False - if job_post.get('archive'): - del job_post['archive'] - # TODOv3 check if cleaning is still needed below (maybe better to do on frontend?) - # cleaning archive path value - elif job_post.get('archiveSystemDir'): - parsed = urlparse(job_post['archiveSystemDir']) - if parsed.path.startswith('/') and len(parsed.path) > 1: - # strip leading '/' - archive_path = parsed.path[1:] - elif parsed.path == '': - # if path is blank, set to root of system - archive_path = '/' - else: - archive_path = parsed.path - - job_post['archiveSystemDir'] = archive_path - - if parsed.netloc: - job_post['archiveSystemId'] = parsed.netloc - else: - job_post['archiveSystemId'] = default_sys.get_system_id() - else: - job_post['archiveSystemDir'] = \ - 'archive/jobs/{}/${{JOB_NAME}}-${{JOB_ID}}'.format( - timezone.now().strftime('%Y-%m-%d')) + METRICS.info("processing job submission for user:{}: {}".format(request.user.username, job_post)) + + # TODOv3: How do we know if portal has HOME vs WORK? + # cleaning archive path value + if not job_post.get('archiveSystemId'): + # TODOv3: Do away with UserSystemsManager + default_sys = UserSystemsManager( + request.user, + settings.PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT + ) job_post['archiveSystemId'] = default_sys.get_system_id() + if not job_post.get('archiveSystemDir'): + job_post['archiveSystemDir'] = 'HOST_EVAL($HOME)/tapis-jobs-archive/${{JobCreateDate}}/${{JobName}}-${{JobUUID}}' # check for running licensed apps lic_type = job_post['licenseType'] if 'licenseType' in job_post else None @@ -226,19 +228,22 @@ def post(self, request, *args, **kwargs): job_post['parameterSet']['envVariables'] = [license_var] del job_post['licenseType'] - # TODOv3 need to check if execution system needs keys (https://jira.tacc.utexas.edu/browse/TV3-94) - # Get or create application based on allocation and execution system - apps_mgr = UserApplicationsManager(request.user) - print(apps_mgr) # TODOv3 testing workaround (to avoid flake8 error) - # app = apps_mgr.get_or_create_app(job_post['appId'], job_post['allocation']) - - # TODOv3 need to check if execution system needs keys (https://jira.tacc.utexas.edu/browse/TV3-94) - # code: UserApplicationsManager get_or_create_app) - # if app.exec_sys: - # return JsonResponse({"response": {"execSys": app.exec_sys.to_dict()}}) - - if 'parameterSet' not in job_post: - job_post['parameterSet'] = {} + # Test file listing on relevant systems to determine whether keys need to be pushed manually + for system_id in list(set([job_post['archiveSystemId'], job_post['execSystemId']])): + try: + tapis.files.listFiles(systemId=system_id, path="/") + except InternalServerError: + system_def = tapis.systems.getSystem(systemId=system_id) + success = _test_listing_with_existing_keypair(system_def, request.user) + if not success: + logger.info(f"Keys for user {request.user.username} must be manually pushed to system: {system_id}") + return JsonResponse( + { + 'status': 200, + 'response': {"execSys": system_def}, + }, + encoder=BaseTapisResultSerializer + ) if settings.DEBUG: wh_base_url = settings.WH_BASE_URL + '/webhooks/' @@ -258,9 +263,15 @@ def post(self, request, *args, **kwargs): # 'event': e} # for e in settings.PORTAL_JOB_NOTIFICATION_STATES] + logger.info("user:{} is submitting job:{}".format(request.user.username, job_post)) response = tapis.jobs.submitJob(**job_post) - - return JsonResponse({"response": response}, encoder=BaseTapisResultSerializer) + return JsonResponse( + { + 'status': 200, + 'response': response, + }, + encoder=BaseTapisResultSerializer + ) @method_decorator(login_required, name='dispatch') @@ -312,8 +323,7 @@ def get(self, request, job_uuid): class AppsTrayView(BaseApiView): def getPrivateApps(self, user): tapis = user.tapis_oauth.client - # TODOv3: make sure to exclude public apps - apps_listing = tapis.apps.getApps(select="version,id,notes", search=f"(owner.eq.{user.username})~(enabled.eq.true)") + apps_listing = tapis.apps.getApps(select="version,id,notes", search="(enabled.eq.true)", listType="MINE") my_apps = list(map(lambda app: { "label": getattr(app.notes, 'label', app.id), "version": app.version, @@ -324,21 +334,26 @@ def getPrivateApps(self, user): return my_apps def getPublicApps(self, user): - # TODOv3: make tapipy request for public apps to compare against apps in AppTrayEntry + tapis = user.tapis_oauth.client + apps_listing = tapis.apps.getApps(select="version,id,notes", search="(enabled.eq.true)", listType="SHARED_PUBLIC") categories = [] html_definitions = {} # Traverse category records in descending priority for category in AppTrayCategory.objects.all().order_by('-priority'): - # Retrieve all apps known to the portal in that directory + # Retrieve all apps known to the portal in that category tapis_apps = list(AppTrayEntry.objects.all().filter(available=True, category=category, appType='tapis') .order_by(Coalesce('label', 'appId')).values('appId', 'appType', 'html', 'icon', 'label', 'version')) + + # Only return Tapis apps that are known to exist and are enabled + tapis_apps = [x for x in tapis_apps if any(x['appId'] in [y.id, f'{y.id}-{y.version}'] for y in apps_listing)] + html_apps = list(AppTrayEntry.objects.all().filter(available=True, category=category, appType='html') .order_by(Coalesce('label', 'appId')).values('appId', 'appType', 'html', 'icon', 'label', 'version')) categoryResult = { "title": category.category, - "apps": tapis_apps + "apps": [{k: v for k, v in tapis_app.items() if v != ''} for tapis_app in tapis_apps] # Remove empty strings from response } # Add html apps to html_definitions diff --git a/server/portal/apps/workspace/api/views_unit_test.py b/server/portal/apps/workspace/api/views_unit_test.py index 92d0bcf10..dca87bae8 100644 --- a/server/portal/apps/workspace/api/views_unit_test.py +++ b/server/portal/apps/workspace/api/views_unit_test.py @@ -1,4 +1,3 @@ -from mock import MagicMock from django.conf import settings from portal.apps.workspace.api.views import JobsView, AppsTrayView from portal.apps.workspace.models import AppTrayCategory @@ -22,19 +21,6 @@ def get_user_data(mocker): yield mock -@pytest.fixture -def apps_manager(mocker): - mock_apps_manager = mocker.patch( - 'portal.apps.workspace.api.views.UserApplicationsManager' - ) - # Patch the User Applications Manager to return a fake cloned app - mock_app = MagicMock() - mock_app.id = "mock_app" - mock_app.exec_sys = False - mock_apps_manager.return_value.get_or_create_app.return_value = mock_app - yield mock_apps_manager - - @pytest.fixture def job_submmission_definition(): with open(os.path.join(settings.BASE_DIR, 'fixtures', 'job-submission.json')) as f: @@ -48,7 +34,7 @@ def logging_metric_mock(mocker): @pytest.mark.skip(reason="job post not implemented yet") def test_job_post(client, authenticated_user, get_user_data, mock_tapis_client, - apps_manager, job_submmission_definition): + job_submmission_definition): mock_tapis_client.jobs.resubmitJob.return_value = TapisResult(**{ 'uuid': '1234', }) @@ -63,7 +49,7 @@ def test_job_post(client, authenticated_user, get_user_data, mock_tapis_client, def test_job_post_cancel(client, authenticated_user, get_user_data, mock_tapis_client, - apps_manager, job_submmission_definition): + job_submmission_definition): mock_tapis_client.jobs.cancelJob.return_value = TapisResult(**{ 'uuid': '1234', }) @@ -78,7 +64,7 @@ def test_job_post_cancel(client, authenticated_user, get_user_data, mock_tapis_c def test_job_post_resubmit(client, authenticated_user, get_user_data, mock_tapis_client, - apps_manager, job_submmission_definition): + job_submmission_definition): mock_tapis_client.jobs.resubmitJob.return_value = TapisResult(**{ 'uuid': '1234', }) @@ -93,7 +79,7 @@ def test_job_post_resubmit(client, authenticated_user, get_user_data, mock_tapis def test_job_post_invalid(client, authenticated_user, get_user_data, mock_tapis_client, - apps_manager, job_submmission_definition): + job_submmission_definition): response = client.post( "/api/workspace/jobs", data=json.dumps({"action": "invalid action", "job_uuid": "1234"}), @@ -104,17 +90,28 @@ def test_job_post_invalid(client, authenticated_user, get_user_data, mock_tapis_ def test_job_post_is_logged_for_metrics(client, authenticated_user, get_user_data, mock_tapis_client, - apps_manager, job_submmission_definition, logging_metric_mock): - mock_tapis_client.jobs.submit.return_value = {"id": "1234"} + job_submmission_definition, logging_metric_mock): + mock_tapis_client.jobs.submitJob.return_value = {"id": "1234"} + mock_tapis_client.files.listFiles.return_value = {"path": ""} client.post( "/api/workspace/jobs", data=json.dumps(job_submmission_definition), content_type="application/json" ) + + tapis_job_submission = { + **job_submmission_definition, + 'archiveSystemId': 'frontera.home.username', + 'archiveSystemDir': 'HOST_EVAL($HOME)/tapis-jobs-archive/${{JobCreateDate}}/${{JobName}}-${{JobUUID}}', + 'tags': ['test'] + } + + tapis_job_submission['parameterSet']['envVariables'] = [{'key': '_webhook_base_url', 'value': 'http://testserver/webhooks/'}] + # Ensure metric-related logging is being performed logging_metric_mock.assert_called_with("user:{} is submitting job:{}".format(authenticated_user.username, - job_submmission_definition)) + tapis_job_submission)) def request_jobs_util(rf, authenticated_user, query_params={}): diff --git a/server/portal/apps/workspace/managers/__init__.py b/server/portal/apps/workspace/managers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/server/portal/apps/workspace/managers/base.py b/server/portal/apps/workspace/managers/base.py deleted file mode 100644 index 337e32fa8..000000000 --- a/server/portal/apps/workspace/managers/base.py +++ /dev/null @@ -1,218 +0,0 @@ -""" -.. module: apps.data_depot.managers.base - :synopsis: Abstract classes to build Data Depot file managers. -""" -import logging -from abc import ABCMeta, abstractmethod, abstractproperty -from six import add_metaclass - -logger = logging.getLogger(__name__) - - -@add_metaclass(ABCMeta) -class AbstractWorkspaceManager: - """Abstract class describing a Manager describing the basic functionality for - various resources needed to manage apps, e.g. Jobs, Monitors, Metadata, etc... - - .. rubric:: Rationale - - The *Workspace** should be the one place to go when a user needs to execute an - application within the portal. These applications might live in different - places and might be executed in different ways. These managers attempt to - standardize this by creating a small abstraction layer. - """ - - def __init__(self, request, **kwargs): - """Inspect the request object to initialize manager. - - :param request: Django request object. - """ - try: - self._ac = request.user.tapis_oauth.client - self.username = request.user.username - except AttributeError: - self._ac = None - self.username = 'AnonymousUser' - - @abstractproperty - def requires_auth(self): - """Check if we should check for auth""" - return True - - @abstractmethod - def get(self, *args, **kwargs): - """Get single object instance""" - return NotImplemented - - @abstractmethod - def list(self, *args, **kargs): - """Get list of objects""" - return NotImplemented - - -@add_metaclass(ABCMeta) -class AbstractApplicationsManager: - """Abstract class describing a Manager for a user's cloned applications - and cloned execution systems. - """ - - def __init__( - self, - user, - *args, - **kwargs): # pylint: disable=unused-argument - """Initialize Manager - - :param user: Django user instance - """ - self.user = user - self.client = self.user.tapis_oauth.client - - @abstractmethod - def get_clone_system_id(self, *args, **kwargs): - """Gets system id to deploy cloned app materials to. - - *System Id* is a string, unique id for each system. - This function returns the system id for a user's home system. - - :returns: System unique id - :rtype: str - """ - return NotImplemented - - @abstractmethod - def get_application(self, *args, **kwargs): - """Gets an application - - :param str appId: Unique id of the application - - :returns: Application instance - :rtype: class Application - """ - return NotImplemented - - @abstractmethod - def check_app_for_updates(self, *args, **kwargs): - """Checks cloned app for updates against host app by comparing the revision - of the host app to the 'cloneRevision' tag inserted into the cloned apps tags. - - :param cloned_app: Application instance of the cloned application - :param host_app_id: Agave id of the host application - :host_app: Application instance of the host application - - :returns: update_required - :rtype: bool - """ - return NotImplemented - - @abstractmethod - def clone_application(self, *args, **kwargs): - """Clones an application given a host app, allocation, and target name. - - ..note: checks if cloned Execution System already exists for user, - and creates it if not. - - :param str allocation: Project allocation - :param str cloned_app_name: Name of application clone - :param str host_app_id: Agave id of host app - :param host_app: Application instance of host app - - :returns: Application instance - :rtype: class Application - """ - return NotImplemented - - @abstractmethod - def get_or_create_cloned_app(self, *args, **kwargs): - """Gets or creates a cloned app for the user. - - Generates a cloned app id and tries to fetch that app. - If the app exists, check for updates. - - If app does not exist, clone the host app to cloned app id. - - :param host_app: Application instance of host app - :param str allocation: Project allocation for app to be run on - - :returns: Application instance - :rtype: class Application - """ - return NotImplemented - - @abstractmethod - def get_or_create_app(self, *args, **kwargs): - """Gets or creates application for user. - - If application selected is owned by user, return the app, - else clone the app to the same exec system with the - specified allocation. - - ..note: Entry point. - - :param str appId: Agave id of application selected to run - :param str allocation: Project alloction for app to run on - - :returns: Application instance - :rtype: class Application - """ - return NotImplemented - - @abstractmethod - def clone_execution_system(self, *args, **kwargs): - """Clone execution system for user. - - :param str host_system_id: Agave id of host execution system - :param str new_system_id: id for system clone - :param str alloc: Project allocation for system's custom directives - - :returns: ExecutionSystem instance - :rtype: ExecutionSystem - """ - return NotImplemented - - @abstractmethod - def set_system_definition(self, *args, **kwargs): - """Initializes Agave execution system - - :param class system: ExecutionSystem instance - :param str allocation: Project allocation for customDirectives - - :returns: ExecutionSystem instance - :rtype: class ExecutionSystem - """ - return NotImplemented - - @abstractmethod - def validate_exec_system(self, *args, **kwargs): - """Validate execution system and generate keys for it - - :param system_id: Agave system id - :param alloc: Project allocation for system - - :returns: ExecutionSystsem instance - :rtype: class ExecutionSystem - """ - return NotImplemented - - @abstractmethod - def get_exec_system(self, *args, **kwargs): - """Gets an execution system - - :param systemId: Agave Execution system id - - :returns: ExecutionSystem instance - :rtype: class ExecutionSystem - """ - return NotImplemented - - @abstractmethod - def get_or_create_exec_system(self, *args, **kwargs): - """Gets or creates user's execution system - - :param str clonedSystemId: Agave id of new system to be created - :param str hostSystemId: Agave id of host system to clone from - :param str alloc: Project allocation for system - - :returns: Agave response for the system - """ - return NotImplemented diff --git a/server/portal/apps/workspace/managers/unit_test.py b/server/portal/apps/workspace/managers/unit_test.py deleted file mode 100644 index 11f4c1c18..000000000 --- a/server/portal/apps/workspace/managers/unit_test.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import json -from django.conf import settings -from mock import patch -from django.test import TransactionTestCase -from django.contrib.auth import get_user_model -import pytest - -from portal.apps.workspace.managers.user_applications import UserApplicationsManager -from portal.libs.agave.models.applications import Application -from portal.libs.agave.models.systems.execution import ExecutionSystem - - -pytestmark = pytest.mark.django_db - - -class TestUserApplicationsManager(TransactionTestCase): - fixtures = ['users', 'auth', 'accounts'] - - @classmethod - def setUpClass(cls): - super(TestUserApplicationsManager, cls).setUpClass() - cls.magave_patcher = patch( - 'portal.apps.auth.models.TapisOAuthToken.client', - autospec=True - ) - cls.magave = cls.magave_patcher.start() - cls.mock_systems_manager_patcher = patch( - 'portal.apps.workspace.managers.user_applications.UserSystemsManager' - ) - cls.mock_systems_manager = cls.mock_systems_manager_patcher.start() - cls.mock_systems_manager.get_system_id.return_value = 'frontera.home.username' - - @classmethod - def tearDownClass(cls): - cls.magave_patcher.stop() - cls.mock_systems_manager_patcher.stop() - - def setUp(self): - user = get_user_model().objects.get(username='username') - - self.user_application_manager = UserApplicationsManager(user) - - agave_path = os.path.join(settings.BASE_DIR, 'fixtures/agave') - with open( - os.path.join( - agave_path, - 'systems', - 'execution.json' - ) - ) as _file: - self.execution_sys = json.load(_file) - - def test_set_system_definition_scratch_path_to_scratch(self): - self.mock_systems_manager.get_sys_tas_user_dir.return_value = '/home/1234/username' - self.mock_systems_manager.get_private_directory.return_value = '1234/username' - sys = ExecutionSystem.from_dict( - self.magave, - self.execution_sys - ) - - sys.login.host = 'stampede2.tacc.utexas.edu' - - with patch.object(UserApplicationsManager, 'get_exec_system', return_value=sys): - exec_sys_def = self.user_application_manager.set_system_definition('test_id', 'test_alloc') - - self.assertIn('/scratch', exec_sys_def.scratch_dir) - self.assertNotIn('/work', exec_sys_def.scratch_dir) - - # removing until we add a system with '/work' in it's scratch path - # def test_set_system_definition_scratch_path_to_work(self): - # self.mock_systems_manager.get_sys_tas_user_dir.return_value = '/home/1234/username' - # self.mock_systems_manager.get_private_directory.return_value = '1234/username' - - # sys = ExecutionSystem.from_dict( - # self.magave, - # self.execution_sys - # ) - - # sys.login.host = 'stampede2.tacc.utexas.edu' - - # with patch.object(UserApplicationsManager, 'get_exec_system', return_value=sys): - # exec_sys_def = self.user_application_manager.set_system_definition('test_id', 'test_alloc') - - # self.assertIn('/work', exec_sys_def.scratch_dir) - # self.assertNotIn('/scratch', exec_sys_def.scratch_dir) - - @patch('portal.apps.auth.models.TapisOAuthToken.client') - def test_check_app_for_updates_with_matching_clone_revision(self, mock_client): - host_app = Application(mock_client, revision=3) - cloned_app = Application(mock_client, tags=['cloneRevision:3']) - - self.assertFalse(self.user_application_manager.check_app_for_updates(cloned_app=cloned_app, host_app=host_app)) - - @patch('portal.apps.auth.models.TapisOAuthToken.client') - def test_check_app_for_updates_with_wrong_clone_revision(self, mock_client): - host_app = Application(mock_client, revision=3) - cloned_app = Application(mock_client, tags=['cloneRevision:2']) - - self.assertTrue(self.user_application_manager.check_app_for_updates(cloned_app=cloned_app, host_app=host_app)) - - @patch('portal.apps.auth.models.TapisOAuthToken.client') - def test_check_app_for_updates_with_missing_clone_revision(self, mock_client): - host_app = Application(mock_client, revision=3) - cloned_app = Application(mock_client) - - self.assertTrue(self.user_application_manager.check_app_for_updates(cloned_app=cloned_app, host_app=host_app)) diff --git a/server/portal/apps/workspace/managers/user_applications.py b/server/portal/apps/workspace/managers/user_applications.py deleted file mode 100644 index f8c41cd46..000000000 --- a/server/portal/apps/workspace/managers/user_applications.py +++ /dev/null @@ -1,400 +0,0 @@ -""" -..: module:: apps.workspace.managers.user_applications - : synopsis: Manager handling user's cloned applications and systems -""" -import logging - -from requests.exceptions import HTTPError - -from django.core.exceptions import ObjectDoesNotExist -from django.conf import settings - -from portal.libs.agave.models.systems.execution import ExecutionSystem -from portal.libs.agave.models.applications import Application -from portal.apps.workspace.managers.base import AbstractApplicationsManager -from portal.apps.accounts.managers.user_systems import UserSystemsManager - -# pylint: disable=invalid-name -logger = logging.getLogger(__name__) -# pylint: enable=invalid-name - - -class UserApplicationsManager(AbstractApplicationsManager): - """User Applications Manager - - Class that provides workflows to clone apps and execution systems for a user. - - """ - - def __init__(self, *args, **kwargs): - super(UserApplicationsManager, self).__init__(*args, **kwargs) - self.user_systems_mgr = UserSystemsManager(self.user) - - def get_clone_system_id(self): - """Gets system id to deploy cloned app materials to. - - *System Id* is a string, unique id for each system. - This function returns the system id for a user's home system. - - :returns: System unique id - :rtype: str - """ - - sys_id = self.user_systems_mgr.get_system_id() - return sys_id - - def get_application(self, appId): - """Gets an application - - :param str appId: Unique id of the application - - :returns: Application instance - :rtype: class Application - """ - - app = Application(self.client, id=appId) - return app - - def check_app_for_updates(self, cloned_app, host_app_id=None, host_app=None): - """Checks cloned app for updates against host app by comparing the revision - of the host app to the 'cloneRevision' tag inserted into the cloned apps tags. - - :param cloned_app: Application instance of the cloned application - :param host_app_id: Agave id of the host application - :param host_app: Application instance of the host application - - :returns: update_required - :rtype: bool - """ - update_required = False - - # compare cloned app revision number to original app revision number - if not host_app: - host_app = self.get_application(host_app_id) - - logger.debug('Looking for revision match in tags for app def: {}'.format(cloned_app.to_dict())) - # find revision number in tags - tag_match = [s for s in cloned_app.tags if 'cloneRevision' in s] - if not tag_match: - logger.error('No cloneRevision in tags, app should be updated to ensure consistency.') - update_required = True - else: - try: - clone_rev = int(tag_match[0].split(':')[1]) - if clone_rev != host_app.revision: - logger.warning('Cloned app revision does not match host: {} != {}'.format( - clone_rev, - host_app.revision - )) - update_required = True - except ValueError as exc: - logger.exception('cloneRevision in tags cannot be converted to integer, app should be updated to ensure consistency. %s', exc) - update_required = True - - return update_required - - def get_or_create_cloned_app_exec_system(self, host_exec_id, allocation): - host_exec = ExecutionSystem(self.client, host_exec_id) - host_exec_user_role = host_exec.roles.for_user(username=self.user.username) - if host_exec_user_role and host_exec_user_role.role == 'OWNER': - cloned_exec_sys = host_exec - logger.debug('Using current execution system {}'.format(cloned_exec_sys.id)) - else: - cloned_exec_id = '{username}.{allocation}.exec.{resource}.{execType}.{revision}'.format( - username=self.user.username.replace('_', '-'), - allocation=allocation, - resource=host_exec.login.host.replace('.tacc.utexas.edu', ''), - execType=host_exec.execution_type, - revision=host_exec.revision - ) - logger.debug('Getting cloned execution system: {}'.format(cloned_exec_id)) - cloned_exec_sys = self.get_or_create_exec_system(cloned_exec_id, host_exec.id, allocation) - return cloned_exec_sys - - def clone_application(self, allocation, cloned_app_name, host_app_id=None, host_app=None): - """Clones an application given a host app, allocation, and target name. - - ..note: checks if cloned Execution System already exists for user, - and creates it if not. - - :param str allocation: Project allocation - :param str cloned_app_name: Name of application clone - :param str host_app_id: Agave id of host app - :param host_app: Application instance of host app - - :returns: Application instance - :rtype: class Application - """ - if not host_app: - host_app = self.get_application(host_app_id) - - logger.debug('Starting process to clone new application for user with id: {}-{}.0'.format( - cloned_app_name, - host_app.revision)) - - cloned_exec_sys = self.get_or_create_cloned_app_exec_system(host_app.execution_system, allocation) - - cloned_depl_path = '.APPDATA/{appName}-{rev}.0'.format( - appName=cloned_app_name, - rev=host_app.revision - ) - - logger.debug('Cloning app id {}-{} with exec sys {} at path {} on deployment sys {}'.format( - cloned_app_name, - host_app.revision, - cloned_exec_sys.id, - cloned_depl_path, - self.get_clone_system_id(), - )) - cloned_app = host_app.clone(self.client, - depl_path=cloned_depl_path, - exec_sys=cloned_exec_sys.id, - depl_sys=self.get_clone_system_id(), - name=cloned_app_name, - ver='{}.0'.format(host_app.revision) - ) - - # add host revision number to cloned app's tags - cloned_app.tags.append('cloneRevision:{}'.format(host_app.revision)) - cloned_app.update() - - # if system needs keys, pass system along with app object to instantiate push keys modal - if hasattr(cloned_exec_sys, 'needs_keys'): - cloned_app.exec_sys = cloned_exec_sys - - return cloned_app - - def get_or_create_cloned_app(self, host_app, allocation, cloned_execution_system): - """Gets or creates a cloned app for the user. - - Generates a cloned app id and tries to fetch that app. - If the app exists, check for updates. - - If app does not exist, clone the host app to cloned app id. - - :param host_app: Application instance of host app - :param str allocation: Project allocation for app to be run on - :param ExecutionSystem cloned_execution_system: Cloned execution system - - :returns: Application instance - :rtype: class Application - """ - - # cloned_app_name is of the form 'prtl.clone.sal.PT2050-DataX.compress-0.1u1' - # NOTE: host revision # will be appended to cloned_app_id, e.g. prtl.clone.sal.PT2050-DataX.compress-0.1u1-2.0 - cloned_app_name = 'prtl.clone.{username}.{allocation}.{appId}'.format( - username=self.user.username, - allocation=allocation, - appId=host_app.id - ) - - cloned_app_id = '{appId}-{rev}.0'.format( - appId=cloned_app_name, - rev=host_app.revision) - try: - cloned_app = self.get_application(cloned_app_id) - - logger.debug('Cloned app {} found. Checking for updates...'.format(cloned_app_id)) - - if cloned_app.execution_system != cloned_execution_system.id: - logger.info("Cloned app {} has outdated execution system ('{}' != '{}'). Recreating...".format( - cloned_app_id, cloned_app.execution_system, cloned_execution_system.id)) - cloned_app.delete() - cloned_app = self.clone_application(allocation, cloned_app_name, host_app=host_app) - return cloned_app - - if not cloned_app.available: - logger.info('Cloned app {} is unavailable. Recreating...'.format(cloned_app_id)) - cloned_app.delete() - cloned_app = self.clone_application(allocation, cloned_app_name, host_app=host_app) - return cloned_app - - if not host_app.is_public: - update_required = self.check_app_for_updates(cloned_app, host_app=host_app) - if update_required: - # Need to update cloned app by deleting and re-cloning - logger.warning('Cloned app is being updated (i.e. deleted and re-cloned)') - cloned_app.delete() - cloned_app = self.clone_application(allocation, cloned_app_name, host_app=host_app) - else: - logger.debug('Cloned app is current with host.') - - return cloned_app - - except HTTPError as exc: - if exc.response.status_code == 404: - logger.debug('No app found with id {}. Cloning app...'.format(cloned_app_id)) - cloned_app = self.clone_application(allocation, cloned_app_name, host_app=host_app) - return cloned_app - else: - raise - - def get_or_create_app(self, appId, allocation): - """Gets or creates application for user. - - If application selected is owned by user, return the app, - else clone the app to the same exec system with the - specified allocation. - - ..note: Entry point. - - :param str appId: Agave id of application selected to run - :param str allocation: Project alloction for app to run on - - :returns: Application instance - :rtype: class Application - """ - - host_app = self.get_application(appId) - - # if app is owned by user, no need to clone - if host_app.owner == self.user.username: - logger.info('User is app owner, no need to clone. Returning original app.') - app = host_app - exec_sys = ExecutionSystem(self.client, app.execution_system, ignore_error=None) - else: - exec_sys = self.get_or_create_cloned_app_exec_system(host_app.execution_system, allocation) - app = self.get_or_create_cloned_app(host_app, allocation, exec_sys) - - # Check if app's execution system needs keys reset and pushed - if not app.exec_sys: - sys_ok, res = exec_sys.test() - if not sys_ok and (exec_sys.owner == self.user.username): - logger.debug(res) - logger.info('System {} needs new keys.'.format(exec_sys.id)) - app.exec_sys = exec_sys - - return app - - def clone_execution_system(self, host_system_id, new_system_id, alloc): - """Clone execution system for user. - - :param str host_system_id: Agave id of host execution system - :param str new_system_id: id for system clone - :param str alloc: Project allocation for system's custom directives - - :returns: ExecutionSystem instance - :rtype: ExecutionSystem - """ - - clone_body = { - 'action': 'CLONE', - 'id': new_system_id - } - - cloned_sys = self.client.systems.manage(body=clone_body, systemId=host_system_id) - - sys = self.validate_exec_system(cloned_sys['id'], alloc) - - return sys - - def set_system_definition( - self, - system_id, - allocation - ): # pylint:disable=arguments-differ - """Initializes Agave execution system - - :param class system_id: ExecutionSystem ID - :param str allocation: Project allocation for customDirectives - - :returns: ExecutionSystem instance - :rtype: class ExecutionSystem - """ - system = self.get_exec_system(system_id) - - if not system.available: - system.enable() - - storage_settings = {} - exec_settings = {} - for host, val in settings.PORTAL_EXEC_SYSTEMS.items(): - if host in system.storage.host: - storage_settings = val - if host in system.login.host: - exec_settings = val - - system.site = settings.PORTAL_DOMAIN - system.name = "Execution system for user {}".format(self.user.username) - system.storage.home_dir = storage_settings['home_dir'].format( - self.user_systems_mgr.get_private_directory()) if 'home_dir' in storage_settings else '' - system.storage.port = system.login.port - system.storage.root_dir = '/' - system.storage.auth.username = self.user.username - system.storage.auth.type = system.AUTH_TYPES.SSHKEYS - system.login.auth.username = self.user.username - system.login.auth.type = system.AUTH_TYPES.SSHKEYS - system.work_dir = '/work/{}'.format(self.user_systems_mgr.get_private_directory()) - system.scratch_dir = exec_settings['scratch_dir'].format( - self.user_systems_mgr.get_private_directory()) if 'scratch_dir' in exec_settings else '' - - if system.scheduler == 'SLURM': - for queue in system.queues.all(): - if queue.custom_directives: - queue.custom_directives = '-A {}'.format(allocation) - return system - - def validate_exec_system(self, system_id, alloc, *args, **kwargs): - """Validate execution system and generate keys for it - - :param system_id: Agave system id - :param alloc: Project allocation for system - - :returns: ExecutionSystsem instance - :rtype: class ExecutionSystem - """ - - system = self.set_system_definition( - system_id, - alloc - ) - - # NOTE: Check if host keys already exist for user for both login and storage hosts - for auth_block in [system.login, system.storage]: - try: - keys = self.user.ssh_keys.for_hostname(hostname=auth_block.host) - priv_key_str = keys.private_key() - publ_key_str = keys.public - auth_block.auth.public_key = publ_key_str - auth_block.auth.private_key = priv_key_str - except ObjectDoesNotExist: - system.needs_keys = True - auth_block.auth.public_key = 'public_key' - auth_block.auth.private_key = 'private_key' - - system.update() - - return system - - def get_exec_system(self, systemId, *args, **kwargs): - """Gets an execution system - - :param systemId: Agave Execution system id - - :returns: ExecutionSystem instance - :rtype: class ExecutionSystem - """ - - exec_sys = ExecutionSystem(self.client, systemId, ignore_error=None) - return exec_sys - - def get_or_create_exec_system(self, clonedSystemId, hostSystemId, alloc, *args, **kwargs): - """Gets or creates user's execution system - - :param str clonedSystemId: Agave id of new system to be created - :param str hostSystemId: Agave id of host system to clone from - :param str alloc: Project allocation for system - - :returns: Agave response for the system - """ - try: - exec_sys = self.get_exec_system(clonedSystemId) - if not exec_sys.available: - exec_sys = self.validate_exec_system(exec_sys.id, alloc) - logger.debug('Execution system found') - return exec_sys - except HTTPError as exc: - if exc.response.status_code == 404: - logger.debug('No execution system found, cloning system') - exec_sys = self.clone_execution_system(hostSystemId, clonedSystemId, alloc) - return exec_sys diff --git a/server/portal/fixtures/agave/files/file-listing.json b/server/portal/fixtures/agave/files/file-listing.json index 1e4e2d8d3..4829aadce 100644 --- a/server/portal/fixtures/agave/files/file-listing.json +++ b/server/portal/fixtures/agave/files/file-listing.json @@ -22,4 +22,4 @@ "href": "https://api.tacc.utexas.edu/files/v2/history/system/portal.home.username//parent_folder/sub_folder/file.txt" } } -}] \ No newline at end of file +}] diff --git a/server/portal/fixtures/agave/files/tapis-file-listing.json b/server/portal/fixtures/agave/files/tapis-file-listing.json new file mode 100644 index 000000000..e1e8ba8d7 --- /dev/null +++ b/server/portal/fixtures/agave/files/tapis-file-listing.json @@ -0,0 +1,50 @@ +[ + { + "mimeType": null, + "type": "file", + "owner": "0", + "group": "0", + "nativePermissions": "rwxrwxrwx", + "url": "tapis://cloud.data.community/bin", + "lastModified": "2020-04-23T06:25:56Z", + "name": "bin", + "path": "bin", + "size": 7 + }, + { + "mimeType": null, + "type": "dir", + "owner": "0", + "group": "0", + "nativePermissions": "r-xr-xr-x", + "url": "tapis://cloud.data.community/boot", + "lastModified": "2022-05-24T20:32:05Z", + "name": "boot", + "path": "boot", + "size": 4096 + }, + { + "mimeType": null, + "type": "dir", + "owner": "0", + "group": "0", + "nativePermissions": "rwxr-xr-x", + "url": "tapis://cloud.data.community/corral", + "lastModified": "2022-09-16T16:48:43Z", + "name": "corral", + "path": "corral", + "size": 4096 + }, + { + "mimeType": null, + "type": "dir", + "owner": "0", + "group": "0", + "nativePermissions": "rwxr-xr-x", + "url": "tapis://cloud.data.community/dev", + "lastModified": "2022-05-24T20:31:51Z", + "name": "dev", + "path": "dev", + "size": 3140 + } + ] diff --git a/server/portal/fixtures/job-submission.json b/server/portal/fixtures/job-submission.json index b92407625..642a0c9b2 100644 --- a/server/portal/fixtures/job-submission.json +++ b/server/portal/fixtures/job-submission.json @@ -3,6 +3,7 @@ "appId": "compress-0.1", "appVersion": "0.0.1", "description": "test compression job 1", + "execSystemId": "frontera", "fileInputs": [ { "name": "Target", diff --git a/server/portal/libs/agave/operations.py b/server/portal/libs/agave/operations.py index 3e1da37fc..c28a24b05 100644 --- a/server/portal/libs/agave/operations.py +++ b/server/portal/libs/agave/operations.py @@ -6,10 +6,11 @@ import logging from elasticsearch_dsl import Q from portal.libs.elasticsearch.indexes import IndexedFile -from portal.apps.search.tasks import agave_indexer, agave_listing_indexer +from portal.apps.search.tasks import agave_indexer from portal.exceptions.api import ApiException from portal.libs.agave.utils import text_preview, get_file_size, increment_file_name from portal.libs.agave.filter_mapping import filter_mapping +from pathlib import Path logger = logging.getLogger(__name__) @@ -37,20 +38,32 @@ def listing(client, system, path, offset=0, limit=100, *args, **kwargs): List of dicts containing file metadata from Elasticsearch """ - raw_listing = client.files.list(systemId=system, - filePath=urllib.parse.quote(path), - offset=int(offset) + 1, - limit=int(limit)) + raw_listing = client.files.listFiles(systemId=system, + path=urllib.parse.quote(path), + offset=int(offset) + 1, + limit=int(limit)) try: # Convert file objects to dicts for serialization. - listing = list(map(dict, raw_listing)) + listing = list(map(lambda f: { + 'system': system, + 'type': 'dir' if f.type == 'dir' else 'file', + 'format': 'folder' if f.type == 'dir' else 'raw', + 'mimeType': f.mimeType, + 'path': f.path, + 'name': f.name, + 'length': f.size, + 'lastModified': f.lastModified, + '_links': { + 'self': {'href': f.url} + }}, raw_listing)) except IndexError: # Return [] if the listing is empty. listing = [] # Update Elasticsearch after each listing. - agave_listing_indexer.delay(listing) + # TODOV3: test/verify indexing operations + # agave_listing_indexer.delay(listing) return {'listing': listing, 'reachedEnd': len(listing) < int(limit)} @@ -132,6 +145,7 @@ def search(client, system, path='', offset=0, limit=100, query_string='', filter 'reachedEnd': len(hits) < int(limit)} +# TODOV3: rewrite using v3 postit service TBD. def download(client, system, path, href, force=True, max_uses=3, lifetime=600, **kwargs): """Creates a postit pointing to this file. @@ -189,18 +203,16 @@ def mkdir(client, system, path, dir_name): ------- dict """ - body = { - 'action': 'mkdir', - 'path': dir_name - } - result = client.files.manage(systemId=system, - filePath=urllib.parse.quote(path), - body=body) - agave_indexer.apply_async(kwargs={'systemId': system, - 'filePath': path, - 'recurse': False}) - return result + path = Path(path) / Path(dir_name) + path_input = urllib.parse.quote(str(path)) + client.files.mkdir(systemId=system, path=path_input) + + # TODOV3: test/verify indexing operations + # agave_indexer.apply_async(kwargs={'systemId': system, + # 'filePath': path, + # 'recurse': False}) + return {"result": "OK"} def move(client, src_system, src_path, dest_system, dest_path, file_name=None): @@ -535,7 +547,7 @@ def download_bytes(client, system, path): BytesIO object representing the downloaded file. """ file_name = os.path.basename(path) - resp = client.files.download(systemId=system, filePath=path) - result = io.BytesIO(resp.content) + resp = client.files.getContents(systemId=system, path=path) + result = io.BytesIO(resp) result.name = file_name return result diff --git a/server/portal/libs/agave/operations_unit_test.py b/server/portal/libs/agave/operations_unit_test.py index 1ef26bfd1..c05b0f54d 100644 --- a/server/portal/libs/agave/operations_unit_test.py +++ b/server/portal/libs/agave/operations_unit_test.py @@ -1,30 +1,34 @@ from mock import patch, MagicMock from requests.exceptions import HTTPError from django.test import TestCase -from agavepy.agave import AttrDict +from tapipy.tapis import TapisResult from elasticsearch_dsl import Q from elasticsearch_dsl.response import Hit from portal.libs.agave.operations import listing, search, mkdir, move, copy, rename, makepublic from portal.exceptions.api import ApiException +from unittest import skip class TestOperations(TestCase): + # TODOv3: test/verify indexing operations + @skip(reason="TODOv3: convert to v3 Tapis") @patch('portal.libs.agave.operations.agave_listing_indexer') def test_listing(self, mock_indexer): client = MagicMock() - mock_listing = [AttrDict({'system': 'test.system', - 'path': '/path/to/file'})] - client.files.list.return_value = mock_listing + mock_listing = [TapisResult(**{'system': 'test.system', + 'path': '/path/to/file'})] + client.files.listFiles.return_value = mock_listing ls = listing(client, 'test.system', '/path/to/file') - client.files.list.assert_called_with(systemId='test.system', - filePath='/path/to/file', - offset=1, - limit=100) + client.files.listFiles.assert_called_with(systemId='test.system', + path='/path/to/file', + offset=1, + limit=100) - mock_indexer.delay.assert_called_with([{'system': 'test.system', - 'path': '/path/to/file'}]) + # TODOv3: test/verify indexing operations + # mock_indexer.delay.assert_called_with([{'system': 'test.system', + # 'path': '/path/to/file'}]) self.assertEqual(ls, {'listing': [{'system': 'test.system', 'path': '/path/to/file'}], @@ -63,9 +67,10 @@ def test_search(self, mock_search): def test_mkdir(self, mock_indexer): client = MagicMock() mkdir(client, 'test.system', '/root', 'testfolder') - client.files.manage.assert_called_with(systemId='test.system', filePath='/root', body={'action': 'mkdir', 'path': 'testfolder'}) + client.files.mkdir.assert_called_with(systemId='test.system', path='/root/testfolder') - mock_indexer.apply_async.assert_called_with(kwargs={'systemId': 'test.system', 'filePath': '/root', 'recurse': False}) + # TODOv3: test/verify indexing operations + # mock_indexer.apply_async.assert_called_with(kwargs={'systemId': 'test.system', 'filePath': '/root', 'recurse': False}) @patch('portal.libs.agave.operations.move') def test_rename(self, mock_move): diff --git a/server/portal/libs/agave/serializers.py b/server/portal/libs/agave/serializers.py index 68a7b0e69..36b152620 100644 --- a/server/portal/libs/agave/serializers.py +++ b/server/portal/libs/agave/serializers.py @@ -104,9 +104,15 @@ def _serialize(self, obj): for nk, nv in v.items(): v[nk] = self._serialize(nv) return _wrapped + elif isinstance(obj, list): + for index, item in enumerate(obj): + obj[index] = self._serialize(item) + elif isinstance(obj, dict): + for nk, nv in obj.items(): + obj[nk] = self._serialize(nv) return obj def default(self, obj): - if isinstance(obj, TapisResult): + if isinstance(obj, (TapisResult, list, dict)): return self._serialize(obj) return json.JSONEncoder.encode(self, obj) diff --git a/server/portal/libs/agave/unit_test.py b/server/portal/libs/agave/unit_test.py index 708ff1931..c39f5f75b 100644 --- a/server/portal/libs/agave/unit_test.py +++ b/server/portal/libs/agave/unit_test.py @@ -17,6 +17,7 @@ BaseAgaveSystemSerializer ) from portal.libs.agave import utils as AgaveUtils +from unittest import skip # pylint: disable=invalid-name logger = logging.getLogger(__name__) @@ -241,6 +242,7 @@ def test_to_camel_case(self): res = AgaveUtils.to_camel_case(attr) self.assertEqual(res, 'someAttribute') + @skip(reason="TODOv3: update for v3 tapis") def test_walk(self): """Test `walk` util function.""" self.magave.reset_mock() @@ -277,6 +279,7 @@ def test_walk(self): flat_listing[index]['path'] ) + @skip(reason="TODOv3: update for v3 tapis") def test_walk_levels(self): """Test `walk_levels` util.""" self.magave.reset_mock() diff --git a/server/portal/libs/agave/utils.py b/server/portal/libs/agave/utils.py index cb6f79892..a5912b3ab 100644 --- a/server/portal/libs/agave/utils.py +++ b/server/portal/libs/agave/utils.py @@ -79,8 +79,20 @@ def walk(client, system, path, bottom_up=False, yield_base=True): """ from portal.libs.agave.models.files import BaseFile - files = client.files.list(systemId=system, - filePath=urllib.parse.quote(path)) + _files = client.files.listFiles(systemId=system, + path=urllib.parse.quote(path)) + files = list(map(lambda f: { + 'system': system, + 'type': 'dir' if f.type == 'dir' else 'file', + 'format': 'folder' if f.type == 'dir' else 'raw', + 'mimeType': f.mimeType, + 'path': f.path, + 'name': f.name, + 'length': f.size, + 'lastModified': f.lastModified, + '_links': { + 'self': {'href': f.url} + }}, _files)) for json_file in files: json_file.pop('_links', None) if json_file['name'] == '.': @@ -116,10 +128,22 @@ def iterate_level(client, system, path, limit=100): offset = 0 while True: - page = client.files.list(systemId=system, - filePath=urllib.parse.quote(path), - offset=int(offset), - limit=int(limit)) + _page = client.files.listFiles(systemId=system, + path=urllib.parse.quote(path), + offset=int(offset), + limit=int(limit)) + page = list(map(lambda f: { + 'system': system, + 'type': 'dir' if f.type == 'dir' else 'file', + 'format': 'folder' if f.type == 'dir' else 'raw', + 'mimeType': f.mimeType, + 'path': f.path, + 'name': f.name, + 'length': f.size, + 'lastModified': f.lastModified, + '_links': { + 'self': {'href': f.url} + }}, _page)) yield from page offset += limit if len(page) != limit: diff --git a/server/portal/settings/settings_custom.example.py b/server/portal/settings/settings_custom.example.py index a5e57c263..231ad0faf 100644 --- a/server/portal/settings/settings_custom.example.py +++ b/server/portal/settings/settings_custom.example.py @@ -170,10 +170,6 @@ 'rt_queue': 'Life Sciences' # Defaults to 'Accounting' if left blank } }, - { - 'step': 'portal.apps.onboarding.steps.system_creation.SystemCreationStep', - 'settings': {} - } ] """ @@ -187,9 +183,11 @@ 'settings': {} }, { - 'step': 'portal.apps.onboarding.steps.key_service_creation.KeyServiceCreationStep', - 'settings': {} - } + 'step': 'portal.apps.onboarding.steps.system_access_v3.SystemAccessStepV3', + 'settings': { + 'tapis_systems': ['cloud.data.community'], + } + }, ] ####################### diff --git a/server/portal/settings/settings_default.py b/server/portal/settings/settings_default.py index 1a9ba810c..20b3dbf31 100644 --- a/server/portal/settings/settings_default.py +++ b/server/portal/settings/settings_default.py @@ -69,19 +69,20 @@ _PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEMS = { 'stockyard': { 'name': 'My Data (Work)', - 'description': 'My Data on Stockyard for {username}', + 'description': 'My Data on Stockyard', 'site': 'cep', - 'systemId': 'cloud.corral.work.{username}', - 'host': 'cloud.corral.tacc.utexas.edu', + 'systemId': 'cloud.data.community', + 'host': 'cloud.data.tacc.utexas.edu', 'rootDir': '/work/{tasdir}', - 'port': 2222, + 'port': 22, 'icon': None, + 'default': True # TODOv3: replace PORTAL_DATA_DEPOT_LOCAL_STORAGE_SYSTEM_DEFAULT with 'default' field }, 'frontera': { 'name': 'My Data (Frontera)', - 'description': 'My Data on Frontera for {username}', + 'description': 'My Data on Frontera', 'site': 'cep', - 'systemId': 'frontera.home.{username}', + 'systemId': 'frontera', 'host': 'frontera.tacc.utexas.edu', 'rootDir': '/home1/{tasdir}', 'port': 22, @@ -89,16 +90,7 @@ } } -_PORTAL_DATAFILES_STORAGE_SYSTEMS = [ - { - 'name': 'My Data (secure.corral)', - 'system': 'a2cps.secure.corral.dev', - 'scheme': 'private', - 'api': 'tapis', - 'icon': None, - 'siteSearchPriority': 1 - }, -] +_PORTAL_DATAFILES_STORAGE_SYSTEMS = [] ######################## # DJANGO APP: ONBOARDING @@ -143,14 +135,11 @@ 'rt_queue': 'Life Sciences' # Defaults to 'Accounting' if left blank } }, - { - 'step': 'portal.apps.onboarding.steps.system_creation.SystemCreationStep', - 'settings': {} - }, { 'step': 'portal.apps.onboarding.steps.system_access_v3.SystemAccessStepV3', 'settings': { - 'tapis_systems': ['frontera', 'stampede2.community'], # Tapis systems to grant user credentials + 'access_systems': ['cloud.data.community', 'frontera', 'stampede2.community'], # Tapis systems to grant file access + 'credentials_systems': ['cloud.data.community'] # Tapis systems to grant user credentials with the keys service } }, ] @@ -168,7 +157,8 @@ { 'step': 'portal.apps.onboarding.steps.system_access_v3.SystemAccessStepV3', 'settings': { - 'tapis_systems': ['cloud.data.community'], + 'access_systems': ['cloud.data.community', 'frontera', 'stampede2.community'], + 'credentials_systems': ['cloud.data.community'] } }, ] diff --git a/server/portal/utils/encryption.py b/server/portal/utils/encryption.py index bea698909..a53691239 100644 --- a/server/portal/utils/encryption.py +++ b/server/portal/utils/encryption.py @@ -16,6 +16,15 @@ # pylint: enable=invalid-name +def createKeyPair(): + private_key = create_private_key() + priv_key_str = export_key(private_key, 'PEM') + public_key = create_public_key(private_key) + publ_key_str = export_key(public_key, 'OpenSSH') + + return priv_key_str, publ_key_str + + def create_private_key(bits=2048): """Creates a brand new RSA key