From 746dde1049bf61f6c091354f8dfd445ec092e829 Mon Sep 17 00:00:00 2001
From: Samson Akol
Date: Wed, 30 Apr 2025 22:15:25 +0300
Subject: [PATCH 1/7] Adds back the flake8-print dependency to flake8

---
 .pre-commit-config.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e0bad917b5..74a96a7a26 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,6 +12,9 @@ repos:
   rev: 7.1.2
   hooks:
   - id: flake8
+    additional_dependencies: [
+      'flake8-print==5.0.0'
+    ]
 - repo: https://github.com/asottile/reorder_python_imports
   rev: v3.14.0
   hooks:

From 4c8ab73b6b77b06978f8732edc51d6a4657382cc Mon Sep 17 00:00:00 2001
From: Samson Akol
Date: Wed, 30 Apr 2025 23:12:14 +0300
Subject: [PATCH 2/7] Adds black Python formatter hook to pre-commit

---
 .pre-commit-config.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 74a96a7a26..2189a3ffd4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -28,3 +28,11 @@ repos:
     entry: pnpm run lint-frontend:format
     language: system
     files: \.(js|vue|scss|css)$
+# Always keep black as the final hook so it reformats any other reformatting.
+- repo: https://github.com/python/black
+  rev: 20.8b1
+  hooks:
+  - id: black
+    additional_dependencies: [
+      'click==8.0.4'
+    ]

From 6a6e48a4ea1e53e7d9d569cf686de91966ee27ea Mon Sep 17 00:00:00 2001
From: Samson Akol
Date: Thu, 1 May 2025 14:08:21 +0300
Subject: [PATCH 3/7] Adds no auto-named migrations pre-commit hook

---
 .pre-commit-config.yaml | 97 +++++++++++++++++++++++++++++++----------
 1 file changed, 75 insertions(+), 22 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2189a3ffd4..3b663b0af5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,38 +1,91 @@
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
+  - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v5.0.0
     hooks:
-  - id: trailing-whitespace
-  - id: check-added-large-files
+      - id: trailing-whitespace
+      - id: check-added-large-files
         exclude: '^.+?\.ttf$'
-  - id: debug-statements
-  - id: end-of-file-fixer
+      - id: debug-statements
+      - id: end-of-file-fixer
         exclude: '^.+?\.json.+?\.yml$'
-- repo: https://github.com/PyCQA/flake8
+  - repo: https://github.com/PyCQA/flake8
     rev: 7.1.2
     hooks:
-  - id: flake8
+      - id: flake8
         additional_dependencies: [
           'flake8-print==5.0.0'
         ]
-- repo: https://github.com/asottile/reorder_python_imports
+  - repo: https://github.com/asottile/reorder_python_imports
     rev: v3.14.0
     hooks:
-  - id: reorder-python-imports
+      - id: reorder-python-imports
         language_version: python3
-- repo: local
+  - repo: local
     hooks:
-  - id: frontend-lint
+      - id: frontend-lint
         name: Linting of JS, Vue, SCSS and CSS files
         description: This hook handles all frontend linting for Kolibri Studio
         entry: pnpm run lint-frontend:format
         language: system
         files: \.(js|vue|scss|css)$
+  - repo: local
+    hooks:
+      - id: no-auto-migrations
+        name: no auto-named migrations
+        entry: We do not allow _auto_ in migration names. Please give the migration a descriptive name.
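+        # `language: fail` hooks fail on every file matching `files` (minus
+        # `exclude`) and print the `entry` text above as the error message.
+        # To comply, name new migrations explicitly when generating them,
+        # e.g. (hypothetical app and migration name):
+        #     python manage.py makemigrations contentcuration --name add_new_field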
+        language: fail
+        files: .*/migrations/.*_auto_.*\.py$
+        exclude: (?x)^(
+            contentcuration/kolibri_content/migrations/0014_auto_20210603_1536.py|
+            contentcuration/kolibri_content/migrations/0023_auto_20250417_1516.py|
+            contentcuration/kolibri_content/migrations/0007_auto_20200613_0050.py|
+            contentcuration/kolibri_content/migrations/0004_auto_20180910_2342.py|
+            contentcuration/kolibri_content/migrations/0002_auto_20180327_1414.py|
+            contentcuration/kolibri_content/migrations/0022_auto_20240915_1414.py|
+            contentcuration/kolibri_content/migrations/0011_auto_20210504_1744.py|
+            contentcuration/kolibri_content/migrations/0010_auto_20210202_0604.py|
+            contentcuration/kolibri_content/migrations/0018_auto_20220224_2031.py|
+            contentcuration/kolibri_content/migrations/0019_auto_20230207_0116.py|
+            contentcuration/kolibri_content/migrations/0005_auto_20190424_1709.py|
+            contentcuration/kolibri_content/migrations/0006_auto_20191028_2325.py|
+            contentcuration/kolibri_content/migrations/0015_auto_20210707_1606.py|
+            contentcuration/kolibri_content/migrations/0013_auto_20210519_1759.py|
+            contentcuration/kolibri_content/migrations/0012_auto_20210511_1605.py|
+            contentcuration/kolibri_content/migrations/0021_auto_20240612_1847.py|
+            contentcuration/search/migrations/0002_auto_20201215_2110.py|
+            contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py|
+            contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py|
+            contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py|
+            contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py|
+            contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py|
+            contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py|
+            contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py|
+            contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py|
+            contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py|
+            contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py|
+            contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py|
+            contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py|
+            contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py|
+            contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py|
+            contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py|
+            contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py|
+            contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py|
+            contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py|
+            contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py|
+            contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py|
+            contentcuration/contentcuration/migrations/0151_auto_20250417_1516.py|
+            contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py|
+            contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py|
+            contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py|
+            contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py|
+            contentcuration/kolibri_public/migrations/0004_auto_20240612_1847.py|
+            contentcuration/kolibri_public/migrations/0006_auto_20250417_1516.py
+        )$
+  # Always keep black as the final hook so it reformats any other reformatting.
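+  # (Likely why click is pinned below: black 20.8b1 fails to import with
+  # click>=8.1, which removed click's internal `_unicodefun` module.)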
+ - repo: https://github.com/python/black rev: 20.8b1 hooks: - - id: black - additional_dependencies: [ - 'click==8.0.4' - ] + - id: black + additional_dependencies: [ + 'click==8.0.4' + ] From 3c94870e402fb8f68bc77c54e33cbbf045ea44d6 Mon Sep 17 00:00:00 2001 From: Samson Akol Date: Thu, 1 May 2025 16:14:23 +0300 Subject: [PATCH 4/7] updates flake8 rules --- setup.cfg | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index dfad758215..3e5e6361e1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,17 @@ [flake8] max-line-length = 160 max-complexity = 10 -exclude = contentcuration/contentcuration/migrations/*, contentcuration/kolibri_content/migrations/* -ignore = E402,W503,W504,E123,E122,E126 +exclude = + contentcuration/contentcuration/migrations/*, + contentcuration/kolibri_content/migrations/*, + contentcuration/search/migrations/*, + contentcuration/kolibri_public/migrations/*, + contentcuration/automation/migrations/* +ignore = E402,W503,W504,E123,E122,E126,E203 +per-file-ignores = + # Allow print statements in tests + test_*.py:T201 + [kolibri:i18n] project = kolibri-studio locale_data_folder = contentcuration/locale From 264868b471a8d7d0ffb3b7720a4e6504d3b3a8c4 Mon Sep 17 00:00:00 2001 From: Samson Akol Date: Thu, 8 May 2025 16:03:49 +0300 Subject: [PATCH 5/7] Replaces frontend only linting check with pre-commit check --- .../{frontendlint.yml => pre-commit.yml} | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) rename .github/workflows/{frontendlint.yml => pre-commit.yml} (58%) diff --git a/.github/workflows/frontendlint.yml b/.github/workflows/pre-commit.yml similarity index 58% rename from .github/workflows/frontendlint.yml rename to .github/workflows/pre-commit.yml index 7a6df55e63..a3ac3a4b21 100644 --- a/.github/workflows/frontendlint.yml +++ b/.github/workflows/pre-commit.yml @@ -1,4 +1,4 @@ -name: Javascript Linting +name: Linting on: push: @@ -7,6 +7,10 @@ on: - hotfixes - master pull_request: + branches: + - unstable + - hotfixes + - master jobs: pre_job: @@ -20,27 +24,38 @@ jobs: uses: fkirc/skip-duplicate-actions@master with: github_token: ${{ github.token }} - paths: '["**.vue", "**.js", "pnpm-lock.yaml", ".github/workflows/frontendlint.yml"]' - test: - name: Frontend linting + paths_ignore: '["**.po", "**.json"]' + linting: + name: All file linting needs: pre_job if: ${{ needs.pre_job.outputs.should_skip != 'true' }} runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.10' - name: Use pnpm uses: pnpm/action-setup@v4 - name: Use Node.js uses: actions/setup-node@v4 with: node-version: '18.x' - cache: 'pnpm' + - name: Get pnpm store path + id: pnpm-store-path + run: echo "dir=$(pnpm store path)" >> $GITHUB_OUTPUT + - name: Cache Node.js modules + uses: actions/cache@v4 + with: + path: ${{ steps.pnpm-store-path.outputs.dir }} + key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm- - name: Install dependencies run: | pnpm install --frozen-lockfile pnpm rebuild node-sass - - name: Run tests - run: pnpm run lint-frontend:format + - uses: pre-commit/action@v3.0.1 - name: Run pre-commit-ci-lite uses: pre-commit-ci/lite-action@v1.1.0 if: always() From 8ccaaa60efd1c07b220aefce5a307e4791345111 Mon Sep 17 00:00:00 2001 From: Samson Akol Date: Thu, 8 May 2025 16:29:49 +0300 Subject: [PATCH 6/7] Python linting updates # Conflicts: # contentcuration/contentcuration/utils/publish.py # 
Conflicts: # contentcuration/contentcuration/tests/viewsets/test_recommendations.py # contentcuration/contentcuration/urls.py --- .dockerignore | 2 +- .../community-contribution-labeling.yml | 2 +- .github/workflows/pre-commit.yml | 11 +- LICENSE | 1 - contentcuration/automation/admin.py | 1 - contentcuration/automation/apps.py | 4 +- .../automation/migrations/0001_initial.py | 68 +- contentcuration/automation/models.py | 10 +- contentcuration/automation/tests.py | 1 - .../automation/tests/appnexus/test_base.py | 20 +- .../tests/test_recommendations_cache_model.py | 23 +- .../automation/utils/appnexus/base.py | 10 +- contentcuration/automation/views.py | 1 - contentcuration/contentcuration/admin.py | 9 +- contentcuration/contentcuration/api.py | 16 +- contentcuration/contentcuration/apps.py | 2 +- .../contentcuration/collectstatic_settings.py | 6 +- .../constants/completion_criteria.py | 14 +- .../constants/feature_flags.py | 4 +- .../contentcuration/constants/feedback.py | 12 +- .../contentcuration/db/advisory_lock.py | 6 +- .../contentcuration/db/models/expressions.py | 14 +- .../contentcuration/db/models/functions.py | 3 + .../contentcuration/db/models/manager.py | 53 +- .../contentcuration/db/models/query.py | 21 +- contentcuration/contentcuration/decorators.py | 2 + contentcuration/contentcuration/dev_urls.py | 4 +- contentcuration/contentcuration/forms.py | 112 +- .../commands/count_public_resources.py | 19 +- .../fix_duplicate_assessment_items.py | 23 +- .../commands/fix_exercise_complete.py | 210 +- .../management/commands/garbage_collect.py | 7 +- .../management/commands/mark_incomplete.py | 276 ++- .../commands/reconcile_change_tasks.py | 44 +- .../commands/reconcile_publishing_status.py | 17 +- .../management/commands/restore_channel.py | 19 +- .../commands/set_content_mimetypes.py | 5 +- .../set_default_learning_activities.py | 41 +- .../management/commands/set_file_duration.py | 35 +- .../commands/set_orm_based_has_captions.py | 35 +- .../management/commands/set_storage_used.py | 6 +- .../management/commands/setup.py | 160 +- .../commands/setup_perftest_data.py | 10 +- .../management/commands/test_server_perf.py | 35 +- .../contentcuration/middleware/db_readonly.py | 8 +- .../middleware/error_reporting.py | 3 +- .../contentcuration/middleware/locale.py | 4 +- .../contentcuration/middleware/session.py | 4 +- .../migration_production_settings.py | 6 +- .../0001_squashed_0094_auto_20180910_2342.py | 1945 ++++++++++++----- .../migrations/0002_auto_20181220_1734.py | 6 +- .../migrations/0003_copy_data.py | 18 +- .../0004_remove_rename_json_field.py | 10 +- .../contentcuration/migrations/0097_task.py | 71 +- .../migrations/0098_auto_20190424_1709.py | 97 +- .../migrations/0099_auto_20190715_2201.py | 6 +- .../0100_calculate_included_languages.py | 6 +- .../0101_extra_fields_json_field.py | 22 +- .../migrations/0102_auto_20190904_1627.py | 6 +- .../migrations/0103_auto_20190905_0408.py | 65 +- .../migrations/0104_auto_20191028_2325.py | 37 +- .../migrations/0105_channel_published_data.py | 6 +- .../migrations/0106_auto_20191113_0217.py | 6 +- .../migrations/0107_auto_20191115_2344.py | 74 +- .../migrations/0108_mptt_tree_id_migration.py | 17 +- .../migrations/0109_auto_20191202_1759.py | 10 +- .../migrations/0110_auto_20200511_2245.py | 10 +- .../migrations/0111_auto_20200513_2252.py | 10 +- .../migrations/0112_auto_20200613_0050.py | 85 +- .../migrations/0113_channel_tagline.py | 6 +- .../0114_assessment_item_unique_keypair.py | 28 +- .../0116_index_channel_contentnode_file.py | 10 
+- .../migrations/0118_relaunch_migrations.py | 104 +- .../migrations/0119_task_channel_id.py | 6 +- .../migrations/0120_auto_20210128_1646.py | 10 +- .../migrations/0121_auto_20210305_2028.py | 8 +- .../migrations/0122_file_modified_index.py | 8 +- .../migrations/0123_auto_20210407_0057.py | 10 +- .../migrations/0124_user_feature_flags.py | 6 +- .../0125_user_feature_flags_default.py | 10 +- .../migrations/0126_auto_20210219_2314.py | 30 +- .../migrations/0127_auto_20210504_1744.py | 39 +- .../migrations/0128_auto_20210511_1605.py | 41 +- .../migrations/0129_auto_20210519_2213.py | 276 ++- .../migrations/0130_auto_20210706_2005.py | 93 +- .../migrations/0131_auto_20210707_2326.py | 4 +- .../migrations/0132_auto_20210708_0011.py | 4 +- .../migrations/0133_auto_20220124_2149.py | 20 +- .../migrations/0135_add_metadata_labels.py | 26 +- .../0136_contentnode_suggested_duration.py | 12 +- .../migrations/0137_channelhistory.py | 57 +- .../contentcuration/migrations/0138_change.py | 65 +- .../migrations/0139_django_celery_results.py | 38 +- .../migrations/0140_delete_task.py | 4 +- .../migrations/0141_add_task_signature.py | 29 +- ...142_remove_file_file_media_duration_int.py | 6 +- .../0143_file_file_media_duration_int.py | 25 +- .../migrations/0144_soft_delete_user.py | 48 +- .../migrations/0145_custom_task_metadata.py | 61 +- .../migrations/0146_drop_taskresult_fields.py | 31 +- .../migrations/0147_alter_formatpreset_id.py | 44 +- ...nsevent_recommendationsinteractionevent.py | 128 +- .../0149_unpublishable_change_field.py | 13 +- .../0150_bloompub_format_and_preset.py | 76 +- ..._alter_recommendationsevent_time_hidden.py | 6 +- contentcuration/contentcuration/models.py | 1318 ++++++++--- .../contentcuration/node_metadata/cte.py | 68 +- .../contentcuration/node_metadata/query.py | 43 +- .../not_production_settings.py | 4 +- .../contentcuration/perftools/objective.py | 66 +- .../contentcuration/production_settings.py | 20 +- .../contentcuration/serializers.py | 45 +- contentcuration/contentcuration/settings.py | 321 ++- contentcuration/contentcuration/signals.py | 2 +- contentcuration/contentcuration/tasks.py | 55 +- .../permissions/permissions_email_subject.txt | 2 +- .../templates/perseus/exercise.json | 2 +- .../registration/custom_email_subject.txt | 2 +- .../registration_information_email.txt | 2 +- .../templatetags/export_tags.py | 48 +- .../templatetags/license_tags.py | 110 +- .../templatetags/perseus_tags.py | 1 + .../templatetags/translation_tags.py | 20 +- .../tests/db/test_advisory_lock.py | 250 ++- .../contentcuration/tests/helpers.py | 1 + ...ment_item_keypair_constraint_migration_.py | 7 +- .../contentcuration/tests/test_asynctask.py | 24 +- .../tests/test_channel_model.py | 12 +- .../tests/test_chef_pipeline.py | 4 +- .../tests/test_completion_criteria.py | 21 +- .../tests/test_contentnodes.py | 382 +++- .../tests/test_createchannel.py | 16 +- .../contentcuration/tests/test_decorators.py | 4 +- .../tests/test_exportchannel.py | 417 ++-- .../tests/test_format_preset_model.py | 2 - .../contentcuration/tests/test_forms.py | 67 +- .../contentcuration/tests/test_gcs_storage.py | 36 +- .../contentcuration/tests/test_models.py | 425 ++-- .../contentcuration/tests/test_parser.py | 12 +- ..._rectify_source_field_migraiton_command.py | 90 +- .../tests/test_restore_channel.py | 169 +- .../tests/test_secrettoken_model.py | 1 - .../contentcuration/tests/test_serializers.py | 176 +- .../contentcuration/tests/test_setlanguage.py | 60 +- .../contentcuration/tests/test_settings.py | 7 +- 
.../tests/test_storage_common.py | 17 +- .../contentcuration/tests/test_sync.py | 122 +- .../contentcuration/tests/test_utils.py | 59 +- .../tests/test_zipcontentview.py | 38 +- .../contentcuration/tests/testdata.py | 269 +-- .../contentcuration/tests/utils/__init__.py | 11 +- .../tests/utils/celery/test_tasks.py | 4 +- .../tests/utils/migration_test_case.py | 9 +- .../contentcuration/tests/utils/test_cache.py | 34 +- .../tests/utils/test_garbage_collect.py | 69 +- .../contentcuration/tests/utils/test_nodes.py | 23 +- .../tests/utils/test_recommendations.py | 475 ++-- .../contentcuration/tests/views/test_nodes.py | 56 +- .../tests/views/test_settings.py | 17 +- .../contentcuration/tests/views/test_users.py | 54 +- .../tests/views/test_views_base.py | 14 +- .../tests/views/test_views_internal.py | 92 +- .../contentcuration/tests/viewsets/base.py | 51 +- .../tests/viewsets/test_assessmentitem.py | 232 +- .../tests/viewsets/test_bookmark.py | 9 +- .../tests/viewsets/test_channel.py | 467 ++-- .../tests/viewsets/test_channelset.py | 56 +- .../tests/viewsets/test_clipboard.py | 30 +- .../tests/viewsets/test_contentnode.py | 719 ++++-- .../tests/viewsets/test_file.py | 255 ++- .../tests/viewsets/test_flagged.py | 85 +- .../tests/viewsets/test_invitation.py | 147 +- .../tests/viewsets/test_recommendations.py | 271 ++- .../tests/viewsets/test_user.py | 54 +- contentcuration/contentcuration/urls.py | 321 ++- .../utils/automation_manager.py | 8 +- .../contentcuration/utils/cache.py | 9 +- .../contentcuration/utils/celery/app.py | 4 +- .../contentcuration/utils/celery/tasks.py | 61 +- .../contentcuration/utils/cloud_storage.py | 3 +- .../contentcuration/utils/csv_writer.py | 144 +- .../contentcuration/utils/db_tools.py | 95 +- .../contentcuration/utils/files.py | 81 +- .../contentcuration/utils/garbage_collect.py | 65 +- .../contentcuration/utils/gcs_storage.py | 18 +- .../contentcuration/utils/import_tools.py | 435 ++-- .../contentcuration/utils/incidents.py | 97 +- .../contentcuration/utils/nodes.py | 246 ++- .../contentcuration/utils/pagination.py | 33 +- .../contentcuration/utils/parser.py | 107 +- .../contentcuration/utils/publish.py | 769 +++++-- .../contentcuration/utils/recommendations.py | 209 +- .../contentcuration/utils/secretmanagement.py | 20 +- .../contentcuration/utils/sentry.py | 4 +- .../contentcuration/utils/storage_common.py | 29 +- contentcuration/contentcuration/utils/sync.py | 7 +- .../contentcuration/utils/transcription.py | 8 +- contentcuration/contentcuration/utils/user.py | 6 +- .../contentcuration/views/admin.py | 24 +- contentcuration/contentcuration/views/base.py | 96 +- .../contentcuration/views/internal.py | 243 +- .../contentcuration/views/json_dump.py | 1 + .../contentcuration/views/nodes.py | 47 +- contentcuration/contentcuration/views/pwa.py | 1 + .../contentcuration/views/settings.py | 8 +- .../contentcuration/views/users.py | 63 +- contentcuration/contentcuration/views/zip.py | 72 +- .../viewsets/assessmentitem.py | 26 +- .../contentcuration/viewsets/base.py | 71 +- .../contentcuration/viewsets/bookmark.py | 8 +- .../contentcuration/viewsets/channel.py | 259 ++- .../contentcuration/viewsets/channelset.py | 16 +- .../contentcuration/viewsets/common.py | 17 +- .../contentcuration/viewsets/contentnode.py | 146 +- .../contentcuration/viewsets/feedback.py | 56 +- .../contentcuration/viewsets/file.py | 17 +- .../contentcuration/viewsets/invitation.py | 18 +- .../viewsets/recommendation.py | 28 +- .../contentcuration/viewsets/sync/base.py | 10 +- 
.../viewsets/sync/constants.py | 42 +- .../contentcuration/viewsets/sync/endpoint.py | 115 +- .../contentcuration/viewsets/sync/utils.py | 19 +- .../contentcuration/viewsets/user.py | 50 +- contentcuration/kolibri_content/__init__.py | 2 +- contentcuration/kolibri_content/apps.py | 4 +- .../kolibri_content/base_models.py | 27 +- contentcuration/kolibri_content/fields.py | 1 + .../migrations/0001_initial.py | 8 +- .../0003_contentnode_coach_content.py | 6 +- .../0008_channelmetadata_tagline.py | 6 +- .../migrations/0009_contentnode_options.py | 6 +- .../migrations/0010_auto_20210202_0604.py | 56 +- .../migrations/0011_auto_20210504_1744.py | 38 +- .../migrations/0012_auto_20210511_1605.py | 40 +- .../migrations/0013_auto_20210519_1759.py | 51 +- .../migrations/0014_auto_20210603_1536.py | 20 +- .../migrations/0015_auto_20210707_1606.py | 118 +- .../migrations/0016_contentnode_duration.py | 6 +- .../migrations/0018_auto_20220224_2031.py | 26 +- .../migrations/0019_auto_20230207_0116.py | 58 +- .../migrations/0020_alter_file_preset.py | 43 +- .../migrations/0021_auto_20240612_1847.py | 106 +- .../migrations/0022_auto_20240915_1414.py | 66 +- contentcuration/kolibri_public/apps.py | 4 +- .../kolibri_public/import_metadata_view.py | 36 +- .../export_channels_to_kolibri_public.py | 61 +- ...ify_incorrect_contentnode_source_fields.py | 26 +- .../migrations/0002_mptttreeidmanager.py | 14 +- .../migrations/0004_auto_20240612_1847.py | 77 +- .../0005_alter_localfile_extension.py | 36 +- contentcuration/kolibri_public/search.py | 12 +- contentcuration/kolibri_public/stopwords.py | 4 +- contentcuration/kolibri_public/tests/base.py | 14 +- .../kolibri_public/tests/test_content_app.py | 24 +- .../tests/test_importmetadata_api.py | 20 +- .../kolibri_public/tests/test_mapper.py | 68 +- .../tests/test_public_v1_api.py | 4 +- contentcuration/kolibri_public/urls.py | 22 +- .../kolibri_public/utils/annotation.py | 13 +- .../kolibri_public/utils/mapper.py | 94 +- contentcuration/kolibri_public/views.py | 21 +- contentcuration/kolibri_public/views_v1.py | 79 +- contentcuration/search/apps.py | 2 +- contentcuration/search/constants.py | 32 +- .../commands/set_channel_tsvectors.py | 33 +- .../commands/set_contentnode_tsvectors.py | 81 +- .../search/migrations/0001_initial.py | 32 +- .../migrations/0002_auto_20201215_2110.py | 6 +- .../search/migrations/0003_fulltextsearch.py | 94 +- contentcuration/search/models.py | 22 +- .../search/tests/test_savesearch.py | 27 +- contentcuration/search/tests/test_search.py | 39 +- contentcuration/search/urls.py | 6 +- contentcuration/search/utils.py | 8 +- .../search/viewsets/contentnode.py | 30 +- deploy/generatejsconstantfiles.py | 35 +- deploy/mime.types | 2 +- deploy/probers/base.py | 37 +- deploy/probers/channel_creation_probe.py | 23 +- deploy/probers/channel_edit_page_probe.py | 4 +- deploy/probers/channel_update_probe.py | 16 +- deploy/probers/postgres_probe.py | 18 +- deploy/probers/postmark_api_probe.py | 23 +- deploy/probers/publishing_status_probe.py | 21 +- deploy/probers/task_queue_probe.py | 8 +- deploy/probers/topic_creation_probe.py | 23 +- deploy/probers/unapplied_changes_probe.py | 10 +- docker/entrypoint.py | 11 +- .../apply-inheritable-metadata.feature | 2 +- .../quick-edit-single-resource.feature | 2 +- k8s/create-cloudsql-proxy.sh | 2 +- 291 files changed, 13690 insertions(+), 5847 deletions(-) diff --git a/.dockerignore b/.dockerignore index f5772ce6bb..233f6836f8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,4 +6,4 @@ __pycache__ *.pyc *.swp 
k8s/templates/ -cloudbuild-*.yaml \ No newline at end of file +cloudbuild-*.yaml diff --git a/.github/workflows/community-contribution-labeling.yml b/.github/workflows/community-contribution-labeling.yml index f91d36a7cd..701465ba1c 100644 --- a/.github/workflows/community-contribution-labeling.yml +++ b/.github/workflows/community-contribution-labeling.yml @@ -9,4 +9,4 @@ jobs: uses: learningequality/.github/.github/workflows/community-contribution-label.yml@main secrets: LE_BOT_APP_ID: ${{ secrets.LE_BOT_APP_ID }} - LE_BOT_PRIVATE_KEY: ${{ secrets.LE_BOT_PRIVATE_KEY }} \ No newline at end of file + LE_BOT_PRIVATE_KEY: ${{ secrets.LE_BOT_PRIVATE_KEY }} diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index a3ac3a4b21..8658181f97 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -41,16 +41,7 @@ jobs: uses: actions/setup-node@v4 with: node-version: '18.x' - - name: Get pnpm store path - id: pnpm-store-path - run: echo "dir=$(pnpm store path)" >> $GITHUB_OUTPUT - - name: Cache Node.js modules - uses: actions/cache@v4 - with: - path: ${{ steps.pnpm-store-path.outputs.dir }} - key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }} - restore-keys: | - ${{ runner.os }}-pnpm- + cache: 'pnpm' - name: Install dependencies run: | pnpm install --frozen-lockfile diff --git a/LICENSE b/LICENSE index b10a6fd5cb..c0688ab8ea 100644 --- a/LICENSE +++ b/LICENSE @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/contentcuration/automation/admin.py b/contentcuration/automation/admin.py index 4185d360e9..5d28852b15 100644 --- a/contentcuration/automation/admin.py +++ b/contentcuration/automation/admin.py @@ -1,3 +1,2 @@ # from django.contrib import admin - # Register your models here. 
diff --git a/contentcuration/automation/apps.py b/contentcuration/automation/apps.py index eaa1d3d4e1..0fbbd020ac 100644 --- a/contentcuration/automation/apps.py +++ b/contentcuration/automation/apps.py @@ -2,5 +2,5 @@ class AutomationConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'automation' + default_auto_field = "django.db.models.BigAutoField" + name = "automation" diff --git a/contentcuration/automation/migrations/0001_initial.py b/contentcuration/automation/migrations/0001_initial.py index 27563d71b7..6b62bc0ae7 100644 --- a/contentcuration/automation/migrations/0001_initial.py +++ b/contentcuration/automation/migrations/0001_initial.py @@ -11,43 +11,59 @@ class Migration(migrations.Migration): initial = True dependencies = [ - ('kolibri_public', '0005_alter_localfile_extension'), + ("kolibri_public", "0005_alter_localfile_extension"), ] operations = [ migrations.CreateModel( - name='RecommendationsCache', + name="RecommendationsCache", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('request_hash', models.CharField(max_length=32, null=True)), - ('topic_id', models.UUIDField()), - ('rank', models.IntegerField(default=0, null=True)), - ('override_threshold', models.BooleanField(default=False)), - ('timestamp', models.DateTimeField(auto_now_add=True)), - ('channel', models.ForeignKey( - blank=True, - null=True, - on_delete=django.db.models.deletion.CASCADE, - related_name='channel_recommendations', - to='contentcuration.channel')), - ('contentnode', models.ForeignKey( - blank=True, - null=True, - on_delete=django.db.models.deletion.CASCADE, - related_name='contentnode_recommendations', - to='kolibri_public.contentnode')), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("request_hash", models.CharField(max_length=32, null=True)), + ("topic_id", models.UUIDField()), + ("rank", models.IntegerField(default=0, null=True)), + ("override_threshold", models.BooleanField(default=False)), + ("timestamp", models.DateTimeField(auto_now_add=True)), + ( + "channel", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_recommendations", + to="contentcuration.channel", + ), + ), + ( + "contentnode", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="contentnode_recommendations", + to="kolibri_public.contentnode", + ), + ), ], ), migrations.AddIndex( - model_name='recommendationscache', - index=models.Index(fields=['request_hash'], name='request_hash_idx'), + model_name="recommendationscache", + index=models.Index(fields=["request_hash"], name="request_hash_idx"), ), migrations.AddIndex( - model_name='recommendationscache', - index=models.Index(fields=['contentnode'], name='contentnode_idx'), + model_name="recommendationscache", + index=models.Index(fields=["contentnode"], name="contentnode_idx"), ), migrations.AlterUniqueTogether( - name='recommendationscache', - unique_together={('request_hash', 'contentnode')}, + name="recommendationscache", + unique_together={("request_hash", "contentnode")}, ), ] diff --git a/contentcuration/automation/models.py b/contentcuration/automation/models.py index 48d52134e6..5739145914 100644 --- a/contentcuration/automation/models.py +++ b/contentcuration/automation/models.py @@ -18,14 +18,14 @@ class RecommendationsCache(models.Model): ContentNode, null=True, blank=True, - 
related_name='contentnode_recommendations', + related_name="contentnode_recommendations", on_delete=models.CASCADE, ) channel = models.ForeignKey( Channel, null=True, blank=True, - related_name='channel_recommendations', + related_name="channel_recommendations", on_delete=models.CASCADE, ) rank = models.IntegerField(default=0, null=True) @@ -33,8 +33,8 @@ class RecommendationsCache(models.Model): timestamp = models.DateTimeField(auto_now_add=True) class Meta: - unique_together = ('request_hash', 'contentnode') + unique_together = ("request_hash", "contentnode") indexes = [ - models.Index(fields=['request_hash'], name=REQUEST_HASH_INDEX_NAME), - models.Index(fields=['contentnode'], name=CONTENTNODE_INDEX_NAME), + models.Index(fields=["request_hash"], name=REQUEST_HASH_INDEX_NAME), + models.Index(fields=["contentnode"], name=CONTENTNODE_INDEX_NAME), ] diff --git a/contentcuration/automation/tests.py b/contentcuration/automation/tests.py index a79ca8be56..601fc8616b 100644 --- a/contentcuration/automation/tests.py +++ b/contentcuration/automation/tests.py @@ -1,3 +1,2 @@ # from django.test import TestCase - # Create your tests here. diff --git a/contentcuration/automation/tests/appnexus/test_base.py b/contentcuration/automation/tests/appnexus/test_base.py index 3fa3fa8e74..e29358e45c 100644 --- a/contentcuration/automation/tests/appnexus/test_base.py +++ b/contentcuration/automation/tests/appnexus/test_base.py @@ -13,29 +13,33 @@ def test_session_with_max_connection_age_request(): - with patch.object(requests.Session, 'request') as mock_request: + with patch.object(requests.Session, "request") as mock_request: session = SessionWithMaxConnectionAge() - session.request('GET', 'https://example.com') + session.request("GET", "https://example.com") assert mock_request.call_count == 1 def test_session_with_max_connection_age_not_closing_connections(): - with patch.object(requests.Session, 'close') as mock_close, patch.object(requests.Session, 'request') as mock_request: + with patch.object(requests.Session, "close") as mock_close, patch.object( + requests.Session, "request" + ) as mock_request: session = SessionWithMaxConnectionAge(60) - session.request('GET', 'https://example.com') + session.request("GET", "https://example.com") time.sleep(0.1) - session.request('GET', 'https://example.com') + session.request("GET", "https://example.com") assert mock_close.call_count == 0 assert mock_request.call_count == 2 def test_session_with_max_connection_age_closing_connections(): - with patch.object(requests.Session, 'close') as mock_close, patch.object(requests.Session, 'request') as mock_request: + with patch.object(requests.Session, "close") as mock_close, patch.object( + requests.Session, "request" + ) as mock_request: session = SessionWithMaxConnectionAge(1) - session.request('GET', 'https://example.com') + session.request("GET", "https://example.com") time.sleep(2) - session.request('GET', 'https://example.com') + session.request("GET", "https://example.com") assert mock_close.call_count == 1 assert mock_request.call_count == 2 diff --git a/contentcuration/automation/tests/test_recommendations_cache_model.py b/contentcuration/automation/tests/test_recommendations_cache_model.py index 01024c3ee0..535e45c21e 100644 --- a/contentcuration/automation/tests/test_recommendations_cache_model.py +++ b/contentcuration/automation/tests/test_recommendations_cache_model.py @@ -9,32 +9,31 @@ class TestRecommendationsCache(StudioTestCase): - def setUp(self): self.topic_id = uuid.uuid4() self.content_node = 
ContentNode.objects.create( id=uuid.uuid4(), - title='Test Content Node', + title="Test Content Node", content_id=uuid.uuid4(), channel_id=uuid.uuid4(), ) self.channel = Channel.objects.create( id=uuid.uuid4(), - name='Test Channel', + name="Test Channel", actor_id=1, ) self.cache = RecommendationsCache.objects.create( - request_hash='test_hash', + request_hash="test_hash", topic_id=self.topic_id, contentnode=self.content_node, channel=self.channel, rank=1, - override_threshold=False + override_threshold=False, ) def test_cache_creation(self): self.assertIsInstance(self.cache, RecommendationsCache) - self.assertEqual(self.cache.request_hash, 'test_hash') + self.assertEqual(self.cache.request_hash, "test_hash") self.assertEqual(self.cache.topic_id, self.topic_id) self.assertEqual(self.cache.contentnode, self.content_node) self.assertEqual(self.cache.channel, self.channel) @@ -42,26 +41,25 @@ def test_cache_creation(self): self.assertFalse(self.cache.override_threshold) def test_cache_retrieval(self): - retrieved_cache = RecommendationsCache.objects.get(request_hash='test_hash') + retrieved_cache = RecommendationsCache.objects.get(request_hash="test_hash") self.assertEqual(retrieved_cache, self.cache) def test_cache_uniqueness(self): with self.assertRaises(IntegrityError): RecommendationsCache.objects.create( - request_hash='test_hash', + request_hash="test_hash", topic_id=self.topic_id, contentnode=self.content_node, channel=self.channel, rank=2, - override_threshold=True + override_threshold=True, ) def test_bulk_create_ignore_conflicts_true(self): initial_count = RecommendationsCache.objects.count() try: RecommendationsCache.objects.bulk_create( - [self.cache, self.cache], - ignore_conflicts=True + [self.cache, self.cache], ignore_conflicts=True ) except IntegrityError: self.fail("bulk_create raised IntegrityError unexpectedly!") @@ -72,6 +70,5 @@ def test_bulk_create_ignore_conflicts_true(self): def test_bulk_create_ignore_conflicts_false(self): with self.assertRaises(IntegrityError): RecommendationsCache.objects.bulk_create( - [self.cache, self.cache], - ignore_conflicts=False + [self.cache, self.cache], ignore_conflicts=False ) diff --git a/contentcuration/automation/utils/appnexus/base.py b/contentcuration/automation/utils/appnexus/base.py index 616c057c93..c242593feb 100644 --- a/contentcuration/automation/utils/appnexus/base.py +++ b/contentcuration/automation/utils/appnexus/base.py @@ -12,9 +12,10 @@ class SessionWithMaxConnectionAge(requests.Session): """ - Session with a maximum connection age. If the connection is older than the specified age, it will be closed and a new one will be created. - The age is specified in seconds. + Session with a maximum connection age. If the connection is older than the specified age, it will be closed and a new one will be created. + The age is specified in seconds. 
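+    Each request() call checks how long the session has been alive and closes
+    it first once that age is exceeded, so the next request opens a fresh
+    connection.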
""" + def __init__(self, age=100): super().__init__() self.age = age @@ -32,6 +33,7 @@ def request(self, *args, **kwargs): class BackendRequest(object): """ Class that holds the request information for the backend """ + def __init__( self, method, @@ -41,7 +43,7 @@ def __init__( json=None, headers=None, timeout=(5, 100), - **kwargs + **kwargs, ): self.method = method self.path = path @@ -56,6 +58,7 @@ def __init__( class BackendResponse(object): """ Class that should be inherited by specific backend for its responses""" + def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) @@ -63,6 +66,7 @@ def __init__(self, **kwargs): class Backend(ABC): """ An abstract base class for backend interfaces that also implements the singleton pattern """ + _instance = None session = None base_url = None diff --git a/contentcuration/automation/views.py b/contentcuration/automation/views.py index fd0e044955..3e6a05d4a3 100644 --- a/contentcuration/automation/views.py +++ b/contentcuration/automation/views.py @@ -1,3 +1,2 @@ # from django.shortcuts import render - # Create your views here. diff --git a/contentcuration/contentcuration/admin.py b/contentcuration/contentcuration/admin.py index 37b76372c9..d64457522d 100644 --- a/contentcuration/contentcuration/admin.py +++ b/contentcuration/contentcuration/admin.py @@ -9,8 +9,13 @@ class UserAdmin(admin.ModelAdmin): - list_display = ('first_name', 'last_name', 'email', 'date_joined',) - date_hierarchy = 'date_joined' + list_display = ( + "first_name", + "last_name", + "email", + "date_joined", + ) + date_hierarchy = "date_joined" admin.site.register(User, UserAdmin) diff --git a/contentcuration/contentcuration/api.py b/contentcuration/contentcuration/api.py index b297ffaba6..77b8f7b054 100644 --- a/contentcuration/contentcuration/api.py +++ b/contentcuration/contentcuration/api.py @@ -25,7 +25,9 @@ def write_file_to_storage(fobj, check_valid=False, name=None): fobj.seek(0) if check_valid and hashed_filename != filename: - raise SuspiciousOperation("Failed to upload file {0}: hash is invalid".format(name)) + raise SuspiciousOperation( + "Failed to upload file {0}: hash is invalid".format(name) + ) # Get location of file file_path = models.generate_object_storage_name(hashed_filename, full_filename) @@ -33,7 +35,11 @@ def write_file_to_storage(fobj, check_valid=False, name=None): # Write file storage = default_storage if storage.exists(file_path): - logging.info("{} exists in Google Cloud Storage, so it's not saved again.".format(file_path)) + logging.info( + "{} exists in Google Cloud Storage, so it's not saved again.".format( + file_path + ) + ) else: storage.save(file_path, fobj) return full_filename @@ -52,7 +58,11 @@ def write_raw_content_to_storage(contents, ext=None): # Write file storage = default_storage if storage.exists(file_path): - logging.info("{} exists in Google Cloud Storage, so it's not saved again.".format(file_path)) + logging.info( + "{} exists in Google Cloud Storage, so it's not saved again.".format( + file_path + ) + ) else: storage.save(file_path, BytesIO(contents)) diff --git a/contentcuration/contentcuration/apps.py b/contentcuration/contentcuration/apps.py index 6f344aa63d..06d62d9561 100644 --- a/contentcuration/contentcuration/apps.py +++ b/contentcuration/contentcuration/apps.py @@ -2,7 +2,7 @@ class ContentConfig(AppConfig): - name = 'contentcuration' + name = "contentcuration" def ready(self): # Import signals diff --git a/contentcuration/contentcuration/collectstatic_settings.py 
b/contentcuration/contentcuration/collectstatic_settings.py index dae329577e..038f204069 100644 --- a/contentcuration/contentcuration/collectstatic_settings.py +++ b/contentcuration/contentcuration/collectstatic_settings.py @@ -1,6 +1,6 @@ -# Settings used by containers running collectstatic. Scope our services +# Settings used by containers running collectstatic. Scope our services # to the only ones needed to run collectstatic. - +# flake8: noqa: F403, F405 from .settings import * -CACHES['default']['BACKEND'] = "django_prometheus.cache.backends.locmem.LocMemCache" +CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.locmem.LocMemCache" diff --git a/contentcuration/contentcuration/constants/completion_criteria.py b/contentcuration/contentcuration/constants/completion_criteria.py index ffed5d7821..1a8c101e38 100644 --- a/contentcuration/contentcuration/constants/completion_criteria.py +++ b/contentcuration/contentcuration/constants/completion_criteria.py @@ -13,7 +13,9 @@ def _build_validator(): """ cls = validator_for(completion_criteria.SCHEMA) validator = cls(completion_criteria.SCHEMA) - validator.resolver.store.update(RefResolver.from_schema(mastery_criteria.SCHEMA).store) + validator.resolver.store.update( + RefResolver.from_schema(mastery_criteria.SCHEMA).store + ) return validator @@ -86,10 +88,16 @@ def validate(data, kind=None): elif error.absolute_path: # if there's a path to a field, we can give a specific error json_path = ".".join(error.absolute_path) - error_descriptions.append(ValidationError("{} {}".format(json_path, error.message))) + error_descriptions.append( + ValidationError("{} {}".format(json_path, error.message)) + ) else: # without a path, likely top-level validation error, e.g. `anyOf` conditions - error_descriptions.append(ValidationError("object doesn't satisfy '{}' conditions".format(error.validator))) + error_descriptions.append( + ValidationError( + "object doesn't satisfy '{}' conditions".format(error.validator) + ) + ) if error_descriptions: e = ValidationError("Completion criteria doesn't conform to schema") diff --git a/contentcuration/contentcuration/constants/feature_flags.py b/contentcuration/contentcuration/constants/feature_flags.py index 3011ad3385..ae04284f20 100644 --- a/contentcuration/contentcuration/constants/feature_flags.py +++ b/contentcuration/contentcuration/constants/feature_flags.py @@ -9,7 +9,9 @@ def _schema(): """ Loads JSON schema file """ - file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../static/feature_flags.json') + file = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "../static/feature_flags.json" + ) with open(file) as f: data = json.load(f) return data diff --git a/contentcuration/contentcuration/constants/feedback.py b/contentcuration/contentcuration/constants/feedback.py index 178c4a99ab..be92c2f99e 100644 --- a/contentcuration/contentcuration/constants/feedback.py +++ b/contentcuration/contentcuration/constants/feedback.py @@ -1,8 +1,8 @@ FEEDBACK_TYPE_CHOICES = ( - ('IMPORTED', 'Imported'), - ('REJECTED', 'Rejected'), - ('PREVIEWED', 'Previewed'), - ('SHOWMORE', 'Show More'), - ('IGNORED', 'Ignored'), - ('FLAGGED', 'Flagged'), + ("IMPORTED", "Imported"), + ("REJECTED", "Rejected"), + ("PREVIEWED", "Previewed"), + ("SHOWMORE", "Show More"), + ("IGNORED", "Ignored"), + ("FLAGGED", "Flagged"), ) diff --git a/contentcuration/contentcuration/db/advisory_lock.py b/contentcuration/contentcuration/db/advisory_lock.py index f1d71995ed..52400659c4 100644 --- 
a/contentcuration/contentcuration/db/advisory_lock.py +++ b/contentcuration/contentcuration/db/advisory_lock.py @@ -8,8 +8,8 @@ # signed limits are 2**32 or 2**64, so one less power of 2 # to become unsigned limits (half above 0, half below 0) -INT_32BIT = 2**31 -INT_64BIT = 2**63 +INT_32BIT = 2 ** 31 +INT_64BIT = 2 ** 63 class AdvisoryLockBusy(RuntimeError): @@ -64,7 +64,7 @@ def execute_lock(key1, key2=None, unlock=False, session=False, shared=False, wai xact_="" if session else "xact_", lock="unlock" if unlock else "lock", _shared="_shared" if shared else "", - keys=", ".join(["%s" for i in range(0, 2 if key2 is not None else 1)]) + keys=", ".join(["%s" for i in range(0, 2 if key2 is not None else 1)]), ) log_query = f"'{query}' with params {keys}" diff --git a/contentcuration/contentcuration/db/models/expressions.py b/contentcuration/contentcuration/db/models/expressions.py index 3daec06977..5ea7afbb14 100644 --- a/contentcuration/contentcuration/db/models/expressions.py +++ b/contentcuration/contentcuration/db/models/expressions.py @@ -15,8 +15,11 @@ class WhenQ(Q): Example: queryset.annotate(some_thing=Case(When(condition=QExpression(BoolExpr(...)), then=...))) """ + def resolve_expression(self, *args, **kwargs): - return WhereNode([child.resolve_expression(*args, **kwargs) for child in self.children]) + return WhereNode( + [child.resolve_expression(*args, **kwargs) for child in self.children] + ) class BooleanComparison(CombinedExpression): @@ -27,6 +30,7 @@ class BooleanComparison(CombinedExpression): Example: BooleanExpression(F('x'), '<=', Value(123)) """ + output_field = BooleanField() @@ -39,8 +43,9 @@ class IsNull(BooleanComparison): IsNull('my_field_name') -> my_field_name IS NULL IsNull('my_field_name', negate=True) -> my_field_name IS NOT NULL """ + def __init__(self, field_name, negate=False): - operator = 'IS NOT' if negate else 'IS' + operator = "IS NOT" if negate else "IS" super(IsNull, self).__init__(F(field_name), operator, Value(None)) @@ -55,7 +60,8 @@ class Array(Func): F("other_table__field") ) """ + function = "ARRAY" - template = '%(function)s[%(expressions)s]' - arg_joiner = ', ' + template = "%(function)s[%(expressions)s]" + arg_joiner = ", " arity = None diff --git a/contentcuration/contentcuration/db/models/functions.py b/contentcuration/contentcuration/db/models/functions.py index 0cea0b62c2..9c7e360266 100644 --- a/contentcuration/contentcuration/db/models/functions.py +++ b/contentcuration/contentcuration/db/models/functions.py @@ -19,6 +19,7 @@ class Unnest(Func): 2 | b ... """ + function = "UNNEST" arity = 1 @@ -31,6 +32,7 @@ class ArrayRemove(Func): ArrayRemove(Array(1, 2, 3, None), None) => Array[1, 2, 3] """ + function = "ARRAY_REMOVE" arity = 2 @@ -51,5 +53,6 @@ class JSONObjectKeys(Func): other_key ... 
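    Wraps PostgreSQL's JSONB_OBJECT_KEYS, which returns one row per top-level
    key of the JSON object, so annotating a queryset with it can multiply rows.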
""" + function = "JSONB_OBJECT_KEYS" arity = 1 diff --git a/contentcuration/contentcuration/db/models/manager.py b/contentcuration/contentcuration/db/models/manager.py index db1e3a77bf..5121a9e99f 100644 --- a/contentcuration/contentcuration/db/models/manager.py +++ b/contentcuration/contentcuration/db/models/manager.py @@ -67,13 +67,15 @@ def log_lock_time_spent(timespent): "suggested_duration", } -EDIT_ALLOWED_OVERRIDES = ALLOWED_OVERRIDES.union({ - "license_id", - "license_description", - "extra_fields", - "copyright_holder", - "author", -}) +EDIT_ALLOWED_OVERRIDES = ALLOWED_OVERRIDES.union( + { + "license_id", + "license_description", + "extra_fields", + "copyright_holder", + "author", + } +) class CustomContentNodeTreeManager(TreeManager.from_queryset(CustomTreeQuerySet)): @@ -114,14 +116,16 @@ def _attempt_lock(self, tree_ids, shared_tree_ids=None): # This will mean that every process acquires locks in the same order # and should help to minimize deadlocks for tree_id in tree_ids: - advisory_lock(TREE_LOCK, key2=tree_id, shared=tree_id in shared_tree_ids) + advisory_lock( + TREE_LOCK, key2=tree_id, shared=tree_id in shared_tree_ids + ) yield log_lock_time_spent(time.time() - start) @contextlib.contextmanager def lock_mptt(self, *tree_ids, **kwargs): tree_ids = sorted((t for t in set(tree_ids) if t is not None)) - shared_tree_ids = kwargs.pop('shared_tree_ids', []) + shared_tree_ids = kwargs.pop("shared_tree_ids", []) # If this is not inside the context of a delay context manager # or updates are not disabled set a lock on the tree_ids. if ( @@ -229,14 +233,17 @@ def move_node(self, node, target, position="last-child"): self._move_node(node, target, position=position) node.save(skip_lock=True) node_moved.send( - sender=node.__class__, instance=node, target=target, position=position, + sender=node.__class__, + instance=node, + target=target, + position=position, ) # when moving to a new tree, like trash, we'll blanket reset the modified for the # new root and the old root nodes if old_parent.tree_id != target.tree_id: for size_cache in [ ResourceSizeCache(target.get_root()), - ResourceSizeCache(old_parent.get_root()) + ResourceSizeCache(old_parent.get_root()), ]: size_cache.reset_modified(None) @@ -291,7 +298,9 @@ def _clone_node( copy.update(self.get_source_attributes(source)) if isinstance(mods, dict): - allowed_keys = EDIT_ALLOWED_OVERRIDES if can_edit_source_channel else ALLOWED_OVERRIDES + allowed_keys = ( + EDIT_ALLOWED_OVERRIDES if can_edit_source_channel else ALLOWED_OVERRIDES + ) for key, value in mods.items(): if key in copy and key in allowed_keys: copy[key] = value @@ -324,7 +333,12 @@ def _recurse_to_create_tree( mods, ): copy = self._clone_node( - source, parent_id, source_channel_id, can_edit_source_channel, pk, mods, + source, + parent_id, + source_channel_id, + can_edit_source_channel, + pk, + mods, ) if source.kind_id == content_kinds.TOPIC and source.id in nodes_by_parent: @@ -367,7 +381,7 @@ def copy_node( excluded_descendants=None, can_edit_source_channel=None, batch_size=None, - progress_tracker=None + progress_tracker=None, ): """ :type progress_tracker: contentcuration.utils.celery.ProgressTracker|None @@ -503,7 +517,9 @@ def _copy_tags(self, source_copy_id_map): # In the case that we are copying a node that is in the weird state of having a tag # that is duplicated (with a channel tag and a null channel tag) this can cause an error # so we ignore conflicts here to ignore the duplicate tags. 
- self.model.tags.through.objects.bulk_create(mappings_to_create, ignore_conflicts=True) + self.model.tags.through.objects.bulk_create( + mappings_to_create, ignore_conflicts=True + ) def _copy_assessment_items(self, source_copy_id_map): from contentcuration.models import File @@ -577,7 +593,12 @@ def _shallow_copy( can_edit_source_channel, ): data = self._clone_node( - node, None, source_channel_id, can_edit_source_channel, pk, mods, + node, + None, + source_channel_id, + can_edit_source_channel, + pk, + mods, ) with self.lock_mptt(target.tree_id if target else None): node_copy = self.model(**data) diff --git a/contentcuration/contentcuration/db/models/query.py b/contentcuration/contentcuration/db/models/query.py index 3cd57093dc..0c57d5a6cc 100644 --- a/contentcuration/contentcuration/db/models/query.py +++ b/contentcuration/contentcuration/db/models/query.py @@ -8,7 +8,7 @@ from mptt.querysets import TreeQuerySet -RIGHT_JOIN = 'RIGHT JOIN' +RIGHT_JOIN = "RIGHT JOIN" class CustomTreeQuerySet(TreeQuerySet, CTEQuerySet): @@ -19,11 +19,12 @@ class With(CTEWith): """ Custom CTE class which allows more join types than just INNER and LOUTER (LEFT) """ + def join(self, model_or_queryset, *filter_q, **filter_kw): """ Slight hack to allow more join types """ - join_type = filter_kw.get('_join_type', INNER) + join_type = filter_kw.get("_join_type", INNER) queryset = super(With, self).join(model_or_queryset, *filter_q, **filter_kw) # the underlying Django code forces the join type into INNER or a LEFT OUTER join @@ -40,6 +41,7 @@ class WithValues(With): @see https://www.postgresql.org/docs/9.6/queries-values.html """ + def __init__(self, fields, values_list, name="cte"): super(WithValues, self).__init__(None, name=name) self.query = WithValuesQuery(self) @@ -59,7 +61,9 @@ def _resolve_ref(self, name): class WithValuesSQLCompiler(SQLCompiler): - TEMPLATE = "SELECT * FROM (VALUES {values_statement}) AS {cte_name}({fields_statement})" + TEMPLATE = ( + "SELECT * FROM (VALUES {values_statement}) AS {cte_name}({fields_statement})" + ) def as_sql(self, with_limits=True, with_col_aliases=False): """ @@ -71,12 +75,16 @@ def as_sql(self, with_limits=True, with_col_aliases=False): :return: A tuple of SQL and parameters """ value_parameters = ", ".join(["%s"] * len(self.cte.fields)) - values_statement = ", ".join(["({})".format(value_parameters)] * len(self.cte.values_list)) - fields_statement = ", ".join([self.connection.ops.quote_name(field) for field in list(self.cte.fields)]) + values_statement = ", ".join( + ["({})".format(value_parameters)] * len(self.cte.values_list) + ) + fields_statement = ", ".join( + [self.connection.ops.quote_name(field) for field in list(self.cte.fields)] + ) sql = self.TEMPLATE.format( values_statement=values_statement, cte_name="_{}".format(self.cte.name), - fields_statement=fields_statement + fields_statement=fields_statement, ) return sql, list(sum(self.cte.values_list, ())) @@ -95,6 +103,7 @@ class WithValuesQuery(Query): Note: this does inherit from Query, which we're not passing a Model instance so not all Query functionality is intended to work """ + def __init__(self, cte): super(WithValuesQuery, self).__init__(None) self.cte = cte diff --git a/contentcuration/contentcuration/decorators.py b/contentcuration/contentcuration/decorators.py index 9c51e83b7a..eb6fa4fea3 100644 --- a/contentcuration/contentcuration/decorators.py +++ b/contentcuration/contentcuration/decorators.py @@ -69,6 +69,7 @@ class DelayUserStorageCalculation(ContextDecorator): Decorator class that will 
dedupe and delay requests to enqueue storage calculation tasks for users until after the wrapped function has exited """ + depth = 0 queue = [] @@ -85,6 +86,7 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): from contentcuration.utils.user import calculate_user_storage + self.depth -= 1 if not self.is_active: user_ids = set(self.queue) diff --git a/contentcuration/contentcuration/dev_urls.py b/contentcuration/contentcuration/dev_urls.py index 70ef17dc9c..003cac87a3 100644 --- a/contentcuration/contentcuration/dev_urls.py +++ b/contentcuration/contentcuration/dev_urls.py @@ -33,7 +33,7 @@ def file_server(request, storage_path=None): return HttpResponseNotFound() params = urllib.parse.urlparse(default_storage.url(storage_path)).query - host = request.META['HTTP_HOST'].split(":")[0] + host = request.META["HTTP_HOST"].split(":")[0] port = 9000 # hardcoded to the default minio IP address url = "http://{host}:{port}/{bucket}/{path}?{params}".format( host=host, @@ -58,7 +58,7 @@ def file_server(request, storage_path=None): urlpatterns = urlpatterns + [ re_path(r"^__open-in-editor/", webpack_redirect_view), - path('admin/', admin.site.urls), + path("admin/", admin.site.urls), re_path( r"^swagger(?P\.json|\.yaml)$", schema_view.without_ui(cache_timeout=0), diff --git a/contentcuration/contentcuration/forms.py b/contentcuration/contentcuration/forms.py index 8e9320d85f..2c761af080 100644 --- a/contentcuration/contentcuration/forms.py +++ b/contentcuration/contentcuration/forms.py @@ -13,14 +13,14 @@ from contentcuration.models import User -REGISTRATION_SALT = getattr(settings, 'REGISTRATION_SALT', 'registration') +REGISTRATION_SALT = getattr(settings, "REGISTRATION_SALT", "registration") # LOGIN/REGISTRATION FORMS ################################################################# class RegistrationForm(UserCreationForm): - CODE_ACCOUNT_ACTIVE = 'account_active' - CODE_ACCOUNT_INACTIVE = 'account_inactive' + CODE_ACCOUNT_ACTIVE = "account_active" + CODE_ACCOUNT_INACTIVE = "account_inactive" first_name = forms.CharField(required=True) last_name = forms.CharField(required=True) @@ -43,9 +43,13 @@ def clean_email(self): user_qs = User.objects.filter(email__iexact=email) if user_qs.exists(): if user_qs.filter(Q(is_active=True) | Q(deleted=True)).exists(): - raise ValidationError("Account already active", code=self.CODE_ACCOUNT_ACTIVE) + raise ValidationError( + "Account already active", code=self.CODE_ACCOUNT_ACTIVE + ) else: - raise ValidationError("Already registered.", code=self.CODE_ACCOUNT_INACTIVE) + raise ValidationError( + "Already registered.", code=self.CODE_ACCOUNT_INACTIVE + ) return email def save(self, commit=True): @@ -53,12 +57,12 @@ def save(self, commit=True): user.first_name = self.cleaned_data["first_name"] user.last_name = self.cleaned_data["last_name"] user.information = { - "uses": self.cleaned_data['uses'].split('|'), - "locations": self.cleaned_data['locations'].split('|'), - "space_needed": self.cleaned_data['storage'], - "heard_from": self.cleaned_data['source'], + "uses": self.cleaned_data["uses"].split("|"), + "locations": self.cleaned_data["locations"].split("|"), + "space_needed": self.cleaned_data["storage"], + "heard_from": self.cleaned_data["source"], } - user.policies = json.loads(self.cleaned_data['policies']) + user.policies = json.loads(self.cleaned_data["policies"]) if commit: user.save() @@ -67,7 +71,7 @@ def save(self, commit=True): class Meta: model = User - fields = ('first_name', 'last_name', 'email') + fields = ("first_name", "last_name", 
"email") class ForgotPasswordForm(PasswordResetForm): @@ -82,40 +86,57 @@ def save(self, request=None, extra_email_context=None, **kwargs): user = User.get_for_email(email) if user and user.is_active: - super(ForgotPasswordForm, self).save(request=request, extra_email_context=extra_email_context, **kwargs) + super(ForgotPasswordForm, self).save( + request=request, extra_email_context=extra_email_context, **kwargs + ) elif user: # For users who were invited but hadn't registered yet if not user.password: context = { - 'site': extra_email_context.get('site'), - 'user': user, - 'domain': extra_email_context.get('domain'), + "site": extra_email_context.get("site"), + "user": user, + "domain": extra_email_context.get("domain"), } - subject = render_to_string('registration/password_reset_subject.txt', context) - subject = ''.join(subject.splitlines()) - message = render_to_string('registration/registration_needed_email.txt', context) - user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, ) + subject = render_to_string( + "registration/password_reset_subject.txt", context + ) + subject = "".join(subject.splitlines()) + message = render_to_string( + "registration/registration_needed_email.txt", context + ) + user.email_user( + subject, + message, + settings.DEFAULT_FROM_EMAIL, + ) else: activation_key = self.get_activation_key(user) context = { - 'activation_key': activation_key, - 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS, - 'site': extra_email_context.get('site'), - 'user': user, - 'domain': extra_email_context.get('domain'), + "activation_key": activation_key, + "expiration_days": settings.ACCOUNT_ACTIVATION_DAYS, + "site": extra_email_context.get("site"), + "user": user, + "domain": extra_email_context.get("domain"), } - subject = render_to_string('registration/password_reset_subject.txt', context) - subject = ''.join(subject.splitlines()) - message = render_to_string('registration/activation_needed_email.txt', context) - user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, ) + subject = render_to_string( + "registration/password_reset_subject.txt", context + ) + subject = "".join(subject.splitlines()) + message = render_to_string( + "registration/activation_needed_email.txt", context + ) + user.email_user( + subject, + message, + settings.DEFAULT_FROM_EMAIL, + ) def get_activation_key(self, user): """ Generate the activation key which will be emailed to the user. 
""" return signing.dumps( - obj=getattr(user, user.USERNAME_FIELD), - salt=REGISTRATION_SALT + obj=getattr(user, user.USERNAME_FIELD), salt=REGISTRATION_SALT ) @@ -124,11 +145,11 @@ class PolicyAcceptForm(forms.Form): class Meta: model = User - fields = ('accepted', 'policy_names') + fields = ("accepted", "policy_names") def save(self, user): user.policies = user.policies or {} - user.policies.update(json.loads(self.cleaned_data['policy'])) + user.policies.update(json.loads(self.cleaned_data["policy"])) user.save() return user @@ -141,8 +162,8 @@ class UsernameChangeForm(UserChangeForm): class Meta: model = User - fields = ('first_name', 'last_name') - exclude = ('password', 'email') + fields = ("first_name", "last_name") + exclude = ("password", "email") def clean_password(self): return True @@ -179,8 +200,23 @@ class StorageRequestForm(forms.Form): message = forms.CharField(required=True) class Meta: - fields = ("storage", "kind", "resource_count", "resource_size", "creators", "sample_link", "license", "public", - "audience", "import_count", "location", "uploading_for", "organization_type", "time_constraint", "message") + fields = ( + "storage", + "kind", + "resource_count", + "resource_size", + "creators", + "sample_link", + "license", + "public", + "audience", + "import_count", + "location", + "uploading_for", + "organization_type", + "time_constraint", + "message", + ) class IssueReportForm(forms.Form): @@ -201,7 +237,7 @@ def __init__(self, user, *args, **kwargs): super(DeleteAccountForm, self).__init__(*args, **kwargs) def clean_email(self): - email = self.cleaned_data['email'].strip().lower() - if self.user.is_admin or self.user.email.lower() != self.cleaned_data['email']: + email = self.cleaned_data["email"].strip().lower() + if self.user.is_admin or self.user.email.lower() != self.cleaned_data["email"]: raise ValidationError("Not allowed") return email diff --git a/contentcuration/contentcuration/management/commands/count_public_resources.py b/contentcuration/contentcuration/management/commands/count_public_resources.py index 40b717b608..7c3d825f73 100644 --- a/contentcuration/contentcuration/management/commands/count_public_resources.py +++ b/contentcuration/contentcuration/management/commands/count_public_resources.py @@ -5,16 +5,19 @@ from contentcuration.models import Channel from contentcuration.models import ContentNode -logger = logging.getLogger('command') +logger = logging.getLogger("command") class Command(BaseCommand): - def handle(self, *args, **options): - public_tree_ids = Channel.objects.filter(public=True, deleted=False).values_list('main_tree__tree_id', flat=True) - count = ContentNode.objects.filter(tree_id__in=public_tree_ids) \ - .exclude(kind_id='topic') \ - .values('content_id', 'language_id') \ - .distinct() \ - .count() + public_tree_ids = Channel.objects.filter( + public=True, deleted=False + ).values_list("main_tree__tree_id", flat=True) + count = ( + ContentNode.objects.filter(tree_id__in=public_tree_ids) + .exclude(kind_id="topic") + .values("content_id", "language_id") + .distinct() + .count() + ) logger.info("{} unique resources".format(count)) diff --git a/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py b/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py index 96c86e3fa5..81d7809308 100644 --- a/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py +++ b/contentcuration/contentcuration/management/commands/fix_duplicate_assessment_items.py @@ 
-16,11 +16,16 @@ def handle(self, *args, **options): start = time.time() # Go through nodes that have assessment items with the same assessment_id logging.info("Looking for nodes with invalid assessments...") - nodes = ContentNode.objects.filter(kind_id='exercise') \ + nodes = ( + ContentNode.objects.filter(kind_id="exercise") .annotate( - num_ids=Count('assessment_items__pk'), - num_assessment_ids=Count('assessment_items__assessment_id', distinct=True) - ).exclude(num_ids=F('num_assessment_ids')) + num_ids=Count("assessment_items__pk"), + num_assessment_ids=Count( + "assessment_items__assessment_id", distinct=True + ), + ) + .exclude(num_ids=F("num_assessment_ids")) + ) total = nodes.count() logging.info("Fixing {} nodes...".format(total)) @@ -29,7 +34,9 @@ def handle(self, *args, **options): # Go through each node's assessment items for item in node.assessment_items.all(): # Handle duplicate assessment ids - other_duplicate_assessment_items = node.assessment_items.filter(assessment_id=item.assessment_id).exclude(pk=item.pk) + other_duplicate_assessment_items = node.assessment_items.filter( + assessment_id=item.assessment_id + ).exclude(pk=item.pk) if other_duplicate_assessment_items.exists(): # Remove duplicates @@ -37,14 +44,16 @@ def handle(self, *args, **options): question=item.question, answers=item.answers, hints=item.hints, - raw_data=item.raw_data + raw_data=item.raw_data, ).exists(): item.delete() # Get new ids for non-duplicates else: new_id = uuid.uuid4().hex - while node.assessment_items.filter(assessment_id=new_id).exists(): + while node.assessment_items.filter( + assessment_id=new_id + ).exists(): new_id = uuid.uuid4().hex item.assessment_id = new_id item.save() diff --git a/contentcuration/contentcuration/management/commands/fix_exercise_complete.py b/contentcuration/contentcuration/management/commands/fix_exercise_complete.py index f9ed6e903f..e3be0754c0 100644 --- a/contentcuration/contentcuration/management/commands/fix_exercise_complete.py +++ b/contentcuration/contentcuration/management/commands/fix_exercise_complete.py @@ -9,99 +9,209 @@ from contentcuration.models import ContentNode from contentcuration.models import License -logging = logmodule.getLogger('command') - +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() reset_time = time.time() - mastery_model_exercise_count = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='mastery_model')).order_by().count() + mastery_model_exercise_count = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter(Q(extra_fields__has_key="mastery_model")) + .order_by() + .count() + ) i = 0 while i < mastery_model_exercise_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='mastery_model')).order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter(Q(extra_fields__has_key="mastery_model")) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) ContentNode.objects.filter(pk__in=update_ids).update(complete=True) - logging.info('Marked {} nodes as complete=True in {} seconds'.format(CHUNKSIZE, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=True in {} seconds".format( + CHUNKSIZE, time.time() - chunk_time + ) + ) i += CHUNKSIZE - mastery_model_exercise_count = 
ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='option.completion_criteria.mastery_model')).order_by().count() + mastery_model_exercise_count = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter(Q(extra_fields__has_key="option.completion_criteria.mastery_model")) + .order_by() + .count() + ) while i < mastery_model_exercise_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) \ - .filter(Q(extra_fields__has_key='option.completion_criteria.mastery_model')).order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter(kind_id=content_kinds.EXERCISE) + .filter( + Q(extra_fields__has_key="option.completion_criteria.mastery_model") + ) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) ContentNode.objects.filter(pk__in=update_ids).update(complete=True) - logging.info('Marked {} nodes as complete=True in {} seconds'.format(CHUNKSIZE, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=True in {} seconds".format( + CHUNKSIZE, time.time() - chunk_time + ) + ) i += CHUNKSIZE - logging.info('Marked all mastery_modeled exercises as complete=True (finished in {})'.format(time.time() - reset_time)) + logging.info( + "Marked all mastery_modeled exercises as complete=True (finished in {})".format( + time.time() - reset_time + ) + ) # Mark invalid titles titlestart = time.time() - logging.info('Marking blank titles...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE, title='').order_by().update(complete=False) - logging.info('Marked {} invalid titles (finished in {})'.format(count, time.time() - titlestart)) + logging.info("Marking blank titles...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE, title="") + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} invalid titles (finished in {})".format( + count, time.time() - titlestart + ) + ) # Mark invalid licenses licensestart = time.time() - logging.info('Marking blank licenses...') - invalid_license_count = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE, license__isnull=True)\ - .order_by().count() + logging.info("Marking blank licenses...") + invalid_license_count = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license__isnull=True + ) + .order_by() + .count() + ) while i < invalid_license_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE, license__isnull=True)\ - .order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license__isnull=True + ) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) count = ContentNode.objects.filter(pk__in=update_ids).update(complete=False) - logging.info('Marked {} nodes as complete=False in {} seconds'.format(count, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=False in {} seconds".format( + count, time.time() - chunk_time + ) + ) i += CHUNKSIZE - logging.info('Marked {} invalid licenses (finished in {})'.format(invalid_license_count, time.time() - licensestart)) + logging.info( + "Marked {} invalid licenses (finished in {})".format( + invalid_license_count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank license descriptions...') - 
custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) - count = ContentNode.objects.exclude(complete=False)\ - .filter(kind_id=content_kinds.EXERCISE, license_id__in=custom_licenses).filter(Q(license_description__isnull=True) | Q(license_description=''))\ - .order_by().update(complete=False) - logging.info('Marked {} invalid license descriptions (finished in {})'.format(count, time.time() - licensestart)) + logging.info("Marking blank license descriptions...") + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE, license_id__in=custom_licenses) + .filter(Q(license_description__isnull=True) | Q(license_description="")) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} invalid license descriptions (finished in {})".format( + count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank copyright holders...') - copyright_licenses = list(License.objects.filter(copyright_holder_required=True).values_list("pk", flat=True)) - blank_copyright_holder_count = ContentNode.objects\ - .filter(kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses).filter(Q(copyright_holder__isnull=True) | Q(copyright_holder=''))\ - .order_by().count() + logging.info("Marking blank copyright holders...") + copyright_licenses = list( + License.objects.filter(copyright_holder_required=True).values_list( + "pk", flat=True + ) + ) + blank_copyright_holder_count = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses + ) + .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder="")) + .order_by() + .count() + ) while i < blank_copyright_holder_count: chunk_time = time.time() - update_ids = ContentNode.objects.filter(kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses)\ - .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder='')).order_by("id").values_list("id", flat=True)[i: i + CHUNKSIZE] + update_ids = ( + ContentNode.objects.filter( + kind_id=content_kinds.EXERCISE, license_id__in=copyright_licenses + ) + .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder="")) + .order_by("id") + .values_list("id", flat=True)[i : i + CHUNKSIZE] + ) count = ContentNode.objects.filter(pk__in=update_ids).update(complete=False) - logging.info('Marked {} nodes as complete=False in {} seconds'.format(count, time.time() - chunk_time)) + logging.info( + "Marked {} nodes as complete=False in {} seconds".format( + count, time.time() - chunk_time + ) + ) i += CHUNKSIZE - logging.info('Marked {} invalid copyright holders (finished in {})'.format(blank_copyright_holder_count, time.time() - licensestart)) + logging.info( + "Marked {} invalid copyright holders (finished in {})".format( + blank_copyright_holder_count, time.time() - licensestart + ) + ) # Mark invalid exercises exercisestart = time.time() - logging.info('Marking mastery_model less exercises...') - count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE).filter(~Q(extra_fields__has_key='mastery_model'))\ - .order_by().update(complete=False) - logging.info('Marked {} mastery_model less exercises(finished in {})'.format(count, time.time() - exercisestart)) + logging.info("Marking mastery_model less exercises...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(kind_id=content_kinds.EXERCISE) + 
.filter(~Q(extra_fields__has_key="mastery_model"))
+            .order_by()
+            .update(complete=False)
+        )
+        logging.info(
+            "Marked {} mastery_model less exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
 
         exercisestart = time.time()
-        logging.info('Marking bad mastery model exercises...')
-        count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE)\
-            .filter(Q(extra_fields__mastery_model=exercises.M_OF_N) & (~Q(extra_fields__has_key='m') | ~Q(extra_fields__has_key='n')))\
-            .order_by().update(complete=False)
-        logging.info('Marked {} bad mastery model exercises (finished in {})'.format(count, time.time() - exercisestart))
-
-        logging.info('Mark incomplete command completed in {}s'.format(time.time() - start))
+        logging.info("Marking bad mastery model exercises...")
+        count = (
+            ContentNode.objects.exclude(complete=False)
+            .filter(kind_id=content_kinds.EXERCISE)
+            .filter(
+                Q(extra_fields__mastery_model=exercises.M_OF_N)
+                & (~Q(extra_fields__has_key="m") | ~Q(extra_fields__has_key="n"))
+            )
+            .order_by()
+            .update(complete=False)
+        )
+        logging.info(
+            "Marked {} bad mastery model exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
+
+        logging.info(
+            "Mark incomplete command completed in {}s".format(time.time() - start)
+        )
diff --git a/contentcuration/contentcuration/management/commands/garbage_collect.py b/contentcuration/contentcuration/management/commands/garbage_collect.py
index 732a494aec..2255bbfbec 100644
--- a/contentcuration/contentcuration/management/commands/garbage_collect.py
+++ b/contentcuration/contentcuration/management/commands/garbage_collect.py
@@ -16,11 +16,10 @@
 from contentcuration.utils.garbage_collect import clean_up_tasks
 
 
-logging = logmodule.getLogger('command')
+logging = logmodule.getLogger("command")
 
 
 class Command(BaseCommand):
-
     def handle(self, *args, **options):
         """
         Actual logic for garbage collection.
@@ -29,7 +28,9 @@ def handle(self, *args, **options):
         # Clean up users that are soft deleted and are older than ACCOUNT_DELETION_BUFFER (90 days).
         # Also clean contentnodes, files and file objects on storage that are associated
         # with the orphan tree.
- logging.info("Cleaning up soft deleted users older than ACCOUNT_DELETION_BUFFER (90 days)") + logging.info( + "Cleaning up soft deleted users older than ACCOUNT_DELETION_BUFFER (90 days)" + ) clean_up_soft_deleted_users() logging.info("Cleaning up contentnodes from the orphan tree") diff --git a/contentcuration/contentcuration/management/commands/mark_incomplete.py b/contentcuration/contentcuration/management/commands/mark_incomplete.py index 056634d7d8..3cbb74dcf8 100644 --- a/contentcuration/contentcuration/management/commands/mark_incomplete.py +++ b/contentcuration/contentcuration/management/commands/mark_incomplete.py @@ -13,123 +13,237 @@ from contentcuration.models import File from contentcuration.models import License -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() # Mark invalid titles titlestart = time.time() - logging.info('Marking blank titles...') - count = ContentNode.objects.exclude(complete=False).filter(title='', parent__isnull=False).order_by().update(complete=False) - logging.info('Marked {} invalid titles (finished in {})'.format(count, time.time() - titlestart)) + logging.info("Marking blank titles...") + count = ( + ContentNode.objects.exclude(complete=False) + .filter(title="", parent__isnull=False) + .order_by() + .update(complete=False) + ) + logging.info( + "Marked {} invalid titles (finished in {})".format( + count, time.time() - titlestart + ) + ) # Mark invalid licenses licensestart = time.time() - logging.info('Marking blank licenses...') - count = ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) \ - .exclude(complete=False) \ - .filter(license__isnull=True) \ - .order_by() \ + logging.info("Marking blank licenses...") + count = ( + ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) + .exclude(complete=False) + .filter(license__isnull=True) + .order_by() .update(complete=False) - logging.info('Marked {} invalid licenses (finished in {})'.format(count, time.time() - licensestart)) + ) + logging.info( + "Marked {} invalid licenses (finished in {})".format( + count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank license descriptions...') - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) - count = ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) \ - .exclude(complete=False) \ - .filter(license_id__in=custom_licenses) \ - .filter(Q(license_description__isnull=True) | Q(license_description='')) \ - .order_by() \ + logging.info("Marking blank license descriptions...") + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) + count = ( + ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) + .exclude(complete=False) + .filter(license_id__in=custom_licenses) + .filter(Q(license_description__isnull=True) | Q(license_description="")) + .order_by() .update(complete=False) - logging.info('Marked {} invalid license descriptions (finished in {})'.format(count, time.time() - licensestart)) + ) + logging.info( + "Marked {} invalid license descriptions (finished in {})".format( + count, time.time() - licensestart + ) + ) licensestart = time.time() - logging.info('Marking blank copyright holders...') - copyright_licenses = list(License.objects.filter(copyright_holder_required=True).values_list("pk", flat=True)) - count = ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) \ - 
.exclude(complete=False) \ - .filter(license_id__in=copyright_licenses) \ - .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder='')) \ - .order_by() \ + logging.info("Marking blank copyright holders...") + copyright_licenses = list( + License.objects.filter(copyright_holder_required=True).values_list( + "pk", flat=True + ) + ) + count = ( + ContentNode.objects.exclude(kind_id=content_kinds.TOPIC) + .exclude(complete=False) + .filter(license_id__in=copyright_licenses) + .filter(Q(copyright_holder__isnull=True) | Q(copyright_holder="")) + .order_by() .update(complete=False) - logging.info('Marked {} invalid copyright holders (finished in {})'.format(count, time.time() - licensestart)) + ) + logging.info( + "Marked {} invalid copyright holders (finished in {})".format( + count, time.time() - licensestart + ) + ) # Mark invalid file resources resourcestart = time.time() - logging.info('Marking file resources...') - file_check_query = With(File.objects.filter(preset__supplementary=False).values("contentnode_id").order_by(), name="t_file") - - query = file_check_query.join(ContentNode, id=file_check_query.col.contentnode_id, _join_type=LOUTER) \ - .with_cte(file_check_query) \ - .annotate(t_contentnode_id=file_check_query.col.contentnode_id) \ - .exclude(kind_id=content_kinds.TOPIC) \ - .exclude(kind_id=content_kinds.EXERCISE) \ - .exclude(complete=False) \ - .filter(t_contentnode_id__isnull=True) \ + logging.info("Marking file resources...") + file_check_query = With( + File.objects.filter(preset__supplementary=False) + .values("contentnode_id") + .order_by(), + name="t_file", + ) + + query = ( + file_check_query.join( + ContentNode, id=file_check_query.col.contentnode_id, _join_type=LOUTER + ) + .with_cte(file_check_query) + .annotate(t_contentnode_id=file_check_query.col.contentnode_id) + .exclude(kind_id=content_kinds.TOPIC) + .exclude(kind_id=content_kinds.EXERCISE) + .exclude(complete=False) + .filter(t_contentnode_id__isnull=True) .order_by() - count = ContentNode.objects.filter(id__in=query.order_by().values_list('id', flat=True)).update(complete=False) - logging.info('Marked {} invalid file resources (finished in {})'.format(count, time.time() - resourcestart)) + ) + count = ContentNode.objects.filter( + id__in=query.order_by().values_list("id", flat=True) + ).update(complete=False) + logging.info( + "Marked {} invalid file resources (finished in {})".format( + count, time.time() - resourcestart + ) + ) # Mark invalid exercises exercisestart = time.time() - logging.info('Marking exercises...') - - has_questions_query = With(AssessmentItem.objects.all().values("contentnode_id").order_by(), name="t_assessmentitem") - - query = has_questions_query.join(ContentNode, id=has_questions_query.col.contentnode_id, _join_type=LOUTER) \ - .with_cte(has_questions_query) \ - .annotate(t_contentnode_id=has_questions_query.col.contentnode_id) \ - .filter(kind_id=content_kinds.EXERCISE) \ - .exclude(complete=False) \ - .filter(t_contentnode_id__isnull=True) \ + logging.info("Marking exercises...") + + has_questions_query = With( + AssessmentItem.objects.all().values("contentnode_id").order_by(), + name="t_assessmentitem", + ) + + query = ( + has_questions_query.join( + ContentNode, + id=has_questions_query.col.contentnode_id, + _join_type=LOUTER, + ) + .with_cte(has_questions_query) + .annotate(t_contentnode_id=has_questions_query.col.contentnode_id) + .filter(kind_id=content_kinds.EXERCISE) + .exclude(complete=False) + .filter(t_contentnode_id__isnull=True) .order_by() + ) exercisestart = 
time.time()
-        count = ContentNode.objects.filter(id__in=query.order_by().values_list('id', flat=True)).update(complete=False)
+        count = ContentNode.objects.filter(
+            id__in=query.order_by().values_list("id", flat=True)
+        ).update(complete=False)
 
-        logging.info('Marked {} questionless exercises (finished in {})'.format(count, time.time() - exercisestart))
+        logging.info(
+            "Marked {} questionless exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
 
         exercisestart = time.time()
-        exercise_check_query = With(AssessmentItem.objects.exclude(type=exercises.PERSEUS_QUESTION)
-                                    .filter(
-                                        Q(question='')
-                                        | Q(answers='[]')
-                                        # hack to check if no correct answers
-                                        | (~Q(type=exercises.INPUT_QUESTION) & ~Q(answers__iregex=r'"correct":\s*true'))).order_by(), name="t_assessmentitem")
-
-        query = exercise_check_query.join(ContentNode, id=has_questions_query.col.contentnode_id) \
-            .with_cte(exercise_check_query) \
-            .annotate(t_contentnode_id=exercise_check_query.col.contentnode_id) \
-            .filter(kind_id=content_kinds.EXERCISE) \
-            .exclude(complete=False) \
+        exercise_check_query = With(
+            AssessmentItem.objects.exclude(type=exercises.PERSEUS_QUESTION)
+            .filter(
+                Q(question="")
+                | Q(answers="[]")
+                # hack to check if no correct answers
+                | (
+                    ~Q(type=exercises.INPUT_QUESTION)
+                    & ~Q(answers__iregex=r'"correct":\s*true')
+                )
+            )
+            .order_by(),
+            name="t_assessmentitem",
+        )
+
+        query = (
+            exercise_check_query.join(
+                ContentNode, id=has_questions_query.col.contentnode_id
+            )
+            .with_cte(exercise_check_query)
+            .annotate(t_contentnode_id=exercise_check_query.col.contentnode_id)
+            .filter(kind_id=content_kinds.EXERCISE)
+            .exclude(complete=False)
             .order_by()
+        )
 
-        count = ContentNode.objects.filter(id__in=query.order_by().values_list('id', flat=True)).update(complete=False)
+        count = ContentNode.objects.filter(
+            id__in=query.order_by().values_list("id", flat=True)
+        ).update(complete=False)
 
-        logging.info('Marked {} invalid exercises (finished in {})'.format(count, time.time() - exercisestart))
+        logging.info(
+            "Marked {} invalid exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
 
         exercisestart = time.time()
-        logging.info('Marking mastery_model less exercises...')
-        count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE).filter(~Q(extra_fields__has_key='mastery_model')) \
-            .order_by().update(complete=False)
-
-        logging.info('Marked {} mastery_model less exercises(finished in {})'.format(count, time.time() - exercisestart))
-
-        count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE).filter(~Q(extra_fields__has_key='mastery_model') & ~Q(extra_fields__has_key='option.completion_criteria.mastery_model')) \
-            .order_by().update(complete=False)
+        logging.info("Marking mastery_model less exercises...")
+        count = (
+            ContentNode.objects.exclude(complete=False)
+            .filter(kind_id=content_kinds.EXERCISE)
+            .filter(~Q(extra_fields__has_key="mastery_model"))
+            .order_by()
+            .update(complete=False)
+        )
+
+        logging.info(
+            "Marked {} mastery_model less exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
+
+        count = (
+            ContentNode.objects.exclude(complete=False)
+            .filter(kind_id=content_kinds.EXERCISE)
+            .filter(
+                ~Q(extra_fields__has_key="mastery_model")
+                & ~Q(extra_fields__has_key="option.completion_criteria.mastery_model")
+            )
+            .order_by()
+            .update(complete=False)
+        )
 
-        logging.info('Marked {} mastery_model less exercises(finished in {})'.format(count, time.time() - exercisestart))
+        logging.info(
+            "Marked {} mastery_model less exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
 
         exercisestart = time.time()
-        logging.info('Marking bad mastery model exercises...')
-        count = ContentNode.objects.exclude(complete=False).filter(kind_id=content_kinds.EXERCISE) \
-            .filter(Q(extra_fields__mastery_model=exercises.M_OF_N) & (~Q(extra_fields__has_key='m') | ~Q(extra_fields__has_key='n'))) \
-            .order_by().update(complete=False)
-        logging.info('Marked {} bad mastery model exercises (finished in {})'.format(count, time.time() - exercisestart))
-
-        logging.info('Mark incomplete command completed in {}s'.format(time.time() - start))
+        logging.info("Marking bad mastery model exercises...")
+        count = (
+            ContentNode.objects.exclude(complete=False)
+            .filter(kind_id=content_kinds.EXERCISE)
+            .filter(
+                Q(extra_fields__mastery_model=exercises.M_OF_N)
+                & (~Q(extra_fields__has_key="m") | ~Q(extra_fields__has_key="n"))
+            )
+            .order_by()
+            .update(complete=False)
+        )
+        logging.info(
+            "Marked {} bad mastery model exercises (finished in {})".format(
+                count, time.time() - exercisestart
+            )
+        )
+
+        logging.info(
+            "Mark incomplete command completed in {}s".format(time.time() - start)
+        )
diff --git a/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py b/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py
index 4aa2f9f261..54af9c005b 100644
--- a/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py
+++ b/contentcuration/contentcuration/management/commands/reconcile_change_tasks.py
@@ -6,7 +6,7 @@
 from contentcuration.models import Change
 from contentcuration.models import User
 
-logger = logging.getLogger('command')
+logger = logging.getLogger("command")
 
 
 class Command(BaseCommand):
@@ -18,26 +18,42 @@ def handle(self, *args, **options):
         from contentcuration.tasks import apply_channel_changes_task
         from contentcuration.tasks import apply_user_changes_task
 
-        active_task_ids = [task['id'] for task in app.get_active_and_reserved_tasks()]
+        active_task_ids = [task["id"] for task in app.get_active_and_reserved_tasks()]
 
-        channel_changes = Change.objects.filter(channel_id__isnull=False, applied=False, errored=False) \
-            .order_by('channel_id', 'created_by_id') \
-            .values('channel_id', 'created_by_id') \
+        channel_changes = (
+            Change.objects.filter(
+                channel_id__isnull=False, applied=False, errored=False
+            )
+            .order_by("channel_id", "created_by_id")
+            .values("channel_id", "created_by_id")
             .distinct()
+        )
         for channel_change in channel_changes:
-            apply_channel_changes_task.revoke(exclude_task_ids=active_task_ids, channel_id=channel_change['channel_id'])
+            apply_channel_changes_task.revoke(
+                exclude_task_ids=active_task_ids,
+                channel_id=channel_change["channel_id"],
+            )
             apply_channel_changes_task.fetch_or_enqueue(
-                User.objects.get(pk=channel_change['created_by_id']),
-                channel_id=channel_change['channel_id']
+                User.objects.get(pk=channel_change["created_by_id"]),
+                channel_id=channel_change["channel_id"],
             )
-        user_changes = Change.objects.filter(channel_id__isnull=True, user_id__isnull=False, applied=False, errored=False) \
-            .order_by('user_id', 'created_by_id') \
-            .values('user_id', 'created_by_id') \
+        user_changes = (
+            Change.objects.filter(
+                channel_id__isnull=True,
+                user_id__isnull=False,
+                applied=False,
+                errored=False,
+            )
+            .order_by("user_id", "created_by_id")
+            .values("user_id", "created_by_id")
             .distinct()
+        )
         for user_change in user_changes:
-            apply_user_changes_task.revoke(exclude_task_ids=active_task_ids, user_id=user_change['user_id'])
+            apply_user_changes_task.revoke(
+                exclude_task_ids=active_task_ids, user_id=user_change["user_id"]
+            )
             apply_user_changes_task.fetch_or_enqueue(
-                User.objects.get(pk=user_change['created_by_id']),
-                user_id=user_change['user_id']
+                User.objects.get(pk=user_change["created_by_id"]),
+                user_id=user_change["user_id"],
             )
diff --git a/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py b/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py
index ce97abf7a5..f5a3474c76 100644
--- a/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py
+++ b/contentcuration/contentcuration/management/commands/reconcile_publishing_status.py
@@ -20,11 +20,18 @@ def handle(self, *args, **options):
         from contentcuration.tasks import apply_channel_changes_task
 
         # Channels that are in `publishing` state.
-        publishing_channels = list(Channel.objects.filter(deleted=False, main_tree__publishing=True).values_list("id", flat=True))
+        publishing_channels = list(
+            Channel.objects.filter(
+                deleted=False, main_tree__publishing=True
+            ).values_list("id", flat=True)
+        )
 
         # channel_ids of tasks that are currently being run by the celery workers.
-        active_channel_tasks = [task["kwargs"].get("channel_id") for task in app.get_active_tasks()
-                                if task["name"] == apply_channel_changes_task.name]
+        active_channel_tasks = [
+            task["kwargs"].get("channel_id")
+            for task in app.get_active_tasks()
+            if task["name"] == apply_channel_changes_task.name
+        ]
 
         # If channel is in publishing state and does not have any active task,
         # that means the worker has crashed. So, we reset the publishing state to False.
@@ -33,4 +40,6 @@ def handle(self, *args, **options):
             channel = Channel.objects.get(pk=channel_id)
             channel.main_tree.publishing = False
             channel.main_tree.save()
-            logger.info(f"Resetted publishing status to False for channel {channel.id}.")
+            logger.info(
+                f"Reset publishing status to False for channel {channel.id}."
+ ) diff --git a/contentcuration/contentcuration/management/commands/restore_channel.py b/contentcuration/contentcuration/management/commands/restore_channel.py index efaeb3ee7c..6133ec3806 100644 --- a/contentcuration/contentcuration/management/commands/restore_channel.py +++ b/contentcuration/contentcuration/management/commands/restore_channel.py @@ -4,26 +4,25 @@ from contentcuration.utils.import_tools import import_channel -logger = logging.getLogger('command') +logger = logging.getLogger("command") class Command(BaseCommand): - def add_arguments(self, parser): # ID of channel to read data from - parser.add_argument('source_id', type=str) + parser.add_argument("source_id", type=str) # ID of channel to write data to (can be same as source channel) - parser.add_argument('--target', help='restore channel db to TARGET CHANNEL ID') - parser.add_argument('--download-url', help='where to download db from') - parser.add_argument('--editor', help='add user as editor to channel') + parser.add_argument("--target", help="restore channel db to TARGET CHANNEL ID") + parser.add_argument("--download-url", help="where to download db from") + parser.add_argument("--editor", help="add user as editor to channel") def handle(self, *args, **options): # Set up variables for restoration process logger.info("\n\n********** STARTING CHANNEL RESTORATION **********") - source_id = options['source_id'] - target_id = options.get('target') or source_id - download_url = options.get('download_url') - editor = options.get('editor') + source_id = options["source_id"] + target_id = options.get("target") or source_id + download_url = options.get("download_url") + editor = options.get("editor") import_channel(source_id, target_id, download_url, editor, logger=logger) diff --git a/contentcuration/contentcuration/management/commands/set_content_mimetypes.py b/contentcuration/contentcuration/management/commands/set_content_mimetypes.py index 732d64f8d6..27af4732fc 100755 --- a/contentcuration/contentcuration/management/commands/set_content_mimetypes.py +++ b/contentcuration/contentcuration/management/commands/set_content_mimetypes.py @@ -18,18 +18,17 @@ class Command(BaseCommand): - def handle(self, *args, **kwargs): blobs = self._list_all_files() futures = [] with concurrent.futures.ThreadPoolExecutor() as e: - print("Scheduling all metadata update jobs...") + print("Scheduling all metadata update jobs...") # noqa: T201 for blob in blobs: future = e.submit(self._update_metadata, blob) futures.append(future) - print("Waiting for all jobs to finish...") + print("Waiting for all jobs to finish...") # noqa: T201 def _determine_cache_control(self, name): _, ext = os.path.splitext(name) diff --git a/contentcuration/contentcuration/management/commands/set_default_learning_activities.py b/contentcuration/contentcuration/management/commands/set_default_learning_activities.py index b6202477fe..e1105b70e5 100644 --- a/contentcuration/contentcuration/management/commands/set_default_learning_activities.py +++ b/contentcuration/contentcuration/management/commands/set_default_learning_activities.py @@ -6,31 +6,46 @@ from contentcuration.constants.contentnode import kind_activity_map from contentcuration.models import ContentNode -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() for kind, activity in kind_activity_map.items(): kind_start = time.time() - map_to_set = { - activity: True - } + map_to_set = 
{activity: True} - null_learning_activities = ContentNode.objects.filter(kind=kind, learning_activities__isnull=True).values_list("id", flat=True) + null_learning_activities = ContentNode.objects.filter( + kind=kind, learning_activities__isnull=True + ).values_list("id", flat=True) - logging.info("Setting default learning activities for kind: {}".format(kind)) + logging.info( + "Setting default learning activities for kind: {}".format(kind) + ) while null_learning_activities.exists(): - updated_count = ContentNode.objects.filter(id__in=null_learning_activities[0:CHUNKSIZE]).update(learning_activities=map_to_set) - logging.info("Updated {} content nodes of kind {} with learning activity {}".format(updated_count, kind, activity)) - - logging.info("Finished setting default learning activities for kind: {} in {} seconds".format(kind, time.time() - kind_start)) - - logging.info('Finished setting all null learning activities in {} seconds'.format(time.time() - start)) + updated_count = ContentNode.objects.filter( + id__in=null_learning_activities[0:CHUNKSIZE] + ).update(learning_activities=map_to_set) + logging.info( + "Updated {} content nodes of kind {} with learning activity {}".format( + updated_count, kind, activity + ) + ) + + logging.info( + "Finished setting default learning activities for kind: {} in {} seconds".format( + kind, time.time() - kind_start + ) + ) + + logging.info( + "Finished setting all null learning activities in {} seconds".format( + time.time() - start + ) + ) diff --git a/contentcuration/contentcuration/management/commands/set_file_duration.py b/contentcuration/contentcuration/management/commands/set_file_duration.py index 77446c9853..958b05dba3 100644 --- a/contentcuration/contentcuration/management/commands/set_file_duration.py +++ b/contentcuration/contentcuration/management/commands/set_file_duration.py @@ -7,7 +7,7 @@ from contentcuration.models import File from contentcuration.models import MEDIA_PRESETS -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 @@ -31,7 +31,7 @@ def extract_duration_of_media(f_in, extension): # noqa C901 "panic", "-f", extension, - "-" + "-", ], stdin=f_in, ) @@ -52,7 +52,7 @@ def extract_duration_of_media(f_in, extension): # noqa C901 "-", ], stdin=f_in, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, ) try: second_last_line = result.stderr.decode("utf-8").strip().splitlines()[-2] @@ -76,22 +76,25 @@ def extract_duration_of_media(f_in, extension): # noqa C901 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() - logging.info("Setting default duration for media presets: {}".format(MEDIA_PRESETS)) + logging.info( + "Setting default duration for media presets: {}".format(MEDIA_PRESETS) + ) excluded_files = set() - null_duration = File.objects.filter(preset_id__in=MEDIA_PRESETS, duration__isnull=True) + null_duration = File.objects.filter( + preset_id__in=MEDIA_PRESETS, duration__isnull=True + ) null_duration_count = null_duration.count() updated_count = 0 i = 0 while i < null_duration_count: - for file in null_duration[i:i + CHUNKSIZE]: + for file in null_duration[i : i + CHUNKSIZE]: if file.file_on_disk.name in excluded_files: continue file.refresh_from_db() @@ -99,16 +102,26 @@ def handle(self, *args, **options): continue try: with file.file_on_disk.open() as f: - duration = extract_duration_of_media(f, file.file_format.extension) + duration = extract_duration_of_media( + f, file.file_format.extension + ) if duration: - updated_count += 
File.objects.filter(checksum=file.checksum, preset_id__in=MEDIA_PRESETS).update(duration=duration) + updated_count += File.objects.filter( + checksum=file.checksum, preset_id__in=MEDIA_PRESETS + ).update(duration=duration) except FileNotFoundError: logging.warning("File {} not found".format(file)) excluded_files.add(file.file_on_disk.name) except (subprocess.CalledProcessError, RuntimeError): - logging.warning("File {} could not be read for duration".format(file)) + logging.warning( + "File {} could not be read for duration".format(file) + ) excluded_files.add(file.file_on_disk.name) i += CHUNKSIZE - logging.info('Finished setting all null duration for {} files in {} seconds'.format(updated_count, time.time() - start)) + logging.info( + "Finished setting all null duration for {} files in {} seconds".format( + updated_count, time.time() - start + ) + ) diff --git a/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py b/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py index 38865f6b89..32d2659173 100644 --- a/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py +++ b/contentcuration/contentcuration/management/commands/set_orm_based_has_captions.py @@ -11,38 +11,57 @@ from contentcuration.models import ContentNode from contentcuration.models import File -logging = logmodule.getLogger('command') +logging = logmodule.getLogger("command") CHUNKSIZE = 10000 class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() logging.info("Setting 'has captions' for audio kinds") - has_captions_subquery = Exists(File.objects.filter(contentnode=OuterRef("id"), language=OuterRef("language"), preset_id=format_presets.VIDEO_SUBTITLE)) + has_captions_subquery = Exists( + File.objects.filter( + contentnode=OuterRef("id"), + language=OuterRef("language"), + preset_id=format_presets.VIDEO_SUBTITLE, + ) + ) # Only try to update audio nodes which have not had any accessibility labels set on them # this will allow this management command to be rerun and resume from where it left off # and also prevent stomping previous edits to the accessibility_labels field. 
- updateable_nodes = ContentNode.objects.filter(has_captions_subquery, kind=content_kinds.AUDIO, accessibility_labels__isnull=True) + updateable_nodes = ContentNode.objects.filter( + has_captions_subquery, + kind=content_kinds.AUDIO, + accessibility_labels__isnull=True, + ) - updateable_node_slice = updateable_nodes.values_list("id", flat=True)[0:CHUNKSIZE] + updateable_node_slice = updateable_nodes.values_list("id", flat=True)[ + 0:CHUNKSIZE + ] count = 0 while updateable_nodes.exists(): this_count = ContentNode.objects.filter( id__in=updateable_node_slice - ).update(accessibility_labels={accessibility_categories.CAPTIONS_SUBTITLES: True}) + ).update( + accessibility_labels={accessibility_categories.CAPTIONS_SUBTITLES: True} + ) logging.info("Set has captions metadata for {} nodes".format(this_count)) count += this_count - updateable_node_slice = updateable_nodes.values_list("id", flat=True)[0:CHUNKSIZE] + updateable_node_slice = updateable_nodes.values_list("id", flat=True)[ + 0:CHUNKSIZE + ] - logging.info('Finished setting all has captions metadata for {} nodes in {} seconds'.format(count, time.time() - start)) + logging.info( + "Finished setting all has captions metadata for {} nodes in {} seconds".format( + count, time.time() - start + ) + ) diff --git a/contentcuration/contentcuration/management/commands/set_storage_used.py b/contentcuration/contentcuration/management/commands/set_storage_used.py index 906ac580e7..52a185ab81 100644 --- a/contentcuration/contentcuration/management/commands/set_storage_used.py +++ b/contentcuration/contentcuration/management/commands/set_storage_used.py @@ -13,7 +13,11 @@ def add_arguments(self, parser): parser.add_argument("--force", action="store_true", dest="force", default=False) def handle(self, *args, **options): - users = User.objects.all() if options["force"] else User.objects.filter(disk_space_used=0) + users = ( + User.objects.all() + if options["force"] + else User.objects.filter(disk_space_used=0) + ) for index, user in enumerate(users): user.set_space_used() logger.info("Updated storage used for {} user(s)".format(index + 1)) diff --git a/contentcuration/contentcuration/management/commands/setup.py b/contentcuration/contentcuration/management/commands/setup.py index f8ddaae053..16478297f0 100644 --- a/contentcuration/contentcuration/management/commands/setup.py +++ b/contentcuration/contentcuration/management/commands/setup.py @@ -40,31 +40,35 @@ class Command(BaseCommand): - def add_arguments(self, parser): - parser.add_argument('--email', dest="email", default="a@a.com") - parser.add_argument('--password', dest="password", default="a") - parser.add_argument('--clean-data-state', action='store_true', default=False, help='Sets database in clean state.') + parser.add_argument("--email", dest="email", default="a@a.com") + parser.add_argument("--password", dest="password", default="a") + parser.add_argument( + "--clean-data-state", + action="store_true", + default=False, + help="Sets database in clean state.", + ) def handle(self, *args, **options): # Validate email email = options["email"] password = options["password"] if not re.match(r"[^@]+@[^@]+\.[^@]+", email): - print("{} is not a valid email".format(email)) + print("{} is not a valid email".format(email)) # noqa: T201 sys.exit() # create the cache table try: call_command("createcachetable") except DBError as e: - logging.error('Error creating cache table: {}'.format(str(e))) + logging.error("Error creating cache table: {}".format(str(e))) # Run migrations - call_command('migrate') + 
call_command("migrate") # Run loadconstants - call_command('loadconstants') + call_command("loadconstants") # Set up user as admin admin = create_user(email, password, "Admin", "User", admin=True) @@ -77,8 +81,20 @@ def handle(self, *args, **options): # Only create additional data when clean-data-state is False (i.e. default behaviour). if options["clean_data_state"] is False: # Create channels - channel1 = create_channel("Published Channel", DESCRIPTION, editors=[admin], bookmarkers=[user1, user2], public=True) - channel2 = create_channel("Ricecooker Channel", DESCRIPTION, editors=[admin, user1], bookmarkers=[user2], viewers=[user3]) + channel1 = create_channel( + "Published Channel", + DESCRIPTION, + editors=[admin], + bookmarkers=[user1, user2], + public=True, + ) + channel2 = create_channel( + "Ricecooker Channel", + DESCRIPTION, + editors=[admin, user1], + bookmarkers=[user2], + viewers=[user3], + ) channel3 = create_channel("Empty Channel", editors=[user3], viewers=[user2]) channel4 = create_channel("Imported Channel", editors=[admin]) @@ -99,33 +115,71 @@ def handle(self, *args, **options): # Create pool of tags tags = [] for t in TAGS: - tag, _new = ContentTag.objects.get_or_create(tag_name=t, channel=channel1) + tag, _new = ContentTag.objects.get_or_create( + tag_name=t, channel=channel1 + ) # Generate file objects - document_file = create_file("Sample Document", format_presets.DOCUMENT, file_formats.PDF, user=admin) - video_file = create_file("Sample Video", format_presets.VIDEO_HIGH_RES, file_formats.MP4, user=admin) - subtitle_file = create_file("Sample Subtitle", format_presets.VIDEO_SUBTITLE, file_formats.VTT, user=admin) - audio_file = create_file("Sample Audio", format_presets.AUDIO, file_formats.MP3, user=admin) - html5_file = create_file("Sample HTML", format_presets.HTML5_ZIP, file_formats.HTML5, user=admin) + document_file = create_file( + "Sample Document", format_presets.DOCUMENT, file_formats.PDF, user=admin + ) + video_file = create_file( + "Sample Video", + format_presets.VIDEO_HIGH_RES, + file_formats.MP4, + user=admin, + ) + subtitle_file = create_file( + "Sample Subtitle", + format_presets.VIDEO_SUBTITLE, + file_formats.VTT, + user=admin, + ) + audio_file = create_file( + "Sample Audio", format_presets.AUDIO, file_formats.MP3, user=admin + ) + html5_file = create_file( + "Sample HTML", format_presets.HTML5_ZIP, file_formats.HTML5, user=admin + ) # Populate channel 1 with content - generate_tree(channel1.main_tree, document_file, video_file, subtitle_file, audio_file, html5_file, user=admin, tags=tags) + generate_tree( + channel1.main_tree, + document_file, + video_file, + subtitle_file, + audio_file, + html5_file, + user=admin, + tags=tags, + ) # Populate channel 2 with staged content channel2.ricecooker_version = "0.0.0" channel2.save() - generate_tree(channel2.staging_tree, document_file, video_file, subtitle_file, audio_file, html5_file, user=admin, tags=tags) + generate_tree( + channel2.staging_tree, + document_file, + video_file, + subtitle_file, + audio_file, + html5_file, + user=admin, + tags=tags, + ) # Import content from channel 1 into channel 4 channel1.main_tree.children.first().copy_to(channel4.main_tree) # Get validation to be reflected in nodes properly ContentNode.objects.all().update(complete=True) - call_command('mark_incomplete') + call_command("mark_incomplete") # Mark this node as incomplete even though it is complete # for testing purposes - node = ContentNode.objects.get(tree_id=channel1.main_tree.tree_id, title="Sample Audio") + node = 
ContentNode.objects.get( + tree_id=channel1.main_tree.tree_id, title="Sample Audio" + ) node.complete = False node.save() @@ -137,7 +191,11 @@ def handle(self, *args, **options): for legacy_node in legacy_clipboard_nodes: legacy_node.copy_to(target=user1.clipboard_tree) - print("\n\n\nSETUP DONE: Log in as admin to view data (email: {}, password: {})\n\n\n".format(email, password)) + print( # noqa: T201 + "\n\n\nSETUP DONE: Log in as admin to view data (email: {}, password: {})\n\n\n".format( + email, password + ) + ) def generate_tree(root, document, video, subtitle, audio, html5, user=None, tags=None): @@ -148,18 +206,60 @@ def generate_tree(root, document, video, subtitle, audio, html5, user=None, tags # Add files to topic 1 license_id = License.objects.get(license_name=LICENSE).pk - topic1_video_node = create_contentnode("Sample Video", topic1, video, content_kinds.VIDEO, license_id, user=user, tags=tags) + topic1_video_node = create_contentnode( + "Sample Video", + topic1, + video, + content_kinds.VIDEO, + license_id, + user=user, + tags=tags, + ) duplicate_file(subtitle, node=topic1_video_node) - topic1_document_node = create_contentnode("Sample Document", topic1, document, content_kinds.DOCUMENT, license_id, user=user, tags=tags) - topic1_audio_node = create_contentnode("Sample Audio", topic1, audio, content_kinds.AUDIO, license_id, user=user, tags=tags) - topic1_html5_node = create_contentnode("Sample HTML", topic1, html5, content_kinds.HTML5, license_id, user=user, tags=tags) - topic1_exercise_node = create_exercise("Sample Exercise", topic1, license_id, user=user) + topic1_document_node = create_contentnode( + "Sample Document", + topic1, + document, + content_kinds.DOCUMENT, + license_id, + user=user, + tags=tags, + ) + topic1_audio_node = create_contentnode( + "Sample Audio", + topic1, + audio, + content_kinds.AUDIO, + license_id, + user=user, + tags=tags, + ) + topic1_html5_node = create_contentnode( + "Sample HTML", + topic1, + html5, + content_kinds.HTML5, + license_id, + user=user, + tags=tags, + ) + topic1_exercise_node = create_exercise( + "Sample Exercise", topic1, license_id, user=user + ) create_exercise("Sample Empty Exercise", topic1, license_id, user=user, empty=True) # Setup pre/post-requisites around Exercise node # Topic 1 Video -> Topic 1 Document -> Topic 1 Exercise -> Topic 1 Audio -> Topic 1 Html5 - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_document_node.id, prerequisite_id=topic1_video_node.id) - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_exercise_node.id, prerequisite_id=topic1_document_node.id) - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_audio_node.id, prerequisite_id=topic1_exercise_node.id) - PrerequisiteContentRelationship.objects.create(target_node_id=topic1_html5_node.id, prerequisite_id=topic1_audio_node.id) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_document_node.id, prerequisite_id=topic1_video_node.id + ) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_exercise_node.id, prerequisite_id=topic1_document_node.id + ) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_audio_node.id, prerequisite_id=topic1_exercise_node.id + ) + PrerequisiteContentRelationship.objects.create( + target_node_id=topic1_html5_node.id, prerequisite_id=topic1_audio_node.id + ) diff --git a/contentcuration/contentcuration/management/commands/setup_perftest_data.py 
index da67679cd4..18fbadae54 100644
--- a/contentcuration/contentcuration/management/commands/setup_perftest_data.py
+++ b/contentcuration/contentcuration/management/commands/setup_perftest_data.py
@@ -3,8 +3,8 @@
 from contentcuration.models import ContentNode
 from contentcuration.utils.db_tools import create_channel
-from contentcuration.utils.db_tools import TreeBuilder
 from contentcuration.utils.db_tools import create_user
+from contentcuration.utils.db_tools import TreeBuilder

 LICENSE = licenses.SPECIAL_PERMISSIONS
@@ -20,7 +20,7 @@ def handle(self, *args, **options):
         self.editor.clipboard_tree.get_descendants().delete()

         with ContentNode.objects.delay_mptt_updates():
-            print("Creating channel...")
+            print("Creating channel...")  # noqa: T201
             self.generate_random_channels()

             # Make sure we have a channel with a lot of root topics to test initial channel load.
@@ -33,7 +33,7 @@ def handle(self, *args, **options):
             self.editor.clipboard_tree = TreeBuilder(
                 levels=2, num_children=25, user=self.editor
             ).root
-            print(
+            print(  # noqa: T201
                 "Created clipboard with {} nodes".format(
                     self.editor.clipboard_tree.get_descendants().count()
                 )
@@ -47,7 +47,7 @@ def generate_random_channels(self, num_channels=1):

             new_channel.main_tree = TreeBuilder(user=self.editor).root

-            print(
+            print(  # noqa: T201
                 "Created channel with {} nodes".format(
                     new_channel.main_tree.get_descendants().count()
                 )
@@ -55,4 +55,4 @@ def generate_random_channels(self, num_channels=1):
             # make sure we have a trash tree so that can be tested with real data as well.
             new_channel.trash_tree = TreeBuilder(user=self.editor).root

-            print("Created channel with id {}".format(new_channel.pk))
+            print("Created channel with id {}".format(new_channel.pk))  # noqa: T201
diff --git a/contentcuration/contentcuration/management/commands/test_server_perf.py b/contentcuration/contentcuration/management/commands/test_server_perf.py
index 8123e9ec66..b17f0d8081 100644
--- a/contentcuration/contentcuration/management/commands/test_server_perf.py
+++ b/contentcuration/contentcuration/management/commands/test_server_perf.py
@@ -5,15 +5,15 @@


 class Command(BaseCommand):
-    help = 'Runs db tests and reports the performance results. (Usage: test_server_perf [num_objects=100])'
+    help = "Runs db tests and reports the performance results. (Usage: test_server_perf [num_objects=100])"

     def add_arguments(self, parser):
         pass

         # ID of channel to read data from
-        parser.add_argument('--num_objects', type=int, default=100)
+        parser.add_argument("--num_objects", type=int, default=100)

         # ID of channel to write data to (can be same as source channel)
-        parser.add_argument('--stress-test', action='store_true', default=False)
+        parser.add_argument("--stress-test", action="store_true", default=False)

     def handle(self, *args, **options):
         objects = None
@@ -21,23 +21,34 @@ def handle(self, *args, **options):
         objects = objective.Objective()

         stats = {}
-        num_objects = options['num_objects']
+        num_objects = options["num_objects"]
         num_runs = 10

-        object_types = ['ContentNode', 'File']
+        object_types = ["ContentNode", "File"]
         for object_type in object_types:
-            stats[object_type] = objects.get_object_creation_stats(object_type, num_objects, num_runs)
+            stats[object_type] = objects.get_object_creation_stats(
+                object_type, num_objects, num_runs
+            )

-        stats['ContentNode-mptt-delay'] = objects.get_object_creation_stats_mptt_delay(num_objects, num_runs)
-        object_types.append('ContentNode-mptt-delay')
+        stats[
+            "ContentNode-mptt-delay"
+        ] = objects.get_object_creation_stats_mptt_delay(num_objects, num_runs)
+        object_types.append("ContentNode-mptt-delay")

         print()
         print("Test results:")
         for object_type in object_types:
             run_stats = stats[object_type]
-            print("Stats for creating {} {} objects over {} runs: {}".format(num_objects, object_type, num_runs, run_stats))
-
-        if options['stress_test']:
-            print("Running stress test simulating creation / cloning of a channel like KA, this will take at least several minutes. Please do not interrupt if possible!")
+            print(
+                "Stats for creating {} {} objects over {} runs: {}".format(
+                    num_objects, object_type, num_runs, run_stats
+                )
+            )
+
+        if options["stress_test"]:
+            print(  # noqa: T201
+                "Running stress test simulating creation / cloning of a channel like KA, "
+                "this will take at least several minutes. Please do not interrupt if possible!"
+            )
             stats = objects.get_large_channel_creation_stats()
             for stat in stats:
                 print("{}: {}".format(stat, stats[stat]))
diff --git a/contentcuration/contentcuration/middleware/db_readonly.py b/contentcuration/contentcuration/middleware/db_readonly.py
index 34c3c077e0..958da79751 100644
--- a/contentcuration/contentcuration/middleware/db_readonly.py
+++ b/contentcuration/contentcuration/middleware/db_readonly.py
@@ -9,16 +9,18 @@
     class MiddlewareMixin(object):
         pass

+
 from readonly.exceptions import DatabaseWriteDenied


 class DatabaseReadOnlyMiddleware(MiddlewareMixin):
-
     def process_exception(self, request, exception):
         # Only process DatabaseWriteDenied exceptions
         if not isinstance(exception, DatabaseWriteDenied):
             return None

         # Handle the exception
-        if request.method != 'GET':
-            return HttpResponseBadRequest(_('The site is currently in read-only mode. Please try again later.'))
+        if request.method != "GET":
+            return HttpResponseBadRequest(
+                _("The site is currently in read-only mode. Please try again later.")
+            )
+            )
diff --git a/contentcuration/contentcuration/middleware/error_reporting.py b/contentcuration/contentcuration/middleware/error_reporting.py
index c6b5a099c1..cfea9e797c 100644
--- a/contentcuration/contentcuration/middleware/error_reporting.py
+++ b/contentcuration/contentcuration/middleware/error_reporting.py
@@ -4,12 +4,11 @@


 class ErrorReportingMiddleware(object):
-
     def __init__(self, *args, **kwargs):
         self.client = error_reporting.Client.from_service_account_json(
             os.getenv("GOOGLE_APPLICATION_CREDENTIALS"),
             service=os.getenv("GCLOUD_DEBUGGER_APP_IDENTIFIER"),
-            _use_grpc=False
+            _use_grpc=False,
         )

     def process_exception(self, request, exception):
diff --git a/contentcuration/contentcuration/middleware/locale.py b/contentcuration/contentcuration/middleware/locale.py
index 965312c0fa..edb62b3281 100644
--- a/contentcuration/contentcuration/middleware/locale.py
+++ b/contentcuration/contentcuration/middleware/locale.py
@@ -24,4 +24,6 @@ def process_view(self, request, callback, callback_args, callback_kwargs):
     def process_response(self, request, response):
         if self._is_exempt(request):
             return response
-        return super(KolibriStudioLocaleMiddleware, self).process_response(request, response)
+        return super(KolibriStudioLocaleMiddleware, self).process_response(
+            request, response
+        )
diff --git a/contentcuration/contentcuration/middleware/session.py b/contentcuration/contentcuration/middleware/session.py
index 35fb81a367..c110650fe2 100644
--- a/contentcuration/contentcuration/middleware/session.py
+++ b/contentcuration/contentcuration/middleware/session.py
@@ -20,4 +20,6 @@ def process_view(self, request, callback, callback_args, callback_kwargs):
     def process_response(self, request, response):
         if self._is_exempt(request):
             return response
-        return super(KolibriStudioSessionMiddleware, self).process_response(request, response)
+        return super(KolibriStudioSessionMiddleware, self).process_response(
+            request, response
+        )
diff --git a/contentcuration/contentcuration/migration_production_settings.py b/contentcuration/contentcuration/migration_production_settings.py
index e4b948c5c7..610a428525 100644
--- a/contentcuration/contentcuration/migration_production_settings.py
+++ b/contentcuration/contentcuration/migration_production_settings.py
@@ -1,8 +1,8 @@
 # Settings used by migrations. This removes the need for Redis during migration jobs
-
+# flake8: noqa: F403, F405
 from .production_settings import *

-CACHES['default']['BACKEND'] = "django_prometheus.cache.backends.locmem.LocMemCache"
+CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.locmem.LocMemCache"

 # Remove the need for GCS as well
-DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage'
+DEFAULT_FILE_STORAGE = "django_s3_storage.storage.S3Storage"
diff --git a/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py b/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py
index 72bab632d6..8336457756 100644
--- a/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py
+++ b/contentcuration/contentcuration/migrations/0001_squashed_0094_auto_20180910_2342.py
@@ -18,851 +18,1686 @@ class Migration(migrations.Migration):
     initial = True

     dependencies = [
-        ('auth', '0007_alter_validators_add_error_messages'),
+        ("auth", "0007_alter_validators_add_error_messages"),
     ]

     operations = [
         migrations.CreateModel(
-            name='User',
+            name="User",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('password', models.CharField(max_length=128, verbose_name='password')),
-                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
-                ('email', models.EmailField(max_length=100, unique=True)),
-                ('first_name', models.CharField(max_length=100)),
-                ('last_name', models.CharField(max_length=100)),
-                ('is_admin', models.BooleanField(default=False)),
-                ('is_active', models.BooleanField(default=False)),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("password", models.CharField(max_length=128, verbose_name="password")),
+                (
+                    "last_login",
+                    models.DateTimeField(
+                        blank=True, null=True, verbose_name="last login"
+                    ),
+                ),
+                ("email", models.EmailField(max_length=100, unique=True)),
+                ("first_name", models.CharField(max_length=100)),
+                ("last_name", models.CharField(max_length=100)),
+                ("is_admin", models.BooleanField(default=False)),
+                ("is_active", models.BooleanField(default=False)),
             ],
             options={
-                'verbose_name': 'User',
-                'verbose_name_plural': 'Users',
+                "verbose_name": "User",
+                "verbose_name_plural": "Users",
             },
         ),
         migrations.CreateModel(
-            name='AssessmentItem',
+            name="AssessmentItem",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('type', models.CharField(default='multiplechoice', max_length=50)),
-                ('question', models.TextField(blank=True)),
-                ('answers', models.TextField(default='[]')),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("type", models.CharField(default="multiplechoice", max_length=50)),
+                ("question", models.TextField(blank=True)),
+                ("answers", models.TextField(default="[]")),
             ],
         ),
         migrations.CreateModel(
-            name='Channel',
+            name="Channel",
             fields=[
-                ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
-                ('name', models.CharField(max_length=200)),
-                ('description', models.CharField(blank=True, max_length=400)),
-                ('version', models.IntegerField(default=0)),
-                ('thumbnail', models.TextField(blank=True)),
-                ('deleted', models.BooleanField(default=False)),
-                ('public', models.BooleanField(default=False)),
-                ('bookmarked_by',
models.ManyToManyField(related_name='bookmarked_channels', to=settings.AUTH_USER_MODEL, verbose_name='bookmarked by')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("name", models.CharField(max_length=200)), + ("description", models.CharField(blank=True, max_length=400)), + ("version", models.IntegerField(default=0)), + ("thumbnail", models.TextField(blank=True)), + ("deleted", models.BooleanField(default=False)), + ("public", models.BooleanField(default=False)), + ( + "bookmarked_by", + models.ManyToManyField( + related_name="bookmarked_channels", + to=settings.AUTH_USER_MODEL, + verbose_name="bookmarked by", + ), + ), ], options={ - 'verbose_name': 'Channel', - 'verbose_name_plural': 'Channels', + "verbose_name": "Channel", + "verbose_name_plural": "Channels", }, ), migrations.CreateModel( - name='ContentKind', + name="ContentKind", fields=[ - ('kind', models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', - 'Exercise'), ('document', 'Document'), ('image', 'Image')], max_length=200, primary_key=True, serialize=False)), + ( + "kind", + models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("image", "Image"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), + ), ], ), migrations.CreateModel( - name='ContentNode', + name="ContentNode", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('content_id', contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32)), - ('title', models.CharField(max_length=200)), - ('description', models.CharField(blank=True, max_length=400)), - ('sort_order', models.FloatField(default=0, help_text='Ascending, lowest number shown first', max_length=50, verbose_name='sort order')), - ('license_owner', models.CharField(blank=True, help_text='Organization of person who holds the essential rights', max_length=200)), - ('author', models.CharField(blank=True, help_text='Person who created content', max_length=200)), - ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), - ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), - ('changed', models.BooleanField(default=True)), - ('lft', models.PositiveIntegerField(db_index=True, editable=False)), - ('rght', models.PositiveIntegerField(db_index=True, editable=False)), - ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)), - ('level', models.PositiveIntegerField(db_index=True, editable=False)), - ('cloned_source', mptt.fields.TreeForeignKey(blank=True, null=True, - on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='contentcuration.ContentNode')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ( + "content_id", + contentcuration.models.UUIDField( + default=uuid.uuid4, editable=False, max_length=32 + ), + ), + ("title", models.CharField(max_length=200)), + ("description", models.CharField(blank=True, max_length=400)), + ( + "sort_order", + models.FloatField( + default=0, + help_text="Ascending, lowest number shown first", + max_length=50, + verbose_name="sort order", + ), + ), + ( + "license_owner", + models.CharField( + blank=True, + help_text="Organization of person who holds the essential rights", + 
max_length=200, + ), + ), + ( + "author", + models.CharField( + blank=True, + help_text="Person who created content", + max_length=200, + ), + ), + ( + "created", + models.DateTimeField(auto_now_add=True, verbose_name="created"), + ), + ( + "modified", + models.DateTimeField(auto_now=True, verbose_name="modified"), + ), + ("changed", models.BooleanField(default=True)), + ("lft", models.PositiveIntegerField(db_index=True, editable=False)), + ("rght", models.PositiveIntegerField(db_index=True, editable=False)), + ("tree_id", models.PositiveIntegerField(db_index=True, editable=False)), + ("level", models.PositiveIntegerField(db_index=True, editable=False)), + ( + "cloned_source", + mptt.fields.TreeForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="clones", + to="contentcuration.ContentNode", + ), + ), ], options={ - 'verbose_name': 'Topic', - 'verbose_name_plural': 'Topics', + "verbose_name": "Topic", + "verbose_name_plural": "Topics", }, ), migrations.CreateModel( - name='ContentTag', + name="ContentTag", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('tag_name', models.CharField(max_length=30)), - ('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tags', to='contentcuration.Channel')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("tag_name", models.CharField(max_length=30)), + ( + "channel", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="tags", + to="contentcuration.Channel", + ), + ), ], ), migrations.CreateModel( - name='Exercise', + name="Exercise", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), ], ), migrations.CreateModel( - name='File', + name="File", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('checksum', models.CharField(blank=True, max_length=400)), - ('file_size', models.IntegerField(blank=True, null=True)), - ('file_on_disk', models.FileField(blank=True, max_length=500, - storage=contentcuration.models.FileOnDiskStorage(), upload_to=contentcuration.models.file_on_disk_name)), - ('original_filename', models.CharField(blank=True, max_length=255)), - ('contentnode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.ContentNode')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("checksum", models.CharField(blank=True, max_length=400)), + ("file_size", models.IntegerField(blank=True, null=True)), + ( + "file_on_disk", + models.FileField( + blank=True, + max_length=500, + storage=contentcuration.models.FileOnDiskStorage(), + upload_to=contentcuration.models.file_on_disk_name, + ), + ), + ("original_filename", models.CharField(blank=True, max_length=255)), + ( + "contentnode", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.CreateModel( - name='FileFormat', + name="FileFormat", 
fields=[ - ('extension', models.CharField(choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), - ('mp3', 'mp3'), ('pdf', 'pdf')], max_length=40, primary_key=True, serialize=False)), - ('mimetype', models.CharField(blank=True, max_length=200)), + ( + "extension", + models.CharField( + choices=[ + ("mp4", "mp4"), + ("vtt", "vtt"), + ("srt", "srt"), + ("mp3", "mp3"), + ("pdf", "pdf"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + ("mimetype", models.CharField(blank=True, max_length=200)), ], ), migrations.CreateModel( - name='FormatPreset', + name="FormatPreset", fields=[ - ('id', models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ( - 'exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False)), - ('readable_name', models.CharField(max_length=400)), - ('multi_language', models.BooleanField(default=False)), - ('supplementary', models.BooleanField(default=False)), - ('order', models.IntegerField(default=0)), - ('allowed_formats', models.ManyToManyField(blank=True, to='contentcuration.FileFormat')), - ('kind', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='format_presets', to='contentcuration.ContentKind')), - ('thumbnail', models.BooleanField(default=False)), - ('display', models.BooleanField(default=True)), - ('subtitle', models.BooleanField(default=False)), + ( + "id", + models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("vector_video", "Vectorized"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), + ), + ("readable_name", models.CharField(max_length=400)), + ("multi_language", models.BooleanField(default=False)), + ("supplementary", models.BooleanField(default=False)), + ("order", models.IntegerField(default=0)), + ( + "allowed_formats", + models.ManyToManyField(blank=True, to="contentcuration.FileFormat"), + ), + ( + "kind", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="format_presets", + to="contentcuration.ContentKind", + ), + ), + ("thumbnail", models.BooleanField(default=False)), + ("display", models.BooleanField(default=True)), + ("subtitle", models.BooleanField(default=False)), ], ), 
migrations.CreateModel( - name='Invitation', + name="Invitation", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('email', models.EmailField(max_length=100, null=True)), - ('first_name', models.CharField(default='Guest', max_length=100)), - ('last_name', models.CharField(blank=True, max_length=100, null=True)), - ('channel', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pending_editors', to='contentcuration.Channel')), - ('invited', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sent_to', to=settings.AUTH_USER_MODEL)), - ('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sent_by', to=settings.AUTH_USER_MODEL)), - ('share_mode', models.CharField(default='edit', max_length=50)), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ("email", models.EmailField(max_length=100, null=True)), + ("first_name", models.CharField(default="Guest", max_length=100)), + ("last_name", models.CharField(blank=True, max_length=100, null=True)), + ( + "channel", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="pending_editors", + to="contentcuration.Channel", + ), + ), + ( + "invited", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="sent_to", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "sender", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="sent_by", + to=settings.AUTH_USER_MODEL, + ), + ), + ("share_mode", models.CharField(default="edit", max_length=50)), ], options={ - 'verbose_name': 'Invitation', - 'verbose_name_plural': 'Invitations', + "verbose_name": "Invitation", + "verbose_name_plural": "Invitations", }, ), migrations.CreateModel( - name='Language', + name="Language", fields=[ - ('id', models.CharField(max_length=14, primary_key=True, serialize=False)), - ('lang_code', models.CharField(db_index=True, max_length=3)), - ('lang_subcode', models.CharField(blank=True, db_index=True, max_length=10, null=True)), - ('readable_name', models.CharField(blank=True, max_length=100)), - ('native_name', models.CharField(blank=True, max_length=100)), - ('lang_direction', models.CharField(choices=[('ltr', 'Left to Right'), ('rtl', 'Right to Left')], default='ltr', max_length=3)), + ( + "id", + models.CharField(max_length=14, primary_key=True, serialize=False), + ), + ("lang_code", models.CharField(db_index=True, max_length=3)), + ( + "lang_subcode", + models.CharField( + blank=True, db_index=True, max_length=10, null=True + ), + ), + ("readable_name", models.CharField(blank=True, max_length=100)), + ("native_name", models.CharField(blank=True, max_length=100)), + ( + "lang_direction", + models.CharField( + choices=[("ltr", "Left to Right"), ("rtl", "Right to Left")], + default="ltr", + max_length=3, + ), + ), ], ), migrations.CreateModel( - name='License', + name="License", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('license_name', models.CharField(max_length=50)), - ('license_url', models.URLField(blank=True)), - ('license_description', models.TextField(blank=True)), - ('exists', models.BooleanField(default=False, help_text='Tells whether or not a content item is licensed to share', verbose_name='license 
exists')), - ('copyright_holder_required', models.BooleanField(default=True)), - ('is_custom', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("license_name", models.CharField(max_length=50)), + ("license_url", models.URLField(blank=True)), + ("license_description", models.TextField(blank=True)), + ( + "exists", + models.BooleanField( + default=False, + help_text="Tells whether or not a content item is licensed to share", + verbose_name="license exists", + ), + ), + ("copyright_holder_required", models.BooleanField(default=True)), + ("is_custom", models.BooleanField(default=False)), ], ), migrations.CreateModel( - name='PrerequisiteContentRelationship', + name="PrerequisiteContentRelationship", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('prerequisite', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_prerequisitecontentrelationship_prerequisite', to='contentcuration.ContentNode')), - ('target_node', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_prerequisitecontentrelationship_target_node', to='contentcuration.ContentNode')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "prerequisite", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_prerequisitecontentrelationship_prerequisite", + to="contentcuration.ContentNode", + ), + ), + ( + "target_node", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_prerequisitecontentrelationship_target_node", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.CreateModel( - name='RelatedContentRelationship', + name="RelatedContentRelationship", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('contentnode_1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_relatedcontentrelationship_1', to='contentcuration.ContentNode')), - ('contentnode_2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, - related_name='contentcuration_relatedcontentrelationship_2', to='contentcuration.ContentNode')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "contentnode_1", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_relatedcontentrelationship_1", + to="contentcuration.ContentNode", + ), + ), + ( + "contentnode_2", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentcuration_relatedcontentrelationship_2", + to="contentcuration.ContentNode", + ), + ), ], ), migrations.AddField( - model_name='file', - name='file_format', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.FileFormat'), - ), - migrations.AddField( - model_name='file', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.Language'), - ), - migrations.AddField( - model_name='file', - name='preset', - field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, - related_name='files', to='contentcuration.FormatPreset'), - ), - migrations.AddField( - model_name='contentnode', - name='is_related', - field=models.ManyToManyField(blank=True, related_name='relate_to', - through='contentcuration.RelatedContentRelationship', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='kind', - field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contentnodes', to='contentcuration.ContentKind'), - ), - migrations.AddField( - model_name='contentnode', - name='license', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'), - ), - migrations.AddField( - model_name='contentnode', - name='original_node', - field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, - related_name='duplicates', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='parent', - field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='children', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='prerequisite', - field=models.ManyToManyField(blank=True, related_name='is_prerequisite_of', - through='contentcuration.PrerequisiteContentRelationship', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='tags', - field=models.ManyToManyField(blank=True, related_name='tagged_content', to='contentcuration.ContentTag'), - ), - migrations.AddField( - model_name='channel', - name='clipboard_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_clipboard', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='channel', - name='editors', - field=models.ManyToManyField(blank=True, help_text='Users with edit rights', related_name='editable_channels', - to=settings.AUTH_USER_MODEL, verbose_name='editors'), - ), - migrations.AddField( - model_name='channel', - name='main_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_main', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='channel', - name='trash_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_trash', to='contentcuration.ContentNode'), + model_name="file", + name="file_format", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.FileFormat", + ), + ), + migrations.AddField( + model_name="file", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.Language", + ), + ), + migrations.AddField( + model_name="file", + name="preset", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.FormatPreset", + ), + ), + migrations.AddField( + model_name="contentnode", + name="is_related", + field=models.ManyToManyField( + blank=True, + related_name="relate_to", + through="contentcuration.RelatedContentRelationship", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + 
model_name="contentnode", + name="kind", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="contentnodes", + to="contentcuration.ContentKind", + ), + ), + migrations.AddField( + model_name="contentnode", + name="license", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.License", + ), + ), + migrations.AddField( + model_name="contentnode", + name="original_node", + field=mptt.fields.TreeForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="duplicates", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="parent", + field=mptt.fields.TreeForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="children", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="prerequisite", + field=models.ManyToManyField( + blank=True, + related_name="is_prerequisite_of", + through="contentcuration.PrerequisiteContentRelationship", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="tags", + field=models.ManyToManyField( + blank=True, + related_name="tagged_content", + to="contentcuration.ContentTag", + ), + ), + migrations.AddField( + model_name="channel", + name="clipboard_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_clipboard", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="channel", + name="editors", + field=models.ManyToManyField( + blank=True, + help_text="Users with edit rights", + related_name="editable_channels", + to=settings.AUTH_USER_MODEL, + verbose_name="editors", + ), + ), + migrations.AddField( + model_name="channel", + name="main_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_main", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="channel", + name="trash_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_trash", + to="contentcuration.ContentNode", + ), ), migrations.AlterUniqueTogether( - name='relatedcontentrelationship', - unique_together=set([('contentnode_1', 'contentnode_2')]), + name="relatedcontentrelationship", + unique_together=set([("contentnode_1", "contentnode_2")]), ), migrations.AlterUniqueTogether( - name='prerequisitecontentrelationship', - unique_together=set([('target_node', 'prerequisite')]), + name="prerequisitecontentrelationship", + unique_together=set([("target_node", "prerequisite")]), ), migrations.AlterUniqueTogether( - name='contenttag', - unique_together=set([('tag_name', 'channel')]), + name="contenttag", + unique_together=set([("tag_name", "channel")]), ), migrations.AddField( - model_name='user', - name='clipboard_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='user_clipboard', to='contentcuration.ContentNode'), + model_name="user", + name="clipboard_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="user_clipboard", + to="contentcuration.ContentNode", + ), ), migrations.RenameField( - model_name='contentnode', - old_name='license_owner', - 
new_name='copyright_holder', + model_name="contentnode", + old_name="license_owner", + new_name="copyright_holder", ), migrations.AlterModelManagers( - name='contentnode', - managers=[ - ], + name="contentnode", + managers=[], ), migrations.AddField( - model_name='contentnode', - name='published', + model_name="contentnode", + name="published", field=models.BooleanField(default=False), ), migrations.AlterField( - model_name='contentnode', - name='sort_order', - field=models.FloatField(default=1, help_text='Ascending, lowest number shown first', max_length=50, verbose_name='sort order'), - ), - migrations.AddField( - model_name='file', - name='source_url', + model_name="contentnode", + name="sort_order", + field=models.FloatField( + default=1, + help_text="Ascending, lowest number shown first", + max_length=50, + verbose_name="sort order", + ), + ), + migrations.AddField( + model_name="file", + name="source_url", field=models.CharField(blank=True, max_length=400, null=True), ), migrations.AddField( - model_name='channel', - name='staging_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_staging', to='contentcuration.ContentNode'), + model_name="channel", + name="staging_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_staging", + to="contentcuration.ContentNode", + ), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', - 'Exercise'), ('document', 'Document')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), ('mp3', 'mp3'), ('wav', 'wav'), ('pdf', 'pdf'), - ('jpg', 'jpg'), ('jpeg', 'jpeg'), ('png', 'png'), ('perseus', 'perseus')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='contentnode', - name='node_id', - field=contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32), - ), - migrations.AddField( - model_name='channel', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_language', to='contentcuration.Language'), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "mp4"), + ("vtt", "vtt"), + ("srt", "srt"), + ("mp3", "mp3"), + ("wav", "wav"), + ("pdf", "pdf"), + ("jpg", "jpg"), + ("jpeg", "jpeg"), + ("png", "png"), + ("perseus", "perseus"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="contentnode", + name="node_id", + field=contentcuration.models.UUIDField( + default=uuid.uuid4, editable=False, max_length=32 + ), + ), + migrations.AddField( + model_name="channel", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_language", + to="contentcuration.Language", + ), ), migrations.AlterField( - model_name='channel', - name='thumbnail', + 
model_name="channel", + name="thumbnail", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='exercise', - name='contentnode', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='exercise', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='exercise', - name='mastery_model', - field=models.CharField(choices=[('do_all', 'Do all'), ('num_correct_in_a_row_2', '2 in a row'), ('num_correct_in_a_row_10', '10 in a row'), ( - 'num_correct_in_a_row_3', '3 in a row'), ('num_correct_in_a_row_5', '5 in a row'), ('skill_check', 'Skill check'), ('m_of_n', 'M out of N')], default='do_all', max_length=200), - ), - migrations.AddField( - model_name='assessmentitem', - name='contentnode', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='assessment_items', to='contentcuration.ContentNode'), - ), - migrations.AddField( - model_name='contentnode', - name='extra_fields', + model_name="exercise", + name="contentnode", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="exercise", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="exercise", + name="mastery_model", + field=models.CharField( + choices=[ + ("do_all", "Do all"), + ("num_correct_in_a_row_2", "2 in a row"), + ("num_correct_in_a_row_10", "10 in a row"), + ("num_correct_in_a_row_3", "3 in a row"), + ("num_correct_in_a_row_5", "5 in a row"), + ("skill_check", "Skill check"), + ("m_of_n", "M out of N"), + ], + default="do_all", + max_length=200, + ), + ), + migrations.AddField( + model_name="assessmentitem", + name="contentnode", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="assessment_items", + to="contentcuration.ContentNode", + ), + ), + migrations.AddField( + model_name="contentnode", + name="extra_fields", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='assessmentitem', - name='order', + model_name="assessmentitem", + name="order", field=models.IntegerField(default=1), ), migrations.AddField( - model_name='assessmentitem', - name='assessment_id', - field=contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32), + model_name="assessmentitem", + name="assessment_id", + field=contentcuration.models.UUIDField( + default=uuid.uuid4, editable=False, max_length=32 + ), ), migrations.AlterField( - model_name='contentnode', - name='license', - field=models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'), - ), - migrations.AddField( - model_name='assessmentitem', - name='raw_data', + model_name="contentnode", + name="license", + field=models.ForeignKey( + default=1, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.License", + ), + ), + migrations.AddField( + model_name="assessmentitem", + name="raw_data", field=models.TextField(blank=True), ), migrations.AddField( - model_name='assessmentitem', - name='hints', - field=models.TextField(default='[]'), + model_name="assessmentitem", + name="hints", + field=models.TextField(default="[]"), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), ('mp3', 'mp3'), ('wav', 'wav'), ('pdf', 'pdf'), ('jpg', 'jpg'), ( - 'jpeg', 'jpeg'), ('png', 'png'), ('json', 
'json'), ('svg', 'svg'), ('perseus', 'perseus')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "mp4"), + ("vtt", "vtt"), + ("srt", "srt"), + ("mp3", "mp3"), + ("wav", "wav"), + ("pdf", "pdf"), + ("jpg", "jpg"), + ("jpeg", "jpeg"), + ("png", "png"), + ("json", "json"), + ("svg", "svg"), + ("perseus", "perseus"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.RemoveField( - model_name='contentnode', - name='author', - ), - migrations.AddField( - model_name='contentnode', - name='author', - field=models.CharField(blank=True, default='', help_text='Who created this content?', max_length=200, null=True), - ), - migrations.AddField( - model_name='file', - name='assessment_item', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='files', to='contentcuration.AssessmentItem'), - ), - migrations.AddField( - model_name='channel', - name='previous_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, - related_name='channel_previous', to='contentcuration.ContentNode'), + model_name="contentnode", + name="author", + ), + migrations.AddField( + model_name="contentnode", + name="author", + field=models.CharField( + blank=True, + default="", + help_text="Who created this content?", + max_length=200, + null=True, + ), + ), + migrations.AddField( + model_name="file", + name="assessment_item", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="files", + to="contentcuration.AssessmentItem", + ), + ), + migrations.AddField( + model_name="channel", + name="previous_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_previous", + to="contentcuration.ContentNode", + ), ), migrations.AlterField( - model_name='channel', - name='name', + model_name="channel", + name="name", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), - ('document', 'Document'), ('html5', 'HTML5 App')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('wav', 'WAV Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ( - 'jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='channel', - name='viewers', - field=models.ManyToManyField(blank=True, help_text='Users with view only rights', related_name='view_only_channels', - to=settings.AUTH_USER_MODEL, verbose_name='viewers'), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + 
("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("wav", "WAV Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="channel", + name="viewers", + field=models.ManyToManyField( + blank=True, + help_text="Users with view only rights", + related_name="view_only_channels", + to=settings.AUTH_USER_MODEL, + verbose_name="viewers", + ), ), migrations.AlterField( - model_name='channel', - name='name', + model_name="channel", + name="name", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), - ('document', 'Document'), ('html5', 'HTML5 App')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='contentnode', - name='description', + model_name="contentnode", + name="description", field=models.TextField(blank=True), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('wav', 'WAV Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ( - 'jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("wav", "WAV Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='channel', - name='name', + model_name="channel", + name="name", field=models.CharField(blank=True, max_length=200), ), migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), - ('document', 'Document'), ('html5', 'HTML5 App')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('wav', 'WAV Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ( - 'jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('json', 'JSON'), ('svg', 'SVG 
Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='contentnode', - name='original_channel_id', - field=contentcuration.models.UUIDField(db_index=True, editable=False, max_length=32, null=True), - ), - migrations.AddField( - model_name='contentnode', - name='source_channel_id', - field=contentcuration.models.UUIDField(editable=False, max_length=32, null=True), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("wav", "WAV Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), + ), + migrations.AddField( + model_name="contentnode", + name="original_channel_id", + field=contentcuration.models.UUIDField( + db_index=True, editable=False, max_length=32, null=True + ), + ), + migrations.AddField( + model_name="contentnode", + name="source_channel_id", + field=contentcuration.models.UUIDField( + editable=False, max_length=32, null=True + ), ), migrations.AlterField( - model_name='contentnode', - name='copyright_holder', - field=models.CharField(blank=True, default='', help_text='Organization of person who holds the essential rights', max_length=200), + model_name="contentnode", + name="copyright_holder", + field=models.CharField( + blank=True, + default="", + help_text="Organization of person who holds the essential rights", + max_length=200, + ), ), migrations.AddField( - model_name='contentnode', - name='original_source_node_id', - field=contentcuration.models.UUIDField(db_index=True, editable=False, max_length=32, null=True), + model_name="contentnode", + name="original_source_node_id", + field=contentcuration.models.UUIDField( + db_index=True, editable=False, max_length=32, null=True + ), ), migrations.AddField( - model_name='contentnode', - name='source_node_id', - field=contentcuration.models.UUIDField(editable=False, max_length=32, null=True), + model_name="contentnode", + name="source_node_id", + field=contentcuration.models.UUIDField( + editable=False, max_length=32, null=True + ), ), migrations.AddField( - model_name='assessmentitem', - name='source_url', + model_name="assessmentitem", + name="source_url", field=models.CharField(blank=True, max_length=400, null=True), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ( - 'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False), - ), - migrations.AddField( - model_name='channel', - name='ricecooker_version', + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("srt", "SRT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("zip", "HTML5 Zip"), + ], + 
max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
+        ),
+        migrations.AddField(
+            model_name="channel",
+            name="ricecooker_version",
             field=models.CharField(blank=True, max_length=100, null=True),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='source_domain',
+            model_name="channel",
+            name="source_domain",
             field=models.CharField(blank=True, max_length=300, null=True),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='source_id',
+            model_name="channel",
+            name="source_id",
             field=models.CharField(blank=True, max_length=200, null=True),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='source_domain',
+            model_name="contentnode",
+            name="source_domain",
             field=models.CharField(blank=True, max_length=300, null=True),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='source_id',
+            model_name="contentnode",
+            name="source_id",
             field=models.CharField(blank=True, max_length=200, null=True),
         ),
         migrations.AddField(
-            model_name='assessmentitem',
-            name='randomize',
+            model_name="assessmentitem",
+            name="randomize",
             field=models.BooleanField(default=False),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='deleted',
+            model_name="channel",
+            name="deleted",
             field=models.BooleanField(db_index=True, default=False),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='public',
+            model_name="channel",
+            name="public",
             field=models.BooleanField(db_index=True, default=False),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='changed',
+            model_name="contentnode",
+            name="changed",
             field=models.BooleanField(db_index=True, default=True),
         ),
         migrations.AlterField(
-            model_name='file',
-            name='checksum',
+            model_name="file",
+            name="checksum",
             field=models.CharField(blank=True, db_index=True, max_length=400),
         ),
         migrations.AddField(
-            model_name='assessmentitem',
-            name='deleted',
+            model_name="assessmentitem",
+            name="deleted",
             field=models.BooleanField(default=False),
         ),
         migrations.CreateModel(
-            name='ChannelResourceSize',
+            name="ChannelResourceSize",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('tree_id', models.IntegerField()),
-                ('resource_size', models.IntegerField()),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("tree_id", models.IntegerField()),
+                ("resource_size", models.IntegerField()),
             ],
             options={
-                'db_table': 'contentcuration_channel_resource_sizes',
-                'managed': False,
+                "db_table": "contentcuration_channel_resource_sizes",
+                "managed": False,
             },
         ),
         migrations.CreateModel(
-            name='ChannelResourceSize',
+            name="ChannelResourceSize",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('tree_id', models.IntegerField()),
-                ('resource_size', models.IntegerField()),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("tree_id", models.IntegerField()),
+                ("resource_size", models.IntegerField()),
             ],
             options={
-                'db_table': 'contentcuration_channel_resource_sizes',
-                'managed': False,
+                "db_table": "contentcuration_channel_resource_sizes",
+                "managed": False,
             },
         ),
         migrations.AddField(
-            model_name='user',
-            name='preferences',
-            field=models.TextField(default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}'),
+            model_name="user",
+            name="preferences",
+            field=models.TextField(
+                default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}'
+            ),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='license_description',
+            model_name="contentnode",
+            name="license_description",
             field=models.CharField(blank=True, max_length=400, null=True),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='copyright_holder',
-            field=models.CharField(blank=True, default='', help_text='Organization of person who holds the essential rights', max_length=200, null=True),
-        ),
-        migrations.AddField(
-            model_name='user',
-            name='date_joined',
-            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined'),
-        ),
-        migrations.AddField(
-            model_name='user',
-            name='groups',
-            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
-                                         related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
-        ),
-        migrations.AddField(
-            model_name='user',
-            name='is_staff',
-            field=models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status'),
-        ),
-        migrations.AddField(
-            model_name='user',
-            name='is_superuser',
+            model_name="contentnode",
+            name="copyright_holder",
+            field=models.CharField(
+                blank=True,
+                default="",
+                help_text="Organization of person who holds the essential rights",
+                max_length=200,
+                null=True,
+            ),
+        ),
+        migrations.AddField(
+            model_name="user",
+            name="date_joined",
+            field=models.DateTimeField(
+                default=django.utils.timezone.now, verbose_name="date joined"
+            ),
+        ),
+        migrations.AddField(
+            model_name="user",
+            name="groups",
+            field=models.ManyToManyField(
+                blank=True,
+                help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
+                related_name="user_set",
+                related_query_name="user",
+                to="auth.Group",
+                verbose_name="groups",
+            ),
+        ),
+        migrations.AddField(
+            model_name="user",
+            name="is_staff",
             field=models.BooleanField(
-                default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
+                default=False,
+                help_text="Designates whether the user can log into this admin site.",
+                verbose_name="staff status",
+            ),
         ),
         migrations.AddField(
-            model_name='user',
-            name='user_permissions',
-            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set',
-                                         related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
+            model_name="user",
+            name="is_superuser",
+            field=models.BooleanField(
+                default=False,
+                help_text="Designates that this user has all permissions without explicitly assigning them.",
+                verbose_name="superuser status",
+            ),
+        ),
+        migrations.AddField(
+            model_name="user",
+            name="user_permissions",
+            field=models.ManyToManyField(
+                blank=True,
+                help_text="Specific permissions for this user.",
+                related_name="user_set",
+                related_query_name="user",
+                to="auth.Permission",
+                verbose_name="user permissions",
+            ),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='is_active',
-            field=models.BooleanField(default=False, help_text='Designates whether this user should be treated as active.', verbose_name='active'),
+            model_name="user",
+            name="is_active",
+            field=models.BooleanField(
+                default=False,
+                help_text="Designates whether this user should be treated as active.",
+                verbose_name="active",
+            ),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='chef_tree',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
-                                    related_name='channel_chef', to='contentcuration.ContentNode'),
+            model_name="channel",
+            name="chef_tree",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="channel_chef",
+                to="contentcuration.ContentNode",
+            ),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='preferences',
-            field=models.TextField(default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}'),
+            model_name="channel",
+            name="preferences",
+            field=models.TextField(
+                default='{"auto_derive_exercise_thumbnail": true, "auto_derive_video_thumbnail": true, "m_value": 5, "language": null, "license": null, "author": null, "aggregator": null, "auto_randomize_questions": true, "auto_derive_document_thumbnail": true, "copyright_holder": null, "auto_derive_html5_thumbnail": true, "provider": null, "auto_derive_audio_thumbnail": true, "license_description": null, "n_value": 5, "mastery_model": "num_correct_in_a_row_5"}'
+            ),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='thumbnail_encoding',
+            model_name="channel",
+            name="thumbnail_encoding",
             field=models.TextField(blank=True, null=True),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='thumbnail_encoding',
+            model_name="contentnode",
+            name="thumbnail_encoding",
             field=models.TextField(blank=True, null=True),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='content_id',
-            field=contentcuration.models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, max_length=32),
-        ),
-        migrations.AddField(
-            model_name='contentnode',
-            name='language',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
-                                    related_name='content_language', to='contentcuration.Language'),
+            model_name="contentnode",
+            name="content_id",
+            field=contentcuration.models.UUIDField(
+                db_index=True, default=uuid.uuid4, editable=False, max_length=32
+            ),
+        ),
+        migrations.AddField(
+            model_name="contentnode",
+            name="language",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="content_language",
+                to="contentcuration.Language",
+            ),
         ),
         migrations.CreateModel(
-            name='SecretToken',
+            name="SecretToken",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('token', models.CharField(max_length=100, unique=True)),
-                ('is_primary', models.BooleanField(default=False)),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("token", models.CharField(max_length=100, unique=True)),
+                ("is_primary", models.BooleanField(default=False)),
             ],
         ),
         migrations.AddField(
-            model_name='channel',
-            name='secret_tokens',
-            field=models.ManyToManyField(blank=True, related_name='channels', to='contentcuration.SecretToken', verbose_name='secret tokens'),
+            model_name="channel",
+            name="secret_tokens",
+            field=models.ManyToManyField(
+                blank=True,
+                related_name="channels",
+                to="contentcuration.SecretToken",
+                verbose_name="secret tokens",
+            ),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='priority',
-            field=models.IntegerField(default=0, help_text='Order to display public channels'),
+            model_name="channel",
+            name="priority",
+            field=models.IntegerField(
+                default=0, help_text="Order to display public channels"
+            ),
         ),
         migrations.CreateModel(
-            name='StagedFile',
+            name="StagedFile",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('checksum', models.CharField(blank=True, db_index=True, max_length=400)),
-                ('file_size', models.IntegerField(blank=True, null=True)),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                (
+                    "checksum",
+                    models.CharField(blank=True, db_index=True, max_length=400),
+                ),
+                ("file_size", models.IntegerField(blank=True, null=True)),
             ],
         ),
         migrations.AddField(
-            model_name='file',
-            name='uploaded_by',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to=settings.AUTH_USER_MODEL),
+            model_name="file",
+            name="uploaded_by",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="files",
+                to=settings.AUTH_USER_MODEL,
+            ),
         ),
         migrations.AddField(
-            model_name='user',
-            name='disk_space',
-            field=models.FloatField(default=524288000, help_text='How many bytes a user can upload'),
+            model_name="user",
+            name="disk_space",
+            field=models.FloatField(
+                default=524288000, help_text="How many bytes a user can upload"
+            ),
         ),
         migrations.AddField(
-            model_name='stagedfile',
-            name='uploaded_by',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
-                                    related_name='staged_files', to=settings.AUTH_USER_MODEL),
+            model_name="stagedfile",
+            name="uploaded_by",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="staged_files",
+                to=settings.AUTH_USER_MODEL,
+            ),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='freeze_authoring_data',
+            model_name="contentnode",
+            name="freeze_authoring_data",
             field=models.BooleanField(default=False),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='license',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'),
-        ),
-        migrations.AddField(
-            model_name='channel',
-            name='icon_encoding',
+            model_name="contentnode",
+            name="license",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                to="contentcuration.License",
+            ),
+        ),
+        migrations.AddField(
+            model_name="channel",
+            name="icon_encoding",
             field=models.TextField(blank=True, null=True),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='last_published',
+            model_name="channel",
+            name="last_published",
             field=models.DateTimeField(blank=True, null=True),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='included_languages',
-            field=models.ManyToManyField(blank=True, related_name='channels', to='contentcuration.Language', verbose_name='languages'),
+            model_name="channel",
+            name="included_languages",
+            field=models.ManyToManyField(
+                blank=True,
+                related_name="channels",
+                to="contentcuration.Language",
+                verbose_name="languages",
+            ),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='published_kind_count',
+            model_name="channel",
+            name="published_kind_count",
             field=models.TextField(blank=True, null=True),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='published_size',
+            model_name="channel",
+            name="published_size",
             field=models.FloatField(default=0),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='total_resource_count',
+            model_name="channel",
+            name="total_resource_count",
             field=models.IntegerField(default=0),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='publishing',
+            model_name="contentnode",
+            name="publishing",
             field=models.BooleanField(default=False),
         ),
         migrations.AddField(
-            model_name='user',
-            name='information',
+            model_name="user",
+            name="information",
             field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
         ),
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), (
-                'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("srt", "SRT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), (
-                'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
-        ),
-        migrations.AddField(
-            model_name='channel',
-            name='content_defaults',
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("srt", "SRT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
+        ),
+        migrations.AddField(
+            model_name="channel",
+            name="content_defaults",
             field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
         ),
         migrations.AlterIndexTogether(
-            name='channel',
-            index_together=set([('deleted', 'public')]),
+            name="channel",
+            index_together=set([("deleted", "public")]),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='role_visibility',
-            field=models.CharField(choices=[('coach', 'Coach'), ('learner', 'Learner')], default='learner', max_length=50),
+            model_name="contentnode",
+            name="role_visibility",
+            field=models.CharField(
+                choices=[("coach", "Coach"), ("learner", "Learner")],
+                default="learner",
+                max_length=50,
+            ),
         ),
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), (
-                'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
-        ),
-        migrations.AddField(
-            model_name='user',
-            name='content_defaults',
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("srt", "SRT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
+        ),
+        migrations.AddField(
+            model_name="user",
+            name="content_defaults",
             field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
         ),
         migrations.AddField(
-            model_name='user',
-            name='policies',
-            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True),
+            model_name="user",
+            name="policies",
+            field=django.contrib.postgres.fields.jsonb.JSONField(
+                default=dict, null=True
+            ),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='aggregator',
-            field=models.CharField(blank=True, default='', help_text='Who gathered this content together?', max_length=200, null=True),
+            model_name="contentnode",
+            name="aggregator",
+            field=models.CharField(
+                blank=True,
+                default="",
+                help_text="Who gathered this content together?",
+                max_length=200,
+                null=True,
+            ),
         ),
         migrations.AddField(
-            model_name='contentnode',
-            name='provider',
-            field=models.CharField(blank=True, default='', help_text='Who distributed this content?', max_length=200, null=True),
+            model_name="contentnode",
+            name="provider",
+            field=models.CharField(
+                blank=True,
+                default="",
+                help_text="Who distributed this content?",
+                max_length=200,
+                null=True,
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='changed',
+            model_name="contentnode",
+            name="changed",
             field=models.BooleanField(default=True),
         ),
         migrations.AlterField(
-            model_name='file',
-            name='file_on_disk',
-            field=models.FileField(blank=True, max_length=500, upload_to=contentcuration.models.object_storage_name),
+            model_name="file",
+            name="file_on_disk",
+            field=models.FileField(
+                blank=True,
+                max_length=500,
+                upload_to=contentcuration.models.object_storage_name,
+            ),
         ),
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), (
-                'json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("graphie", "Graphie Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.CreateModel(
-            name='ChannelSet',
+            name="ChannelSet",
             fields=[
-                ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
-                ('name', models.CharField(blank=True, max_length=200)),
-                ('description', models.CharField(blank=True, max_length=400)),
-                ('public', models.BooleanField(db_index=True, default=False)),
-                ('editors', models.ManyToManyField(blank=True, help_text='Users with edit rights', related_name='channel_sets', to=settings.AUTH_USER_MODEL, verbose_name='editors')),
-                ('secret_token', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_sets', to='contentcuration.SecretToken')),
+                (
+                    "id",
+                    contentcuration.models.UUIDField(
+                        default=uuid.uuid4,
+                        max_length=32,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("name", models.CharField(blank=True, max_length=200)),
+                ("description", models.CharField(blank=True, max_length=400)),
+                ("public", models.BooleanField(db_index=True, default=False)),
+                (
+                    "editors",
+                    models.ManyToManyField(
+                        blank=True,
+                        help_text="Users with edit rights",
+                        related_name="channel_sets",
+                        to=settings.AUTH_USER_MODEL,
+                        verbose_name="editors",
+                    ),
+                ),
+                (
+                    "secret_token",
+                    models.ForeignKey(
+                        blank=True,
+                        null=True,
+                        on_delete=django.db.models.deletion.SET_NULL,
+                        related_name="channel_sets",
+                        to="contentcuration.SecretToken",
+                    ),
+                ),
             ],
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py b/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py
index 3c972e3eb1..f01730b555 100644
--- a/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py
+++ b/contentcuration/contentcuration/migrations/0002_auto_20181220_1734.py
@@ -7,13 +7,13 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0001_squashed_0094_auto_20180910_2342'),
+        ("contentcuration", "0001_squashed_0094_auto_20180910_2342"),
     ]

     operations = [
         migrations.AddField(
-            model_name='channel',
-            name='thumbnail_encoding_json',
+            model_name="channel",
+            name="thumbnail_encoding_json",
             field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0003_copy_data.py b/contentcuration/contentcuration/migrations/0003_copy_data.py
index 9a934b95a3..e9f073b424 100644
--- a/contentcuration/contentcuration/migrations/0003_copy_data.py
+++ b/contentcuration/contentcuration/migrations/0003_copy_data.py
@@ -7,23 +7,31 @@


 def forwards(apps, schema_editor):
-    Channel = apps.get_model('contentcuration', 'channel')
+    Channel = apps.get_model("contentcuration", "channel")

     for channel in Channel.objects.all():
-        channel.thumbnail_encoding_json = ast.literal_eval(channel.thumbnail_encoding) if channel.thumbnail_encoding else {}
+        channel.thumbnail_encoding_json = (
+            ast.literal_eval(channel.thumbnail_encoding)
+            if channel.thumbnail_encoding
+            else {}
+        )
         channel.save()


 def backwards(apps, schema_editor):
-    Channel = apps.get_model('contentcuration', 'channel')
+    Channel = apps.get_model("contentcuration", "channel")

     for channel in Channel.objects.all():
-        channel.thumbnail_encoding = json.dumps(channel.thumbnail_encoding_json) if channel.thumbnail_encoding_json else None
+        channel.thumbnail_encoding = (
+            json.dumps(channel.thumbnail_encoding_json)
+            if channel.thumbnail_encoding_json
+            else None
+        )
         channel.save()


 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0002_auto_20181220_1734'),
+        ("contentcuration", "0002_auto_20181220_1734"),
     ]

     operations = [
diff --git a/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py b/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py
index a79cd7c18a..d840b2196d 100644
--- a/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py
+++ b/contentcuration/contentcuration/migrations/0004_remove_rename_json_field.py
@@ -6,14 +6,14 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0003_copy_data'),
+        ("contentcuration", "0003_copy_data"),
     ]

     operations = [
-        migrations.RemoveField(model_name='channel', name='thumbnail_encoding'),
+        migrations.RemoveField(model_name="channel", name="thumbnail_encoding"),
         migrations.RenameField(
-            model_name='channel',
-            old_name='thumbnail_encoding_json',
-            new_name='thumbnail_encoding',
+            model_name="channel",
+            old_name="thumbnail_encoding_json",
+            new_name="thumbnail_encoding",
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0097_task.py b/contentcuration/contentcuration/migrations/0097_task.py
index e8936964e8..3118d19c25 100644
--- a/contentcuration/contentcuration/migrations/0097_task.py
+++ b/contentcuration/contentcuration/migrations/0097_task.py
@@ -14,26 +14,71 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0004_remove_rename_json_field'),
+        ("contentcuration", "0004_remove_rename_json_field"),
     ]

     operations = [
         migrations.CreateModel(
-            name='Task',
+            name="Task",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('task_type', models.CharField(max_length=50)),
-                ('created', models.DateTimeField(default=django.utils.timezone.now)),
-                ('status', models.CharField(max_length=10)),
-                ('is_progress_tracking', models.BooleanField(default=False)),
-                ('metadata', django.contrib.postgres.fields.jsonb.JSONField()),
-                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task', to=settings.AUTH_USER_MODEL)),
-                ('task_id', contentcuration.models.UUIDField(db_index=True, default=uuid.uuid4, max_length=32)),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("task_type", models.CharField(max_length=50)),
+                ("created", models.DateTimeField(default=django.utils.timezone.now)),
+                ("status", models.CharField(max_length=10)),
+                ("is_progress_tracking", models.BooleanField(default=False)),
+                ("metadata", django.contrib.postgres.fields.jsonb.JSONField()),
+                (
+                    "user",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="task",
+                        to=settings.AUTH_USER_MODEL,
+                    ),
+                ),
+                (
+                    "task_id",
+                    contentcuration.models.UUIDField(
+                        db_index=True, default=uuid.uuid4, max_length=32
+                    ),
+                ),
             ],
         ),
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py b/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py
index 26d1b1ca74..0be055421e 100644
--- a/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py
+++ b/contentcuration/contentcuration/migrations/0098_auto_20190424_1709.py
@@ -9,32 +9,99 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0097_task'),
+        ("contentcuration", "0097_task"),
     ]

     operations = [
         migrations.CreateModel(
-            name='SlideshowSlide',
+            name="SlideshowSlide",
             fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('sort_order', models.FloatField(default=1.0)),
-                ('metadata', django.contrib.postgres.fields.jsonb.JSONField(default={})),
-                ('contentnode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='slideshow_slides', to='contentcuration.ContentNode')),
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("sort_order", models.FloatField(default=1.0)),
+                (
+                    "metadata",
+                    django.contrib.postgres.fields.jsonb.JSONField(default={}),
+                ),
+                (
+                    "contentnode",
+                    models.ForeignKey(
+                        blank=True,
+                        null=True,
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="slideshow_slides",
+                        to="contentcuration.ContentNode",
+                    ),
+                ),
             ],
         ),
         migrations.AlterField(
-            model_name='contentkind',
-            name='kind',
-            field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow')], max_length=200, primary_key=True, serialize=False),
+            model_name="contentkind",
+            name="kind",
+            field=models.CharField(
+                choices=[
+                    ("topic", "Topic"),
+                    ("video", "Video"),
+                    ("audio", "Audio"),
+                    ("exercise", "Exercise"),
+                    ("document", "Document"),
+                    ("html5", "HTML5 App"),
+                    ("slideshow", "Slideshow"),
+                ],
+                max_length=200,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                    ("slideshow_image", "Slideshow Image"),
+                    ("slideshow_thumbnail", "Slideshow Thumbnail"),
+                    ("slideshow_manifest", "Slideshow Manifest"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AddField(
-            model_name='file',
-            name='slideshow_slide',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.SlideshowSlide'),
+            model_name="file",
+            name="slideshow_slide",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="files",
+                to="contentcuration.SlideshowSlide",
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py b/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py
index 4d38de0da7..9e66f0c52c 100644
--- a/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py
+++ b/contentcuration/contentcuration/migrations/0099_auto_20190715_2201.py
@@ -7,13 +7,13 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0098_auto_20190424_1709'),
+        ("contentcuration", "0098_auto_20190424_1709"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='contenttag',
-            name='tag_name',
+            model_name="contenttag",
+            name="tag_name",
             field=models.CharField(max_length=50),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py b/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py
index 619b6e6f83..426c4ec4ca 100644
--- a/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py
+++ b/contentcuration/contentcuration/migrations/0100_calculate_included_languages.py
@@ -11,7 +11,9 @@ def calculate_included_languages(apps, schema_editor):
     Channel = apps.get_model("contentcuration", "Channel")
     ContentNode = apps.get_model("contentcuration", "ContentNode")

-    for channel in Channel.objects.filter(main_tree__isnull=False, last_published__lt=included_languages_deploy_date):
+    for channel in Channel.objects.filter(
+        main_tree__isnull=False, last_published__lt=included_languages_deploy_date
+    ):
         content_nodes = ContentNode.objects.filter(
             tree_id=channel.main_tree.tree_id,
             published=True,
@@ -35,7 +37,7 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0099_auto_20190715_2201'),
+        ("contentcuration", "0099_auto_20190715_2201"),
     ]

     operations = [
diff --git a/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py b/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py
index 5ffe791275..9791ecc788 100644
--- a/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py
+++ b/contentcuration/contentcuration/migrations/0101_extra_fields_json_field.py
@@ -7,22 +7,23 @@
 from contentcuration.models import ContentNode

+
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0100_calculate_included_languages'),
+        ("contentcuration", "0100_calculate_included_languages"),
     ]

     operations = [
-
         migrations.RunSQL(
             # converts the extra_fields column from text to jsonb
-            "ALTER TABLE %s ALTER COLUMN extra_fields TYPE jsonb USING extra_fields::json;" % ContentNode._meta.db_table,
+            "ALTER TABLE %s ALTER COLUMN extra_fields TYPE jsonb USING extra_fields::json;"
+            % ContentNode._meta.db_table,
             # keeps the Django model in sync with the database
             state_operations=[
                 migrations.AlterField(
-                    'contentnode',
-                    'extra_fields',
+                    "contentnode",
+                    "extra_fields",
                     django.contrib.postgres.fields.jsonb.JSONField(),
                 ),
             ],
@@ -32,12 +33,13 @@ class Migration(migrations.Migration):
             # as otherwise pre-conversion migration tests can fail if we allow null.
reverse_sql="""ALTER TABLE %s ALTER COLUMN extra_fields TYPE text USING extra_fields #>> '{}'; ALTER TABLE %s ALTER COLUMN extra_fields DROP NOT NULL; - """ % (ContentNode._meta.db_table, ContentNode._meta.db_table), + """ + % (ContentNode._meta.db_table, ContentNode._meta.db_table), ), - # This is to update `ContentNode` entries with `extra_fields=="null"` to actual NULL values migrations.RunSQL( - "UPDATE %s SET extra_fields=NULL WHERE extra_fields = 'null'" % ContentNode._meta.db_table, - migrations.RunSQL.noop # don't bother to reverse this - ) + "UPDATE %s SET extra_fields=NULL WHERE extra_fields = 'null'" + % ContentNode._meta.db_table, + migrations.RunSQL.noop, # don't bother to reverse this + ), ] diff --git a/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py b/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py index 29f829baa7..856f183a63 100644 --- a/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py +++ b/contentcuration/contentcuration/migrations/0102_auto_20190904_1627.py @@ -8,13 +8,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0101_extra_fields_json_field'), + ("contentcuration", "0101_extra_fields_json_field"), ] operations = [ migrations.AlterField( - model_name='contentnode', - name='extra_fields', + model_name="contentnode", + name="extra_fields", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), ] diff --git a/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py b/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py index da1a79ea2a..e60e71999a 100644 --- a/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py +++ b/contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py @@ -7,18 +7,69 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0102_auto_20190904_1627'), + ("contentcuration", "0102_auto_20190904_1627"), ] operations = [ migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 
'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py b/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py index 64e3428099..8ec05b3733 100644 --- a/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py +++ b/contentcuration/contentcuration/migrations/0104_auto_20191028_2325.py @@ -7,13 +7,42 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0103_auto_20190905_0408'), + ("contentcuration", "0103_auto_20190905_0408"), ] operations = [ migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + 
("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0105_channel_published_data.py b/contentcuration/contentcuration/migrations/0105_channel_published_data.py index 63d9cfd257..d7dd241958 100644 --- a/contentcuration/contentcuration/migrations/0105_channel_published_data.py +++ b/contentcuration/contentcuration/migrations/0105_channel_published_data.py @@ -7,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0104_auto_20191028_2325'), + ("contentcuration", "0104_auto_20191028_2325"), ] operations = [ migrations.AddField( - model_name='channel', - name='published_data', + model_name="channel", + name="published_data", field=django.contrib.postgres.fields.jsonb.JSONField(null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py b/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py index 55f546524c..c964cbc6c9 100644 --- a/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py +++ b/contentcuration/contentcuration/migrations/0106_auto_20191113_0217.py @@ -7,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0105_channel_published_data'), + ("contentcuration", "0105_channel_published_data"), ] operations = [ migrations.AlterField( - model_name='channel', - name='published_data', + model_name="channel", + name="published_data", field=django.contrib.postgres.fields.jsonb.JSONField(default=dict), ), ] diff --git a/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py b/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py index 93cace4d02..ea2d48fc2e 100644 --- a/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py +++ b/contentcuration/contentcuration/migrations/0107_auto_20191115_2344.py @@ -7,24 +7,80 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0106_auto_20191113_0217'), + ("contentcuration", "0106_auto_20191113_0217"), ] operations = [ migrations.CreateModel( - name='MPTTTreeIDManager', + name="MPTTTreeIDManager", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), ], ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + 
("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py b/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py index e9349c4e24..2eb02e8ad4 100644 --- a/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py +++ b/contentcuration/contentcuration/migrations/0108_mptt_tree_id_migration.py @@ -10,14 +10,19 @@ def delete_tree_id_records(apps, schema_editor): Note that this technically does not reverse the migration, as IDs are not re-used after deletion, but just returns the table to an empty state undoing the record creation. """ - MPTTTreeIDManager = apps.get_model('contentcuration', 'MPTTTreeIDManager') + MPTTTreeIDManager = apps.get_model("contentcuration", "MPTTTreeIDManager") MPTTTreeIDManager.objects.all().delete() def update_tree_id_integer(apps, schema_editor): - MPTTTreeIDManager = apps.get_model('contentcuration', 'MPTTTreeIDManager') + MPTTTreeIDManager = apps.get_model("contentcuration", "MPTTTreeIDManager") # In tests, we won't have any existing MPTT trees, so this will return None. 
-    max_id = ContentNode.objects.filter(parent=None).aggregate(max_id=models.Max('tree_id'))['max_id'] or 0
+    max_id = (
+        ContentNode.objects.filter(parent=None).aggregate(max_id=models.Max("tree_id"))[
+            "max_id"
+        ]
+        or 0
+    )
     objects = []
     for i in range(max_id):
         objects.append(MPTTTreeIDManager())
@@ -29,9 +34,11 @@ def update_tree_id_integer(apps, schema_editor):
 class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0107_auto_20191115_2344'),
+        ("contentcuration", "0107_auto_20191115_2344"),
     ]

     operations = [
-        migrations.RunPython(update_tree_id_integer, reverse_code=delete_tree_id_records),
+        migrations.RunPython(
+            update_tree_id_integer, reverse_code=delete_tree_id_records
+        ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py b/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py
index db4fd7fcb8..6d0941d063 100644
--- a/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py
+++ b/contentcuration/contentcuration/migrations/0109_auto_20191202_1759.py
@@ -7,13 +7,15 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0108_mptt_tree_id_migration'),
+        ("contentcuration", "0108_mptt_tree_id_migration"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='contentnode',
-            name='extra_fields',
-            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True),
+            model_name="contentnode",
+            name="extra_fields",
+            field=django.contrib.postgres.fields.jsonb.JSONField(
+                blank=True, default=dict, null=True
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py b/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py
index 51a0c8581d..77f297a38d 100644
--- a/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py
+++ b/contentcuration/contentcuration/migrations/0110_auto_20200511_2245.py
@@ -7,18 +7,18 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0109_auto_20191202_1759'),
+        ("contentcuration", "0109_auto_20191202_1759"),
     ]

     operations = [
         migrations.AddField(
-            model_name='channel',
-            name='demo_server_url',
+            model_name="channel",
+            name="demo_server_url",
             field=models.CharField(blank=True, max_length=200),
         ),
         migrations.AddField(
-            model_name='channel',
-            name='source_url',
+            model_name="channel",
+            name="source_url",
             field=models.CharField(blank=True, max_length=200),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py b/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py
index c60d7a6268..022cfa3769 100644
--- a/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py
+++ b/contentcuration/contentcuration/migrations/0111_auto_20200513_2252.py
@@ -7,18 +7,18 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0110_auto_20200511_2245'),
+        ("contentcuration", "0110_auto_20200511_2245"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='channel',
-            name='demo_server_url',
+            model_name="channel",
+            name="demo_server_url",
             field=models.CharField(blank=True, max_length=200, null=True),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='source_url',
+            model_name="channel",
+            name="source_url",
             field=models.CharField(blank=True, max_length=200, null=True),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py b/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py
index f236c416ed..8d11ae9c56 100644
--- a/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py
+++ b/contentcuration/contentcuration/migrations/0112_auto_20200613_0050.py
@@ -7,23 +7,88 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0111_auto_20200513_2252'),
+        ("contentcuration", "0111_auto_20200513_2252"),
     ]

     operations = [
         migrations.AlterField(
-            model_name='contentkind',
-            name='kind',
-            field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow'), ('h5p', 'H5P')], max_length=200, primary_key=True, serialize=False),
+            model_name="contentkind",
+            name="kind",
+            field=models.CharField(
+                choices=[
+                    ("topic", "Topic"),
+                    ("video", "Video"),
+                    ("audio", "Audio"),
+                    ("exercise", "Exercise"),
+                    ("document", "Document"),
+                    ("html5", "HTML5 App"),
+                    ("slideshow", "Slideshow"),
+                    ("h5p", "H5P"),
+                ],
+                max_length=200,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AlterField(
-            model_name='fileformat',
-            name='extension',
-            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False),
+            model_name="fileformat",
+            name="extension",
+            field=models.CharField(
+                choices=[
+                    ("mp4", "MP4 Video"),
+                    ("vtt", "VTT Subtitle"),
+                    ("mp3", "MP3 Audio"),
+                    ("pdf", "PDF Document"),
+                    ("jpg", "JPG Image"),
+                    ("jpeg", "JPEG Image"),
+                    ("png", "PNG Image"),
+                    ("gif", "GIF Image"),
+                    ("json", "JSON"),
+                    ("svg", "SVG Image"),
+                    ("perseus", "Perseus Exercise"),
+                    ("graphie", "Graphie Exercise"),
+                    ("zip", "HTML5 Zip"),
+                    ("h5p", "H5P"),
+                    ("epub", "ePub Document"),
+                ],
+                max_length=40,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
         migrations.AlterField(
-            model_name='formatpreset',
-            name='id',
-            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
+            model_name="formatpreset",
+            name="id",
+            field=models.CharField(
+                choices=[
+                    ("high_res_video", "High Resolution"),
+                    ("low_res_video", "Low Resolution"),
+                    ("video_thumbnail", "Thumbnail"),
+                    ("video_subtitle", "Subtitle"),
+                    ("video_dependency", "Video (dependency)"),
+                    ("audio", "Audio"),
+                    ("audio_thumbnail", "Thumbnail"),
+                    ("document", "Document"),
+                    ("epub", "ePub Document"),
+                    ("document_thumbnail", "Thumbnail"),
+                    ("exercise", "Exercise"),
+                    ("exercise_thumbnail", "Thumbnail"),
+                    ("exercise_image", "Exercise Image"),
+                    ("exercise_graphie", "Exercise Graphie"),
+                    ("channel_thumbnail", "Channel Thumbnail"),
+                    ("topic_thumbnail", "Thumbnail"),
+                    ("html5_zip", "HTML5 Zip"),
+                    ("html5_dependency", "HTML5 Dependency (Zip format)"),
+                    ("html5_thumbnail", "HTML5 Thumbnail"),
+                    ("h5p", "H5P Zip"),
+                    ("h5p_thumbnail", "H5P Thumbnail"),
+                    ("slideshow_image", "Slideshow Image"),
+                    ("slideshow_thumbnail", "Slideshow Thumbnail"),
+                    ("slideshow_manifest", "Slideshow Manifest"),
+                ],
+                max_length=150,
+                primary_key=True,
+                serialize=False,
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0113_channel_tagline.py b/contentcuration/contentcuration/migrations/0113_channel_tagline.py
index 4687dc774f..142185faaa 100644
--- a/contentcuration/contentcuration/migrations/0113_channel_tagline.py
+++ b/contentcuration/contentcuration/migrations/0113_channel_tagline.py
@@ -7,13 +7,13 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0112_auto_20200613_0050'),
+        ("contentcuration", "0112_auto_20200613_0050"),
     ]

     operations = [
         migrations.AddField(
-            model_name='channel',
-            name='tagline',
+            model_name="channel",
+            name="tagline",
             field=models.CharField(blank=True, max_length=150, null=True),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py b/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py
index 1d50578bc8..5da7fd6209 100644
--- a/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py
+++ b/contentcuration/contentcuration/migrations/0114_assessment_item_unique_keypair.py
@@ -4,46 +4,46 @@
 import contentcuration.models

-TABLE_NAME = 'contentcuration_assessmentitem'
-INDEX_NAME = 'assessmentitem_unique_keypair'
-CONSTRAINT_NAME = 'assessmentitem_unique_keypair_constraint'
+TABLE_NAME = "contentcuration_assessmentitem"
+INDEX_NAME = "assessmentitem_unique_keypair"
+CONSTRAINT_NAME = "assessmentitem_unique_keypair_constraint"
+

 class Migration(migrations.Migration):
     atomic = False

     dependencies = [
-        ('contentcuration', '0113_channel_tagline'),
+        ("contentcuration", "0113_channel_tagline"),
     ]

     operations = [
         migrations.SeparateDatabaseAndState(
             state_operations=[
                 migrations.AlterUniqueTogether(
-                    name='assessmentitem',
-                    unique_together=set([('contentnode', 'assessment_id')]),
+                    name="assessmentitem",
+                    unique_together=set([("contentnode", "assessment_id")]),
                 ),
             ],
             database_operations=[
                 migrations.RunSQL(
-                    sql='CREATE UNIQUE INDEX CONCURRENTLY {index_name} ON {table_name} USING btree (assessment_id, contentnode_id)'.format(
+                    sql="CREATE UNIQUE INDEX CONCURRENTLY {index_name} ON {table_name} USING btree (assessment_id, contentnode_id)".format(
                         index_name=INDEX_NAME,
                         table_name=TABLE_NAME,
                     ),
-                    reverse_sql='DROP INDEX IF EXISTS {index_name}'.format(
+                    reverse_sql="DROP INDEX IF EXISTS {index_name}".format(
                         index_name=INDEX_NAME,
                     ),
                 ),
                 migrations.RunSQL(
-                    sql='ALTER TABLE {table_name} ADD CONSTRAINT {constraint_name} UNIQUE USING INDEX {index_name}'.format(
+                    sql="ALTER TABLE {table_name} ADD CONSTRAINT {constraint_name} UNIQUE USING INDEX {index_name}".format(
                         index_name=INDEX_NAME,
                         table_name=TABLE_NAME,
                         constraint_name=CONSTRAINT_NAME,
                     ),
-                    reverse_sql='ALTER TABLE {table_name} DROP CONSTRAINT {constraint_name}'.format(
-                        table_name=TABLE_NAME,
-                        constraint_name=CONSTRAINT_NAME
+                    reverse_sql="ALTER TABLE {table_name} DROP CONSTRAINT {constraint_name}".format(
+                        table_name=TABLE_NAME,
+                        constraint_name=CONSTRAINT_NAME
                     ),
-                )
-            ]
+                ),
+            ],
         )
     ]
diff --git a/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py b/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py
index 32828c0416..ce40ad640c 100644
--- a/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py
+++ b/contentcuration/contentcuration/migrations/0116_index_channel_contentnode_file.py
@@ -9,7 +9,7 @@ class Migration(migrations.Migration):
     atomic = False

     dependencies = [
-        ('contentcuration', '0115_index_contentnode_node_id_field'),
+        ("contentcuration", "0115_index_contentnode_node_id_field"),
     ]

     operations = [
@@ -33,7 +33,6 @@ class Migration(migrations.Migration):
                 ),
             ],
         ),
-
         migrations.SeparateDatabaseAndState(
             state_operations=[
                 migrations.AddIndex(
@@ -56,12 +55,14 @@ class Migration(migrations.Migration):
                 ),
             ],
         ),
-
         migrations.SeparateDatabaseAndState(
             state_operations=[
                 migrations.AddIndex(
                     model_name="file",
-                    index=models.Index(fields=['checksum', 'file_size'], name="file_checksum_file_size_idx"),
+                    index=models.Index(
+                        fields=["checksum", "file_size"],
+                        name="file_checksum_file_size_idx",
+                    ),
                 ),
             ],
             database_operations=[
@@ -77,5 +78,4 @@ class Migration(migrations.Migration):
                 ),
             ],
         ),
-
     ]
diff --git a/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py b/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py
index 1e4562f24a..fac6d5f580 100644
--- a/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py
+++ b/contentcuration/contentcuration/migrations/0118_relaunch_migrations.py
@@ -10,78 +10,108 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0117_assessment_id_index'),
+        ("contentcuration", "0117_assessment_id_index"),
     ]

     operations = [
         migrations.AddField(
-            model_name='contentnode',
-            name='complete',
+            model_name="contentnode",
+            name="complete",
             field=models.NullBooleanField(),
         ),
         migrations.AddField(
-            model_name='invitation',
-            name='accepted',
+            model_name="invitation",
+            name="accepted",
             field=models.BooleanField(default=False),
         ),
         migrations.AddField(
-            model_name='invitation',
-            name='declined',
+            model_name="invitation",
+            name="declined",
             field=models.BooleanField(default=False),
         ),
         migrations.AddField(
-            model_name='invitation',
-            name='revoked',
+            model_name="invitation",
+            name="revoked",
             field=models.BooleanField(default=False),
         ),
         migrations.AddField(
-            model_name='user',
-            name='disk_space_used',
-            field=models.FloatField(default=0, help_text='How many bytes a user has uploaded'),
+            model_name="user",
+            name="disk_space_used",
+            field=models.FloatField(
+                default=0, help_text="How many bytes a user has uploaded"
+            ),
         ),
         migrations.AlterField(
-            model_name='channel',
-            name='preferences',
-            field=models.TextField(default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}'),
+            model_name="channel",
+            name="preferences",
+            field=models.TextField(
+                default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}'
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='created',
-            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='created'),
+            model_name="contentnode",
+            name="created",
+            field=models.DateTimeField(
+                default=django.utils.timezone.now, verbose_name="created"
+            ),
         ),
         migrations.AlterField(
-            model_name='contentnode',
-            name='title',
+            model_name="contentnode",
+            name="title",
             field=models.CharField(blank=True, max_length=200),
         ),
         migrations.AlterField(
-            model_name='contenttag',
-            name='channel',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tags', to='contentcuration.Channel'),
+            model_name="contenttag",
+            name="channel",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="tags",
+                to="contentcuration.Channel",
+            ),
         ),
         migrations.AlterField(
-            model_name='file',
-            name='uploaded_by',
-            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to=settings.AUTH_USER_MODEL),
+            model_name="file",
+            name="uploaded_by",
+            field=models.ForeignKey(
+                blank=True,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name="files",
+                to=settings.AUTH_USER_MODEL,
+            ),
         ),
         migrations.AlterField(
-            model_name='invitation',
-            name='channel',
-            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pending_editors', to='contentcuration.Channel'),
+            model_name="invitation",
+            name="channel",
+            field=models.ForeignKey(
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="pending_editors",
+                to="contentcuration.Channel",
+            ),
         ),
         migrations.AlterField(
-            model_name='invitation',
-            name='first_name',
+            model_name="invitation",
+            name="first_name",
             field=models.CharField(blank=True, max_length=100),
         ),
         migrations.AlterField(
-            model_name='invitation',
-            name='sender',
-            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_by', to=settings.AUTH_USER_MODEL),
+            model_name="invitation",
+            name="sender",
+            field=models.ForeignKey(
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="sent_by",
+                to=settings.AUTH_USER_MODEL,
+            ),
         ),
         migrations.AlterField(
-            model_name='user',
-            name='preferences',
-            field=models.TextField(default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}'),
+            model_name="user",
+            name="preferences",
+            field=models.TextField(
+                default='{"license": null, "language": null, "author": null, "aggregator": null, "provider": null, "copyright_holder": null, "license_description": null, "mastery_model": "num_correct_in_a_row_5", "m_value": 5, "n_value": 5, "auto_derive_video_thumbnail": true, "auto_derive_audio_thumbnail": true, "auto_derive_document_thumbnail": true, "auto_derive_html5_thumbnail": true, "auto_derive_exercise_thumbnail": true, "auto_randomize_questions": true}'
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0119_task_channel_id.py b/contentcuration/contentcuration/migrations/0119_task_channel_id.py
index a4d69fc81f..c20c8efa0a 100644
--- a/contentcuration/contentcuration/migrations/0119_task_channel_id.py
+++ b/contentcuration/contentcuration/migrations/0119_task_channel_id.py
@@ -7,13 +7,13 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0118_relaunch_migrations'),
+        ("contentcuration", "0118_relaunch_migrations"),
     ]

     operations = [
         migrations.AddField(
-            model_name='task',
-            name='channel_id',
+            model_name="task",
+            name="channel_id",
             field=models.UUIDField(blank=True, db_index=True, null=True),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py b/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py
index 96544fa29e..59d48db118 100644
--- a/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py
+++ b/contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py
@@ -10,12 +10,16 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0119_task_channel_id'),
+        ("contentcuration", "0119_task_channel_id"),
     ]

     operations = [
         migrations.AddIndex(
-            model_name='user',
-            index=contentcuration.models.UniqueActiveUserIndex(django.db.models.functions.text.Lower('email'), condition=models.Q(('is_active', True)), name='contentcura_email_d4d492_idx'),
+            model_name="user",
+            index=contentcuration.models.UniqueActiveUserIndex(
+                django.db.models.functions.text.Lower("email"),
+                condition=models.Q(("is_active", True)),
+                name="contentcura_email_d4d492_idx",
+            ),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py b/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py
index 4628a43bd2..0e427a8281 100644
--- a/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py
+++ b/contentcuration/contentcuration/migrations/0121_auto_20210305_2028.py
@@ -7,13 +7,13 @@ class Migration(migrations.Migration):

     dependencies = [
-        ('contentcuration', '0120_auto_20210128_1646'),
+        ("contentcuration", "0120_auto_20210128_1646"),
     ]

     operations = [
         migrations.AddField(
-            model_name='file',
-            name='modified',
-            field=models.DateTimeField(verbose_name='modified', null=True),
+            model_name="file",
+            name="modified",
+            field=models.DateTimeField(verbose_name="modified", null=True),
         ),
     ]
diff --git a/contentcuration/contentcuration/migrations/0122_file_modified_index.py b/contentcuration/contentcuration/migrations/0122_file_modified_index.py
index 45702de28e..e247977453 100644
--- a/contentcuration/contentcuration/migrations/0122_file_modified_index.py
+++ b/contentcuration/contentcuration/migrations/0122_file_modified_index.py
@@ -10,15 +10,17 @@ class Migration(migrations.Migration):
     atomic = False

     dependencies = [
-        ('contentcuration', '0121_auto_20210305_2028'),
+        ("contentcuration", "0121_auto_20210305_2028"),
     ]

     operations = [
         migrations.SeparateDatabaseAndState(
             state_operations=[
                 migrations.AddIndex(
-                    model_name='file',
-                    index=models.Index(fields=['-modified'], name=FILE_MODIFIED_DESC_INDEX_NAME),
+                    model_name="file",
+                    index=models.Index(
+                        fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME
+                    ),
                 ),
             ],
             database_operations=[
diff --git a/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py
b/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py index e181f57ac0..896ddfb8e4 100644 --- a/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py +++ b/contentcuration/contentcuration/migrations/0123_auto_20210407_0057.py @@ -7,13 +7,15 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0122_file_modified_index'), + ("contentcuration", "0122_file_modified_index"), ] operations = [ migrations.AlterField( - model_name='file', - name='modified', - field=models.DateTimeField(auto_now=True, null=True, verbose_name='modified'), + model_name="file", + name="modified", + field=models.DateTimeField( + auto_now=True, null=True, verbose_name="modified" + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0124_user_feature_flags.py b/contentcuration/contentcuration/migrations/0124_user_feature_flags.py index 5f4e393c27..bf9eccaf32 100644 --- a/contentcuration/contentcuration/migrations/0124_user_feature_flags.py +++ b/contentcuration/contentcuration/migrations/0124_user_feature_flags.py @@ -7,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0123_auto_20210407_0057'), + ("contentcuration", "0123_auto_20210407_0057"), ] operations = [ migrations.AddField( - model_name='user', - name='feature_flags', + model_name="user", + name="feature_flags", field=django.contrib.postgres.fields.jsonb.JSONField(null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py b/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py index 1f1e32c082..60bca1b599 100644 --- a/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py +++ b/contentcuration/contentcuration/migrations/0125_user_feature_flags_default.py @@ -7,13 +7,15 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0124_user_feature_flags'), + ("contentcuration", "0124_user_feature_flags"), ] operations = [ migrations.AlterField( - model_name='user', - name='feature_flags', - field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True), + model_name="user", + name="feature_flags", + field=django.contrib.postgres.fields.jsonb.JSONField( + default=dict, null=True + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py b/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py index 2f66e54cff..9cdf38902a 100644 --- a/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py +++ b/contentcuration/contentcuration/migrations/0126_auto_20210219_2314.py @@ -7,13 +7,35 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0125_user_feature_flags_default'), + ("contentcuration", "0125_user_feature_flags_default"), ] operations = [ migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", 
"MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py b/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py index 9c426ac588..409d1132fd 100644 --- a/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py +++ b/contentcuration/contentcuration/migrations/0127_auto_20210504_1744.py @@ -7,13 +7,44 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0126_auto_20210219_2314'), + ("contentcuration", "0126_auto_20210219_2314"), ] operations = [ migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py b/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py index 7bfa3e3d67..ca3456bf99 100644 --- a/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py +++ b/contentcuration/contentcuration/migrations/0128_auto_20210511_1605.py @@ -7,13 +7,46 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', 
'0127_auto_20210504_1744'), + ("contentcuration", "0127_auto_20210504_1744"), ] operations = [ migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py b/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py index e46b0d2cac..c84c9267d7 100644 --- a/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py +++ b/contentcuration/contentcuration/migrations/0129_auto_20210519_2213.py @@ -1,159 +1,247 @@ # Generated by Django 3.2.3 on 2021-05-19 22:13 - -from django.db import migrations, models import django.db.models.deletion +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0128_auto_20210511_1605'), + ("contentcuration", "0128_auto_20210511_1605"), ] operations = [ migrations.AlterField( - model_name='channel', - name='chef_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_chef', to='contentcuration.contentnode'), - ), - migrations.AlterField( - model_name='channel', - name='clipboard_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_clipboard', 
to='contentcuration.contentnode'), - ), - migrations.AlterField( - model_name='channel', - name='content_defaults', + model_name="channel", + name="chef_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_chef", + to="contentcuration.contentnode", + ), + ), + migrations.AlterField( + model_name="channel", + name="clipboard_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_clipboard", + to="contentcuration.contentnode", + ), + ), + migrations.AlterField( + model_name="channel", + name="content_defaults", field=models.JSONField(default=dict), ), migrations.AlterField( - model_name='channel', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_language', to='contentcuration.language'), - ), - migrations.AlterField( - model_name='channel', - name='main_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_main', to='contentcuration.contentnode'), - ), - migrations.AlterField( - model_name='channel', - name='previous_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_previous', to='contentcuration.contentnode'), - ), - migrations.AlterField( - model_name='channel', - name='published_data', + model_name="channel", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_language", + to="contentcuration.language", + ), + ), + migrations.AlterField( + model_name="channel", + name="main_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_main", + to="contentcuration.contentnode", + ), + ), + migrations.AlterField( + model_name="channel", + name="previous_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_previous", + to="contentcuration.contentnode", + ), + ), + migrations.AlterField( + model_name="channel", + name="published_data", field=models.JSONField(default=dict), ), migrations.AlterField( - model_name='channel', - name='staging_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_staging', to='contentcuration.contentnode'), + model_name="channel", + name="staging_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_staging", + to="contentcuration.contentnode", + ), ), migrations.AlterField( - model_name='channel', - name='thumbnail_encoding', + model_name="channel", + name="thumbnail_encoding", field=models.JSONField(default=dict), ), migrations.AlterField( - model_name='channel', - name='trash_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_trash', to='contentcuration.contentnode'), + model_name="channel", + name="trash_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="channel_trash", + to="contentcuration.contentnode", + ), ), migrations.AlterField( - model_name='contentnode', - name='complete', + model_name="contentnode", + name="complete", 
field=models.BooleanField(null=True), ), migrations.AlterField( - model_name='contentnode', - name='extra_fields', + model_name="contentnode", + name="extra_fields", field=models.JSONField(blank=True, default=dict, null=True), ), migrations.AlterField( - model_name='contentnode', - name='kind', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contentnodes', to='contentcuration.contentkind'), + model_name="contentnode", + name="kind", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="contentnodes", + to="contentcuration.contentkind", + ), ), migrations.AlterField( - model_name='contentnode', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='content_language', to='contentcuration.language'), + model_name="contentnode", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="content_language", + to="contentcuration.language", + ), ), migrations.AlterField( - model_name='contentnode', - name='level', + model_name="contentnode", + name="level", field=models.PositiveIntegerField(editable=False), ), migrations.AlterField( - model_name='contentnode', - name='lft', + model_name="contentnode", + name="lft", field=models.PositiveIntegerField(editable=False), ), migrations.AlterField( - model_name='contentnode', - name='license', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contentcuration.license'), + model_name="contentnode", + name="license", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="contentcuration.license", + ), ), migrations.AlterField( - model_name='contentnode', - name='rght', + model_name="contentnode", + name="rght", field=models.PositiveIntegerField(editable=False), ), migrations.AlterField( - model_name='file', - name='file_format', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to='contentcuration.fileformat'), - ), - migrations.AlterField( - model_name='file', - name='language', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to='contentcuration.language'), - ), - migrations.AlterField( - model_name='file', - name='preset', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='files', to='contentcuration.formatpreset'), - ), - migrations.AlterField( - model_name='formatpreset', - name='kind', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='format_presets', to='contentcuration.contentkind'), - ), - migrations.AlterField( - model_name='slideshowslide', - name='metadata', + model_name="file", + name="file_format", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="files", + to="contentcuration.fileformat", + ), + ), + migrations.AlterField( + model_name="file", + name="language", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="files", + to="contentcuration.language", + ), + ), + migrations.AlterField( + model_name="file", + name="preset", + field=models.ForeignKey( + blank=True, + null=True, + 
on_delete=django.db.models.deletion.SET_NULL, + related_name="files", + to="contentcuration.formatpreset", + ), + ), + migrations.AlterField( + model_name="formatpreset", + name="kind", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="format_presets", + to="contentcuration.contentkind", + ), + ), + migrations.AlterField( + model_name="slideshowslide", + name="metadata", field=models.JSONField(default=dict), ), migrations.AlterField( - model_name='task', - name='metadata', + model_name="task", + name="metadata", field=models.JSONField(), ), migrations.AlterField( - model_name='user', - name='clipboard_tree', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_clipboard', to='contentcuration.contentnode'), + model_name="user", + name="clipboard_tree", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="user_clipboard", + to="contentcuration.contentnode", + ), ), migrations.AlterField( - model_name='user', - name='content_defaults', + model_name="user", + name="content_defaults", field=models.JSONField(default=dict), ), migrations.AlterField( - model_name='user', - name='feature_flags', + model_name="user", + name="feature_flags", field=models.JSONField(default=dict, null=True), ), migrations.AlterField( - model_name='user', - name='information', + model_name="user", + name="information", field=models.JSONField(null=True), ), migrations.AlterField( - model_name='user', - name='policies', + model_name="user", + name="policies", field=models.JSONField(default=dict, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py b/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py index ac3a7a19d3..2a7b4e076f 100644 --- a/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py +++ b/contentcuration/contentcuration/migrations/0130_auto_20210706_2005.py @@ -6,23 +6,96 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0129_auto_20210519_2213'), + ("contentcuration", "0129_auto_20210519_2213"), ] operations = [ migrations.AlterField( - model_name='contentkind', - name='kind', - field=models.CharField(choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow'), ('h5p', 'H5P'), ('zim', 'Zim')], max_length=200, primary_key=True, serialize=False), + model_name="contentkind", + name="kind", + field=models.CharField( + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ("slideshow", "Slideshow"), + ("h5p", "H5P"), + ("zim", "Zim"), + ], + max_length=200, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + 
field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py b/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py index b27a9f14f0..27346f7b6d 100644 --- a/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py +++ b/contentcuration/contentcuration/migrations/0131_auto_20210707_2326.py @@ -6,11 +6,11 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0130_auto_20210706_2005'), + ("contentcuration", "0130_auto_20210706_2005"), ] operations = [ migrations.DeleteModel( - name='ChannelResourceSize', + name="ChannelResourceSize", ), ] 
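A note on the migration hunks in this patch: they are mechanical black output, so the stated dependencies, operations, and SQL are unchanged. A minimal sketch of the two rewrites black applies throughout, quote normalization and splitting any call that overflows the default 88-character line length, using a hypothetical model and field (illustrative only, not from this repo):

    from django.db import migrations
    from django.db import models

    # Before black this was a single long line using single quotes. black
    # normalizes string quotes to double quotes and, because the call no longer
    # fits within 88 characters, puts each argument on its own line.
    operation = migrations.AlterField(
        model_name="widget",
        name="kind",
        field=models.CharField(
            choices=[
                ("a", "A"),
                ("b", "B"),
            ],
            max_length=40,
            primary_key=True,
            serialize=False,
        ),
    )

The trailing comma black leaves after the last argument is its "magic trailing comma": on later runs it keeps the call expanded even if it would otherwise fit on one line, which is why these diffs look large while the underlying operations are identical.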
diff --git a/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py b/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py index 16c715d29e..7d8bbcfd1c 100644 --- a/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py +++ b/contentcuration/contentcuration/migrations/0132_auto_20210708_0011.py @@ -6,11 +6,11 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0131_auto_20210707_2326'), + ("contentcuration", "0131_auto_20210707_2326"), ] operations = [ migrations.DeleteModel( - name='Exercise', + name="Exercise", ), ] diff --git a/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py b/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py index ef0fec8d19..52c9f0f115 100644 --- a/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py +++ b/contentcuration/contentcuration/migrations/0133_auto_20220124_2149.py @@ -6,17 +6,27 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0132_auto_20210708_0011'), + ("contentcuration", "0132_auto_20210708_0011"), ] operations = [ migrations.AddField( - model_name='file', - name='duration', + model_name="file", + name="duration", field=models.IntegerField(blank=True, null=True), ), migrations.AddConstraint( - model_name='file', - constraint=models.CheckConstraint(check=models.Q(models.Q(('duration__gt', 0), ('preset__in', ['audio', 'high_res_video', 'low_res_video'])), ('duration__isnull', True), _connector='OR'), name='file_media_duration_int'), + model_name="file", + constraint=models.CheckConstraint( + check=models.Q( + models.Q( + ("duration__gt", 0), + ("preset__in", ["audio", "high_res_video", "low_res_video"]), + ), + ("duration__isnull", True), + _connector="OR", + ), + name="file_media_duration_int", + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py b/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py index f1332bc008..afcf247b53 100644 --- a/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py +++ b/contentcuration/contentcuration/migrations/0135_add_metadata_labels.py @@ -6,38 +6,38 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0134_alter_contentkind_kind'), + ("contentcuration", "0134_alter_contentkind_kind"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='accessibility_labels', + model_name="contentnode", + name="accessibility_labels", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='categories', + model_name="contentnode", + name="categories", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='grade_levels', + model_name="contentnode", + name="grade_levels", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='learner_needs', + model_name="contentnode", + name="learner_needs", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='learning_activities', + model_name="contentnode", + name="learning_activities", field=models.JSONField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='resource_types', + model_name="contentnode", + name="resource_types", field=models.JSONField(blank=True, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py 
b/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py index aff7b5641b..4411bb60ff 100644 --- a/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py +++ b/contentcuration/contentcuration/migrations/0136_contentnode_suggested_duration.py @@ -6,13 +6,17 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0135_add_metadata_labels'), + ("contentcuration", "0135_add_metadata_labels"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='suggested_duration', - field=models.IntegerField(blank=True, help_text='Suggested duration for the content node (in seconds)', null=True), + model_name="contentnode", + name="suggested_duration", + field=models.IntegerField( + blank=True, + help_text="Suggested duration for the content node (in seconds)", + null=True, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0137_channelhistory.py b/contentcuration/contentcuration/migrations/0137_channelhistory.py index e5f692c054..0edaff77bf 100644 --- a/contentcuration/contentcuration/migrations/0137_channelhistory.py +++ b/contentcuration/contentcuration/migrations/0137_channelhistory.py @@ -9,26 +9,61 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0136_contentnode_suggested_duration'), + ("contentcuration", "0136_contentnode_suggested_duration"), ] operations = [ migrations.CreateModel( - name='ChannelHistory', + name="ChannelHistory", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('performed', models.DateTimeField(default=django.utils.timezone.now)), - ('action', models.CharField(choices=[('creation', 'Creation'), ('publication', 'Publication'), ('deletion', 'Deletion'), ('recovery', 'Deletion recovery')], max_length=50)), - ('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='channel_history', to=settings.AUTH_USER_MODEL)), - ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='contentcuration.channel')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("performed", models.DateTimeField(default=django.utils.timezone.now)), + ( + "action", + models.CharField( + choices=[ + ("creation", "Creation"), + ("publication", "Publication"), + ("deletion", "Deletion"), + ("recovery", "Deletion recovery"), + ], + max_length=50, + ), + ), + ( + "actor", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_history", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "channel", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="history", + to="contentcuration.channel", + ), + ), ], options={ - 'verbose_name': 'Channel history', - 'verbose_name_plural': 'Channel histories', + "verbose_name": "Channel history", + "verbose_name_plural": "Channel histories", }, ), migrations.AddIndex( - model_name='channelhistory', - index=models.Index(fields=['channel_id'], name='idx_channel_history_channel_id'), + model_name="channelhistory", + index=models.Index( + fields=["channel_id"], name="idx_channel_history_channel_id" + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0138_change.py b/contentcuration/contentcuration/migrations/0138_change.py index f18ee52320..504596a5f1 100644 --- a/contentcuration/contentcuration/migrations/0138_change.py +++ 
b/contentcuration/contentcuration/migrations/0138_change.py @@ -9,25 +9,62 @@ class Migration(migrations.Migration): dependencies = [ - ('sessions', '0001_initial'), - ('contentcuration', '0137_channelhistory'), + ("sessions", "0001_initial"), + ("contentcuration", "0137_channelhistory"), ] operations = [ migrations.CreateModel( - name='Change', + name="Change", fields=[ - ('server_rev', models.BigAutoField(primary_key=True, serialize=False)), - ('client_rev', models.IntegerField(blank=True, null=True)), - ('table', models.CharField(max_length=32)), - ('change_type', models.IntegerField()), - ('kwargs', models.JSONField(encoder=rest_framework.utils.encoders.JSONEncoder)), - ('applied', models.BooleanField(default=False)), - ('errored', models.BooleanField(default=False)), - ('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.channel')), - ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='changes_by_user', to=settings.AUTH_USER_MODEL)), - ('session', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sessions.session')), - ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='changes_about_user', to=settings.AUTH_USER_MODEL)), + ("server_rev", models.BigAutoField(primary_key=True, serialize=False)), + ("client_rev", models.IntegerField(blank=True, null=True)), + ("table", models.CharField(max_length=32)), + ("change_type", models.IntegerField()), + ( + "kwargs", + models.JSONField(encoder=rest_framework.utils.encoders.JSONEncoder), + ), + ("applied", models.BooleanField(default=False)), + ("errored", models.BooleanField(default=False)), + ( + "channel", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="contentcuration.channel", + ), + ), + ( + "created_by", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + related_name="changes_by_user", + to=settings.AUTH_USER_MODEL, + ), + ), + ( + "session", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="sessions.session", + ), + ), + ( + "user", + models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="changes_about_user", + to=settings.AUTH_USER_MODEL, + ), + ), ], ), ] diff --git a/contentcuration/contentcuration/migrations/0139_django_celery_results.py b/contentcuration/contentcuration/migrations/0139_django_celery_results.py index f6b37e2c28..1a8f65211c 100644 --- a/contentcuration/contentcuration/migrations/0139_django_celery_results.py +++ b/contentcuration/contentcuration/migrations/0139_django_celery_results.py @@ -8,31 +8,45 @@ class Migration(migrations.Migration): - replaces = [('django_celery_results', '0138_change'),] + replaces = [ + ("django_celery_results", "0138_change"), + ] def __init__(self, name, app_label): - super(Migration, self).__init__(name, 'django_celery_results') + super(Migration, self).__init__(name, "django_celery_results") dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), - ('contentcuration', '0138_change'), - ('django_celery_results', '0001_initial'), + ("contentcuration", "0138_change"), + ("django_celery_results", "0001_initial"), ] operations = [ migrations.AddField( - model_name='taskresult', - name='channel_id', + model_name="taskresult", + name="channel_id", 
field=models.UUIDField(blank=True, db_index=True, null=True), ), migrations.AddField( - model_name='taskresult', - name='progress', - field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]), + model_name="taskresult", + name="progress", + field=models.IntegerField( + blank=True, + null=True, + validators=[ + django.core.validators.MinValueValidator(0), + django.core.validators.MaxValueValidator(100), + ], + ), ), migrations.AddField( - model_name='taskresult', - name='user', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to=settings.AUTH_USER_MODEL), + model_name="taskresult", + name="user", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="tasks", + to=settings.AUTH_USER_MODEL, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0140_delete_task.py b/contentcuration/contentcuration/migrations/0140_delete_task.py index ec2108fdf2..f654a2fb5b 100644 --- a/contentcuration/contentcuration/migrations/0140_delete_task.py +++ b/contentcuration/contentcuration/migrations/0140_delete_task.py @@ -5,11 +5,11 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0139_django_celery_results'), + ("contentcuration", "0139_django_celery_results"), ] operations = [ migrations.DeleteModel( - name='Task', + name="Task", ), ] diff --git a/contentcuration/contentcuration/migrations/0141_add_task_signature.py b/contentcuration/contentcuration/migrations/0141_add_task_signature.py index 4e182e8fa1..3f113f438d 100644 --- a/contentcuration/contentcuration/migrations/0141_add_task_signature.py +++ b/contentcuration/contentcuration/migrations/0141_add_task_signature.py @@ -5,24 +5,37 @@ class Migration(migrations.Migration): - replaces = [('django_celery_results', '0140_delete_task'),] + replaces = [ + ("django_celery_results", "0140_delete_task"), + ] def __init__(self, name, app_label): - super(Migration, self).__init__(name, 'django_celery_results') + super(Migration, self).__init__(name, "django_celery_results") dependencies = [ - ('contentcuration', '0140_delete_task'), - ('django_celery_results', '0011_taskresult_periodic_task_name'), + ("contentcuration", "0140_delete_task"), + ("django_celery_results", "0011_taskresult_periodic_task_name"), ] operations = [ migrations.AddField( - model_name='taskresult', - name='signature', + model_name="taskresult", + name="signature", field=models.CharField(max_length=32, null=True), ), migrations.AddIndex( - model_name='taskresult', - index=models.Index(condition=models.Q(('status__in', frozenset(['STARTED', 'REJECTED', 'RETRY', 'RECEIVED', 'PENDING']))), fields=['signature'], name='task_result_signature_idx'), + model_name="taskresult", + index=models.Index( + condition=models.Q( + ( + "status__in", + frozenset( + ["STARTED", "REJECTED", "RETRY", "RECEIVED", "PENDING"] + ), + ) + ), + fields=["signature"], + name="task_result_signature_idx", + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py b/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py index e497fbd398..71e2b4f2bc 100644 --- a/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py +++ b/contentcuration/contentcuration/migrations/0142_remove_file_file_media_duration_int.py @@ -5,12 +5,12 @@ class Migration(migrations.Migration): dependencies = 
[ - ('contentcuration', '0141_add_task_signature'), + ("contentcuration", "0141_add_task_signature"), ] operations = [ migrations.RemoveConstraint( - model_name='file', - name='file_media_duration_int', + model_name="file", + name="file_media_duration_int", ), ] diff --git a/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py b/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py index 3a7dbae1a0..c67a5f068c 100644 --- a/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py +++ b/contentcuration/contentcuration/migrations/0143_file_file_media_duration_int.py @@ -6,12 +6,31 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0142_remove_file_file_media_duration_int'), + ("contentcuration", "0142_remove_file_file_media_duration_int"), ] operations = [ migrations.AddConstraint( - model_name='file', - constraint=models.CheckConstraint(check=models.Q(models.Q(('duration__gt', 0), ('preset__in', ['audio', 'audio_dependency', 'high_res_video', 'low_res_video', 'video_dependency'])), ('duration__isnull', True), _connector='OR'), name='file_media_duration_int'), + model_name="file", + constraint=models.CheckConstraint( + check=models.Q( + models.Q( + ("duration__gt", 0), + ( + "preset__in", + [ + "audio", + "audio_dependency", + "high_res_video", + "low_res_video", + "video_dependency", + ], + ), + ), + ("duration__isnull", True), + _connector="OR", + ), + name="file_media_duration_int", + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0144_soft_delete_user.py b/contentcuration/contentcuration/migrations/0144_soft_delete_user.py index a04040df69..d2a778ed34 100644 --- a/contentcuration/contentcuration/migrations/0144_soft_delete_user.py +++ b/contentcuration/contentcuration/migrations/0144_soft_delete_user.py @@ -9,23 +9,53 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0143_file_file_media_duration_int'), + ("contentcuration", "0143_file_file_media_duration_int"), ] operations = [ migrations.AddField( - model_name='user', - name='deleted', + model_name="user", + name="deleted", field=models.BooleanField(db_index=True, default=False), ), migrations.CreateModel( - name='UserHistory', + name="UserHistory", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('action', models.CharField(choices=[('soft-deletion', 'User soft deletion'), ('soft-recovery', - 'User soft deletion recovery'), ('related-data-hard-deletion', 'User related data hard deletion')], max_length=32)), - ('performed_at', models.DateTimeField(default=django.utils.timezone.now)), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to=settings.AUTH_USER_MODEL)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "action", + models.CharField( + choices=[ + ("soft-deletion", "User soft deletion"), + ("soft-recovery", "User soft deletion recovery"), + ( + "related-data-hard-deletion", + "User related data hard deletion", + ), + ], + max_length=32, + ), + ), + ( + "performed_at", + models.DateTimeField(default=django.utils.timezone.now), + ), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="history", + to=settings.AUTH_USER_MODEL, + ), + ), ], ), ] diff --git a/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py 
b/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py index 64287039f0..6e4743a878 100644 --- a/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py +++ b/contentcuration/contentcuration/migrations/0145_custom_task_metadata.py @@ -6,9 +6,10 @@ from django.db import migrations from django.db import models + def transfer_data(apps, schema_editor): - CustomTaskMetadata = apps.get_model('contentcuration', 'CustomTaskMetadata') - TaskResult = apps.get_model('django_celery_results', 'taskresult') + CustomTaskMetadata = apps.get_model("contentcuration", "CustomTaskMetadata") + TaskResult = apps.get_model("django_celery_results", "taskresult") old_task_results = TaskResult.objects.filter(status__in=states.UNREADY_STATES) @@ -21,28 +22,62 @@ def transfer_data(apps, schema_editor): signature=old_task_result.signature, ) + class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0144_soft_delete_user'), + ("contentcuration", "0144_soft_delete_user"), ] operations = [ migrations.CreateModel( - name='CustomTaskMetadata', + name="CustomTaskMetadata", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('task_id', models.CharField(max_length=255, unique=True)), - ('channel_id', models.UUIDField(blank=True, db_index=True, null=True)), - ('progress', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])), - ('signature', models.CharField(max_length=32, null=True)), - ('date_created', models.DateTimeField(auto_now_add=True, help_text='Datetime field when the custom_metadata for task was created in UTC', verbose_name='Created DateTime')), - ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to=settings.AUTH_USER_MODEL)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("task_id", models.CharField(max_length=255, unique=True)), + ("channel_id", models.UUIDField(blank=True, db_index=True, null=True)), + ( + "progress", + models.IntegerField( + blank=True, + null=True, + validators=[ + django.core.validators.MinValueValidator(0), + django.core.validators.MaxValueValidator(100), + ], + ), + ), + ("signature", models.CharField(max_length=32, null=True)), + ( + "date_created", + models.DateTimeField( + auto_now_add=True, + help_text="Datetime field when the custom_metadata for task was created in UTC", + verbose_name="Created DateTime", + ), + ), + ( + "user", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="tasks", + to=settings.AUTH_USER_MODEL, + ), + ), ], ), migrations.AddIndex( - model_name='customtaskmetadata', - index=models.Index(fields=['signature'], name='task_result_signature'), + model_name="customtaskmetadata", + index=models.Index(fields=["signature"], name="task_result_signature"), ), migrations.RunPython(transfer_data), ] diff --git a/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py b/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py index 5ecc6cb98f..0d288db47f 100644 --- a/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py +++ b/contentcuration/contentcuration/migrations/0146_drop_taskresult_fields.py @@ -1,37 +1,40 @@ # Generated by Django 3.2.19 on 2023-09-14 10:42 from django.db import migrations + class Migration(migrations.Migration): 
- replaces = [('django_celery_results', '0145_custom_task_metadata'),] + replaces = [ + ("django_celery_results", "0145_custom_task_metadata"), + ] def __init__(self, name, app_label): - super(Migration, self).__init__(name, 'django_celery_results') + super(Migration, self).__init__(name, "django_celery_results") dependencies = [ - ('contentcuration', '0145_custom_task_metadata'), - ('contentcuration', '0141_add_task_signature'), + ("contentcuration", "0145_custom_task_metadata"), + ("contentcuration", "0141_add_task_signature"), ] operations = [ migrations.RemoveField( - model_name='taskresult', - name='channel_id', + model_name="taskresult", + name="channel_id", ), migrations.RemoveField( - model_name='taskresult', - name='progress', + model_name="taskresult", + name="progress", ), migrations.RemoveField( - model_name='taskresult', - name='user', + model_name="taskresult", + name="user", ), migrations.RemoveField( - model_name='taskresult', - name='signature', + model_name="taskresult", + name="signature", ), migrations.RemoveIndex( - model_name='taskresult', - name='task_result_signature_idx', + model_name="taskresult", + name="task_result_signature_idx", ), ] diff --git a/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py b/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py index ac3faa8904..8db529797f 100644 --- a/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py +++ b/contentcuration/contentcuration/migrations/0147_alter_formatpreset_id.py @@ -6,13 +6,49 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0146_drop_taskresult_fields'), + ("contentcuration", "0146_drop_taskresult_fields"), ] operations = [ migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise 
Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ("imscp_zip", "IMSCP Zip"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py b/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py index ea3b80c86d..8a4ccaaa32 100644 --- a/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py +++ b/contentcuration/contentcuration/migrations/0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent.py @@ -10,58 +10,120 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0147_alter_formatpreset_id'), + ("contentcuration", "0147_alter_formatpreset_id"), ] operations = [ migrations.CreateModel( - name='RecommendationsInteractionEvent', + name="RecommendationsInteractionEvent", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('context', models.JSONField()), - ('created_at', models.DateTimeField(auto_now_add=True)), - ('contentnode_id', models.UUIDField()), - ('content_id', models.UUIDField()), - ('feedback_type', models.CharField(choices=[('IMPORTED', 'Imported'), ('REJECTED', 'Rejected'), ('PREVIEWED', 'Previewed'), ('SHOWMORE', 'Show More'), ('IGNORED', 'Ignored'), ('FLAGGED', 'Flagged')], max_length=50)), - ('feedback_reason', models.TextField(max_length=1500)), - ('recommendation_event_id', models.UUIDField()), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("context", models.JSONField()), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("contentnode_id", models.UUIDField()), + ("content_id", models.UUIDField()), + ( + "feedback_type", + models.CharField( + choices=[ + ("IMPORTED", "Imported"), + ("REJECTED", "Rejected"), + ("PREVIEWED", "Previewed"), + ("SHOWMORE", "Show More"), + ("IGNORED", "Ignored"), + ("FLAGGED", "Flagged"), + ], + max_length=50, + ), + ), + ("feedback_reason", models.TextField(max_length=1500)), + ("recommendation_event_id", models.UUIDField()), ], options={ - 'abstract': False, + "abstract": False, }, ), migrations.CreateModel( - name='RecommendationsEvent', + name="RecommendationsEvent", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('context', models.JSONField()), - ('created_at', models.DateTimeField(auto_now_add=True)), - ('contentnode_id', models.UUIDField()), - ('content_id', models.UUIDField()), - ('target_channel_id', models.UUIDField()), - ('time_hidden', models.DateTimeField()), - ('content', models.JSONField(default=list)), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + 
), + ("context", models.JSONField()), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("contentnode_id", models.UUIDField()), + ("content_id", models.UUIDField()), + ("target_channel_id", models.UUIDField()), + ("time_hidden", models.DateTimeField()), + ("content", models.JSONField(default=list)), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), ], options={ - 'abstract': False, + "abstract": False, }, ), migrations.CreateModel( - name='FlagFeedbackEvent', + name="FlagFeedbackEvent", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('context', models.JSONField()), - ('created_at', models.DateTimeField(auto_now_add=True)), - ('contentnode_id', models.UUIDField()), - ('content_id', models.UUIDField()), - ('target_channel_id', models.UUIDField()), - ('feedback_type', models.CharField(choices=[('IMPORTED', 'Imported'), ('REJECTED', 'Rejected'), ('PREVIEWED', 'Previewed'), ('SHOWMORE', 'Show More'), ('IGNORED', 'Ignored'), ('FLAGGED', 'Flagged')], max_length=50)), - ('feedback_reason', models.TextField(max_length=1500)), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("context", models.JSONField()), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("contentnode_id", models.UUIDField()), + ("content_id", models.UUIDField()), + ("target_channel_id", models.UUIDField()), + ( + "feedback_type", + models.CharField( + choices=[ + ("IMPORTED", "Imported"), + ("REJECTED", "Rejected"), + ("PREVIEWED", "Previewed"), + ("SHOWMORE", "Show More"), + ("IGNORED", "Ignored"), + ("FLAGGED", "Flagged"), + ], + max_length=50, + ), + ), + ("feedback_reason", models.TextField(max_length=1500)), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to=settings.AUTH_USER_MODEL, + ), + ), ], options={ - 'abstract': False, + "abstract": False, }, ), ] diff --git a/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py b/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py index a1ebff4d29..d1a7d9086b 100644 --- a/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py +++ b/contentcuration/contentcuration/migrations/0149_unpublishable_change_field.py @@ -6,20 +6,23 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent'), + ( + "contentcuration", + "0148_flagfeedbackevent_recommendationsevent_recommendationsinteractionevent", + ), ] operations = [ migrations.AddField( - model_name='change', - name='unpublishable', + model_name="change", + name="unpublishable", field=models.BooleanField(blank=True, null=True), ), # Add default to False in a separate migration operation # to avoid expensive backfilling of the new column for existing rows migrations.AlterField( - model_name='change', - name='unpublishable', + model_name="change", + name="unpublishable", field=models.BooleanField(blank=True, default=False, null=True), ), ] diff --git a/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py b/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py index e1ffc389ef..c17c71988d 100644 --- 
a/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py +++ b/contentcuration/contentcuration/migrations/0150_bloompub_format_and_preset.py @@ -6,18 +6,80 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0149_unpublishable_change_field'), + ("contentcuration", "0149_unpublishable_change_field"), ] operations = [ migrations.AlterField( - model_name='fileformat', - name='extension', - field=models.CharField(choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'Bloom Document'), ('bloomd', 'Bloom Document')], max_length=40, primary_key=True, serialize=False), + model_name="fileformat", + name="extension", + field=models.CharField( + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "Bloom Document"), + ("bloomd", "Bloom Document"), + ], + max_length=40, + primary_key=True, + serialize=False, + ), ), migrations.AlterField( - model_name='formatpreset', - name='id', - field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip'), ('bloompub', 'Bloom Document')], max_length=150, primary_key=True, serialize=False), + model_name="formatpreset", + name="id", + field=models.CharField( + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + 
("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ("imscp_zip", "IMSCP Zip"), + ("bloompub", "Bloom Document"), + ], + max_length=150, + primary_key=True, + serialize=False, + ), ), ] diff --git a/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py b/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py index 451cbeadb0..6ca8841220 100644 --- a/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py +++ b/contentcuration/contentcuration/migrations/0153_alter_recommendationsevent_time_hidden.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('contentcuration', '0152_alter_assessmentitem_type'), + ("contentcuration", "0152_alter_assessmentitem_type"), ] operations = [ migrations.AlterField( - model_name='recommendationsevent', - name='time_hidden', + model_name="recommendationsevent", + name="time_hidden", field=models.DateTimeField(blank=True, null=True), ), ] diff --git a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py index 5fcf0a63bc..f193921afb 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -86,22 +86,22 @@ VIEW_ACCESS = "view" DEFAULT_CONTENT_DEFAULTS = { - 'license': None, - 'language': None, - 'author': None, - 'aggregator': None, - 'provider': None, - 'copyright_holder': None, - 'license_description': None, - 'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5, - 'm_value': 5, - 'n_value': 5, - 'auto_derive_video_thumbnail': True, - 'auto_derive_audio_thumbnail': True, - 'auto_derive_document_thumbnail': True, - 'auto_derive_html5_thumbnail': True, - 'auto_derive_exercise_thumbnail': True, - 'auto_randomize_questions': True, + "license": None, + "language": None, + "author": None, + "aggregator": None, + "provider": None, + "copyright_holder": None, + "license_description": None, + "mastery_model": exercises.NUM_CORRECT_IN_A_ROW_5, + "m_value": 5, + "n_value": 5, + "auto_derive_video_thumbnail": True, + "auto_derive_audio_thumbnail": True, + "auto_derive_document_thumbnail": True, + "auto_derive_html5_thumbnail": True, + "auto_derive_exercise_thumbnail": True, + "auto_randomize_questions": True, } DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False) @@ -113,10 +113,9 @@ def to_pk(model_or_pk): class UserManager(BaseUserManager): - def create_user(self, email, first_name, last_name, password=None): if not email: - raise ValueError('Email address not specified') + raise ValueError("Email address not specified") new_user = self.model( email=self.normalize_email(email), @@ -136,7 +135,7 @@ def create_superuser(self, email, first_name, last_name, password=None): class UniqueActiveUserIndex(Index): - def create_sql(self, model, schema_editor, using='', **kwargs): + def create_sql(self, model, schema_editor, using="", **kwargs): """ This is a vendored and modified version of the Django create_sql method We do this so that we can monkey patch in the unique index statement onto the schema_editor @@ -145,7 
+144,9 @@ def create_sql(self, model, schema_editor, using='', **kwargs): We should remove this as soon as Django natively supports UniqueConstraints with Expressions. This should hopefully be the case in Django 3.3. """ - include = [model._meta.get_field(field_name).column for field_name in self.include] + include = [ + model._meta.get_field(field_name).column for field_name in self.include + ] condition = self._get_condition_sql(model, schema_editor) if self.expressions: index_expressions = [] @@ -172,10 +173,17 @@ def create_sql(self, model, schema_editor, using='', **kwargs): schema_editor.sql_create_index = sql # Generate the SQL statement that we want to return return_statement = schema_editor._create_index_sql( - model, fields=fields, name=self.name, using=using, - db_tablespace=self.db_tablespace, col_suffixes=col_suffixes, - opclasses=self.opclasses, condition=condition, include=include, - expressions=expressions, **kwargs, + model, + fields=fields, + name=self.name, + using=using, + db_tablespace=self.db_tablespace, + col_suffixes=col_suffixes, + opclasses=self.opclasses, + condition=condition, + include=include, + expressions=expressions, + **kwargs, ) # Reinstate the previous index SQL statement so that we have done no harm schema_editor.sql_create_index = old_create_index_sql @@ -188,15 +196,31 @@ class User(AbstractBaseUser, PermissionsMixin): first_name = models.CharField(max_length=100) last_name = models.CharField(max_length=100) is_admin = models.BooleanField(default=False) - is_active = models.BooleanField('active', default=False, - help_text='Designates whether this user should be treated as active.') - is_staff = models.BooleanField('staff status', default=False, - help_text='Designates whether the user can log into this admin site.') - date_joined = models.DateTimeField('date joined', default=timezone.now) - clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL) + is_active = models.BooleanField( + "active", + default=False, + help_text="Designates whether this user should be treated as active.", + ) + is_staff = models.BooleanField( + "staff status", + default=False, + help_text="Designates whether the user can log into this admin site.", + ) + date_joined = models.DateTimeField("date joined", default=timezone.now) + clipboard_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="user_clipboard", + on_delete=models.SET_NULL, + ) preferences = models.TextField(default=DEFAULT_USER_PREFERENCES) - disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload') - disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded') + disk_space = models.FloatField( + default=524288000, help_text="How many bytes a user can upload" + ) + disk_space_used = models.FloatField( + default=0, help_text="How many bytes a user has uploaded" + ) information = JSONField(null=True) content_defaults = JSONField(default=dict) @@ -205,14 +229,16 @@ class User(AbstractBaseUser, PermissionsMixin): deleted = models.BooleanField(default=False, db_index=True) - _field_updates = FieldTracker(fields=[ - # Field to watch for changes - "disk_space", - ]) + _field_updates = FieldTracker( + fields=[ + # Field to watch for changes + "disk_space", + ] + ) objects = UserManager() - USERNAME_FIELD = 'email' - REQUIRED_FIELDS = ['first_name', 'last_name'] + USERNAME_FIELD = "email" + REQUIRED_FIELDS = ["first_name", "last_name"] def 
__unicode__(self): return self.email @@ -254,38 +280,53 @@ def hard_delete_user_related_data(self): self.sent_by.all().delete() editable_channels_user_query = ( - User.objects.filter(editable_channels__id=OuterRef('id')) - .values_list('id', flat=True) - .distinct() + User.objects.filter(editable_channels__id=OuterRef("id")) + .values_list("id", flat=True) + .distinct() ) - non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount( - editable_channels_user_query, field="id")).filter(num_editors=1, public=False) + non_public_channels_sole_editor = self.editable_channels.annotate( + num_editors=SQCount(editable_channels_user_query, field="id") + ).filter(num_editors=1, public=False) # Point sole editor non-public channels' contentnodes to orphan tree to let # our garbage collection delete the nodes and underlying file. - tree_ids_to_update = non_public_channels_sole_editor.values_list('main_tree__tree_id', flat=True) + tree_ids_to_update = non_public_channels_sole_editor.values_list( + "main_tree__tree_id", flat=True + ) for tree_id in tree_ids_to_update: - ContentNode.objects.filter(tree_id=tree_id).update(parent_id=settings.ORPHANAGE_ROOT_ID) + ContentNode.objects.filter(tree_id=tree_id).update( + parent_id=settings.ORPHANAGE_ROOT_ID + ) - logging.debug("Queries after updating content nodes parent ID: %s", connection.queries) + logging.debug( + "Queries after updating content nodes parent ID: %s", connection.queries + ) # Hard delete non-public channels associated with this user (if user is the only editor). non_public_channels_sole_editor.delete() # Hard delete non-public channel collections associated with this user (if user is the only editor). user_query = ( - User.objects.filter(channel_sets__id=OuterRef('id')) - .values_list('id', flat=True) - .distinct() + User.objects.filter(channel_sets__id=OuterRef("id")) + .values_list("id", flat=True) + .distinct() ) - self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1, public=False).delete() + self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter( + num_editors=1, public=False + ).delete() # Create history! - self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION) + self.history.create( + user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION + ) def can_edit(self, channel_id): - return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists() + return ( + Channel.filter_edit_queryset(Channel.objects.all(), self) + .filter(pk=channel_id) + .exists() + ) def check_space(self, size, checksum): if self.is_admin: @@ -297,7 +338,9 @@ def check_space(self, size, checksum): space = self.get_available_space(active_files=active_files) if space < size: - raise PermissionDenied(_("Not enough space. Check your storage under Settings page.")) + raise PermissionDenied( + _("Not enough space. 
Check your storage under Settings page.") + ) def check_feature_flag(self, flag_name): feature_flags = self.feature_flags or {} @@ -306,46 +349,61 @@ def check_feature_flag(self, flag_name): def check_channel_space(self, channel): active_files = self.get_user_active_files() staging_tree_id = channel.staging_tree.tree_id - channel_files = self.files\ - .filter(contentnode__tree_id=staging_tree_id)\ - .values('checksum')\ - .distinct()\ - .exclude(checksum__in=active_files.values_list('checksum', flat=True)) - staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0) + channel_files = ( + self.files.filter(contentnode__tree_id=staging_tree_id) + .values("checksum") + .distinct() + .exclude(checksum__in=active_files.values_list("checksum", flat=True)) + ) + staged_size = float(channel_files.aggregate(used=Sum("file_size"))["used"] or 0) if self.get_available_space(active_files=active_files) < (staged_size): - raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.')) + raise PermissionDenied( + _("Out of storage! Request more space under Settings > Storage.") + ) def check_staged_space(self, size, checksum): if self.staged_files.filter(checksum=checksum).exists(): return True space = self.get_available_staged_space() if space < size: - raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.')) + raise PermissionDenied( + _("Out of storage! Request more space under Settings > Storage.") + ) def get_available_staged_space(self): - space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0 + space_used = ( + self.staged_files.values("checksum") + .distinct() + .aggregate(size=Sum("file_size"))["size"] + or 0 + ) return float(max(self.disk_space - space_used, 0)) def get_available_space(self, active_files=None): - return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0)) + return float( + max(self.disk_space - self.get_space_used(active_files=active_files), 0) + ) def get_user_active_trees(self): - return self.editable_channels.exclude(deleted=True)\ - .values(tree_id=F("main_tree__tree_id")) + return self.editable_channels.exclude(deleted=True).values( + tree_id=F("main_tree__tree_id") + ) def get_user_active_files(self): cte = With(self.get_user_active_trees().distinct()) - return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\ - .with_cte(cte)\ - .values('checksum')\ + return ( + cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id) + .with_cte(cte) + .values("checksum") .distinct() + ) def get_space_used(self, active_files=None): active_files = active_files or self.get_user_active_files() - files = active_files.aggregate(total_used=Sum('file_size')) - return float(files['total_used'] or 0) + files = active_files.aggregate(total_used=Sum("file_size")) + return float(files["total_used"] or 0) def set_space_used(self): self.disk_space_used = self.get_space_used() @@ -354,13 +412,15 @@ def set_space_used(self): def get_space_used_by_kind(self): active_files = self.get_user_active_files() - files = active_files.values('preset__kind_id')\ - .annotate(space=Sum('file_size'))\ - .order_by() + files = ( + active_files.values("preset__kind_id") + .annotate(space=Sum("file_size")) + .order_by() + ) kind_dict = {} for item in files: - kind_dict[item['preset__kind_id']] = item['space'] + kind_dict[item["preset__kind_id"]] = item["space"] return kind_dict def email_user(self, subject, message, 
from_email=None, **kwargs): @@ -380,7 +440,7 @@ def get_full_name(self): """ Returns the first_name plus the last_name, with a space in between. """ - full_name = '%s %s' % (self.first_name, self.last_name) + full_name = "%s %s" % (self.first_name, self.last_name) return full_name.strip() def get_short_name(self): @@ -395,9 +455,10 @@ def get_token(self): def save(self, *args, **kwargs): from contentcuration.utils.user import calculate_user_storage + super(User, self).save(*args, **kwargs) - if 'disk_space' in self._field_updates.changed(): + if "disk_space" in self._field_updates.changed(): calculate_user_storage(self.pk) changed = False @@ -407,7 +468,9 @@ def save(self, *args, **kwargs): changed = True if not self.clipboard_tree: - self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC) + self.clipboard_tree = ContentNode.objects.create( + title=self.email + " clipboard", kind_id=content_kinds.TOPIC + ) self.clipboard_tree.save() changed = True @@ -423,14 +486,19 @@ def get_server_rev(self): .with_cte(changes_cte) .filter(applied=True) .values_list("server_rev", flat=True) - .order_by("-server_rev").first() + .order_by("-server_rev") + .first() ) or 0 class Meta: verbose_name = "User" verbose_name_plural = "Users" indexes = [ - UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx") + UniqueActiveUserIndex( + Lower("email"), + condition=Q(is_active=True), + name="contentcura_email_d4d492_idx", + ) ] @classmethod @@ -444,13 +512,17 @@ def filter_view_queryset(cls, queryset, user): # all shared editors all_editable = User.editable_channels.through.objects.all() editable = all_editable.filter( - channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True) + channel_id__in=all_editable.filter(user_id=user.pk).values_list( + "channel_id", flat=True + ) ) # all shared viewers all_view_only = User.view_only_channels.through.objects.all() view_only = all_view_only.filter( - channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True) + channel_id__in=all_view_only.filter(user_id=user.pk).values_list( + "channel_id", flat=True + ) ) return queryset.filter( @@ -490,9 +562,8 @@ def get_for_email(cls, email, deleted=False, **filters): class UUIDField(models.CharField): - def __init__(self, *args, **kwargs): - kwargs['max_length'] = 32 + kwargs["max_length"] = 32 super(UUIDField, self).__init__(*args, **kwargs) def prepare_value(self, value): @@ -556,14 +627,14 @@ def object_storage_name(instance, filename): :return: str """ - default_ext = '' + default_ext = "" if instance.file_format_id: - default_ext = '.{}'.format(instance.file_format_id) + default_ext = ".{}".format(instance.file_format_id) return generate_object_storage_name(instance.checksum, filename, default_ext) -def generate_object_storage_name(checksum, filename, default_ext=''): +def generate_object_storage_name(checksum, filename, default_ext=""): """ Separated from file_on_disk_name to allow for simple way to check if has already exists """ h = checksum basename, actual_ext = os.path.splitext(filename) @@ -639,6 +710,7 @@ def _save(self, name, content): class SecretToken(models.Model): """Tokens for channels""" + token = models.CharField(max_length=100, unique=True) is_primary = models.BooleanField(default=False) @@ -689,10 +761,10 @@ def get_channel_thumbnail(channel): if thumbnail_data.get("base64"): return thumbnail_data["base64"] - if channel.get("thumbnail") and 'static' 
not in channel.get("thumbnail"): + if channel.get("thumbnail") and "static" not in channel.get("thumbnail"): return generate_storage_url(channel.get("thumbnail")) - return '/static/img/kolibri_placeholder.png' + return "/static/img/kolibri_placeholder.png" CHANNEL_NAME_INDEX_NAME = "channel_name_idx" @@ -716,24 +788,31 @@ def boolean_val(val): class PermissionCTE(With): tree_id_fields = [ - "channel__{}__tree_id".format(tree_name) - for tree_name in CHANNEL_TREES + "channel__{}__tree_id".format(tree_name) for tree_name in CHANNEL_TREES ] def __init__(self, model, user_id, **kwargs): - queryset = model.objects.filter(user_id=user_id)\ - .annotate( - tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField()) + queryset = model.objects.filter(user_id=user_id).annotate( + tree_id=Unnest( + ArrayRemove(Array(*self.tree_id_fields), None), + output_field=models.IntegerField(), + ) + ) + super(PermissionCTE, self).__init__( + queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs ) - super(PermissionCTE, self).__init__(queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs) @classmethod def editable_channels(cls, user_id): - return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte") + return PermissionCTE( + User.editable_channels.through, user_id, name="editable_channels_cte" + ) @classmethod def view_only_channels(cls, user_id): - return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte") + return PermissionCTE( + User.view_only_channels.through, user_id, name="view_only_channels_cte" + ) def exists(self, *filters): return Exists(self.queryset().filter(*filters).values("user_id")) @@ -767,6 +846,7 @@ def update_or_create(self, defaults=None, **kwargs): class Channel(models.Model): """ Permissions come from association with organizations """ + id = UUIDField(primary_key=True, default=uuid.uuid4) name = models.CharField(max_length=200, blank=True) description = models.CharField(max_length=400, blank=True) @@ -776,39 +856,83 @@ class Channel(models.Model): thumbnail_encoding = JSONField(default=dict) editors = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='editable_channels', + related_name="editable_channels", verbose_name="editors", help_text="Users with edit rights", blank=True, ) viewers = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='view_only_channels', + related_name="view_only_channels", verbose_name="viewers", help_text="Users with view only rights", blank=True, ) - language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL) - trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL) - clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL) - main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL) - staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL) - chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL) - previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL) + language = models.ForeignKey( + "Language", + null=True, + 
blank=True, + related_name="channel_language", + on_delete=models.SET_NULL, + ) + trash_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_trash", + on_delete=models.SET_NULL, + ) + clipboard_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_clipboard", + on_delete=models.SET_NULL, + ) + main_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_main", + on_delete=models.SET_NULL, + ) + staging_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_staging", + on_delete=models.SET_NULL, + ) + chef_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_chef", + on_delete=models.SET_NULL, + ) + previous_tree = models.ForeignKey( + "ContentNode", + null=True, + blank=True, + related_name="channel_previous", + on_delete=models.SET_NULL, + ) bookmarked_by = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='bookmarked_channels', + related_name="bookmarked_channels", verbose_name="bookmarked by", ) deleted = models.BooleanField(default=False, db_index=True) public = models.BooleanField(default=False, db_index=True) preferences = models.TextField(default=DEFAULT_USER_PREFERENCES) content_defaults = JSONField(default=dict) - priority = models.IntegerField(default=0, help_text="Order to display public channels") + priority = models.IntegerField( + default=0, help_text="Order to display public channels" + ) last_published = models.DateTimeField(blank=True, null=True) secret_tokens = models.ManyToManyField( SecretToken, - related_name='channels', + related_name="channels", verbose_name="secret tokens", blank=True, ) @@ -828,26 +952,28 @@ class Channel(models.Model): published_size = models.FloatField(default=0) included_languages = models.ManyToManyField( "Language", - related_name='channels', + related_name="channels", verbose_name="languages", blank=True, ) - _field_updates = FieldTracker(fields=[ - # Field to watch for changes - "description", - "language_id", - "thumbnail", - "name", - "thumbnail_encoding", - # watch these fields for changes - # but exclude them from setting changed - # on the main tree - "deleted", - "public", - "main_tree_id", - "version", - ]) + _field_updates = FieldTracker( + fields=[ + # Field to watch for changes + "description", + "language_id", + "thumbnail", + "name", + "thumbnail_encoding", + # watch these fields for changes + # but exclude them from setting changed + # on the main tree + "deleted", + "public", + "main_tree_id", + "version", + ] + ) objects = ChannelModelQuerySet.as_manager() @@ -863,7 +989,11 @@ def filter_edit_queryset(cls, queryset, user): if not user_id: return queryset.none() - edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id"))) + edit = Exists( + User.editable_channels.through.objects.filter( + user_id=user_id, channel_id=OuterRef("id") + ) + ) queryset = queryset.annotate(edit=edit) if user.is_admin: return queryset @@ -877,8 +1007,16 @@ def filter_view_queryset(cls, queryset, user): if user_id: filters = dict(user_id=user_id, channel_id=OuterRef("id")) - edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id")) - view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id")) + edit = Exists( + User.editable_channels.through.objects.filter(**filters).values( + "user_id" + ) + ) + view = Exists( + 
User.view_only_channels.through.objects.filter(**filters).values( + "user_id" + ) + ) else: edit = boolean_val(False) view = boolean_val(False) @@ -893,9 +1031,9 @@ def filter_view_queryset(cls, queryset, user): permission_filter = Q() if user_id: - pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list( - "channel_id", flat=True - ) + pending_channels = Invitation.objects.filter( + email=user_email, revoked=False, declined=False, accepted=False + ).values_list("channel_id", flat=True) permission_filter = ( Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels) ) @@ -904,7 +1042,11 @@ def filter_view_queryset(cls, queryset, user): @classmethod def get_all_channels(cls): - return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct() + return ( + cls.objects.select_related("main_tree") + .prefetch_related("editors", "viewers") + .distinct() + ) def resource_size_key(self): return "{}_resource_size".format(self.pk) @@ -916,13 +1058,15 @@ def get_resource_size(self): if cached_data: return cached_data tree_id = self.main_tree.tree_id - files = File.objects.select_related('contentnode', 'assessment_item')\ - .filter(contentnode__tree_id=tree_id)\ - .values('checksum', 'file_size')\ - .distinct()\ - .aggregate(resource_size=Sum('file_size')) - cache.set(self.resource_size_key(), files['resource_size'] or 0, None) - return files['resource_size'] or 0 + files = ( + File.objects.select_related("contentnode", "assessment_item") + .filter(contentnode__tree_id=tree_id) + .values("checksum", "file_size") + .distinct() + .aggregate(resource_size=Sum("file_size")) + ) + cache.set(self.resource_size_key(), files["resource_size"] or 0, None) + return files["resource_size"] or 0 def on_create(self): actor_id = getattr(self, "_actor_id", None) @@ -945,7 +1089,12 @@ def on_create(self): ) # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids. 
if settings.DEBUG: - if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1: + if ( + ContentNode.objects.filter( + parent=None, tree_id=self.main_tree.tree_id + ).count() + != 1 + ): raise AssertionError if not self.trash_tree: @@ -962,20 +1111,31 @@ def on_create(self): def on_update(self): # noqa C901 from contentcuration.utils.user import calculate_user_storage + original_values = self._field_updates.changed() - blacklist = set([ - "public", - "main_tree_id", - "version", - ]) + blacklist = set( + [ + "public", + "main_tree_id", + "version", + ] + ) - if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)): + if ( + self.main_tree + and original_values + and any((True for field in original_values if field not in blacklist)) + ): # Changing channel metadata should also mark main_tree as changed self.main_tree.changed = True # Check if original thumbnail is no longer referenced - if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]: + if ( + "thumbnail" in original_values + and original_values["thumbnail"] + and "static" not in original_values["thumbnail"] + ): filename, ext = os.path.splitext(original_values["thumbnail"]) delete_empty_file_reference(filename, ext[1:]) @@ -987,7 +1147,9 @@ def on_update(self): # noqa C901 if "deleted" in original_values and not original_values["deleted"]: self.pending_editors.all().delete() # Delete db if channel has been deleted and mark as unpublished - export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id)) + export_db_storage_path = os.path.join( + settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id) + ) if default_storage.exists(export_db_storage_path): default_storage.delete(export_db_storage_path) if self.main_tree: @@ -1006,7 +1168,9 @@ def on_update(self): # noqa C901 self.main_tree.save() # if this change affects the published channel list, clear the channel cache - if "public" in original_values and (self.main_tree and self.main_tree.published): + if "public" in original_values and ( + self.main_tree and self.main_tree.published + ): delete_public_channel_cache_keys() def save(self, *args, **kwargs): @@ -1022,19 +1186,33 @@ def save(self, *args, **kwargs): super(Channel, self).save(*args, **kwargs) if creating: - self.history.create(actor_id=self._actor_id, action=channel_history.CREATION) + self.history.create( + actor_id=self._actor_id, action=channel_history.CREATION + ) def get_thumbnail(self): return get_channel_thumbnail(self) def has_changes(self): - return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists() + return ( + self.main_tree.get_descendants(include_self=True) + .filter(changed=True) + .exists() + ) def get_date_modified(self): - return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified'] + return self.main_tree.get_descendants(include_self=True).aggregate( + last_modified=Max("modified") + )["last_modified"] def get_resource_count(self): - return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count() + return ( + self.main_tree.get_descendants() + .exclude(kind_id=content_kinds.TOPIC) + .order_by("content_id") + .distinct("content_id") + .count() + ) def get_human_token(self): return self.secret_tokens.get(is_primary=True) @@ -1043,7 +1221,9 @@ def 
get_channel_id_token(self): return self.secret_tokens.get(token=self.id) def make_token(self): - token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True) + token = self.secret_tokens.create( + token=SecretToken.generate_new_token(), is_primary=True + ) self.secret_tokens.get_or_create(token=self.id) return token @@ -1057,7 +1237,9 @@ def make_public(self, bypass_signals=False): Returns the same channel object. """ if bypass_signals: - self.public = True # set this attribute still, so the object will be updated + self.public = ( + True # set this attribute still, so the object will be updated + ) Channel.objects.filter(id=self.id).update(public=True) # clear the channel cache delete_public_channel_cache_keys() @@ -1081,7 +1263,8 @@ def get_server_rev(self): .with_cte(changes_cte) .filter(applied=True) .values_list("server_rev", flat=True) - .order_by("-server_rev").first() + .order_by("-server_rev") + .first() ) or 0 @property @@ -1100,12 +1283,20 @@ def get_public_channels(cls, defer_nonmain_trees=False): If defer_nonmain_trees is True, defer the loading of all trees except for the main_tree.""" if defer_nonmain_trees: - c = (Channel.objects - .filter(public=True) - .exclude(deleted=True) - .select_related('main_tree') - .prefetch_related('editors') - .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers')) + c = ( + Channel.objects.filter(public=True) + .exclude(deleted=True) + .select_related("main_tree") + .prefetch_related("editors") + .defer( + "trash_tree", + "clipboard_tree", + "staging_tree", + "chef_tree", + "previous_tree", + "viewers", + ) + ) else: c = Channel.objects.filter(public=True).exclude(deleted=True) @@ -1118,9 +1309,7 @@ class Meta: indexes = [ models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME), ] - index_together = [ - ["deleted", "public"] - ] + index_together = [["deleted", "public"]] CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id" @@ -1130,8 +1319,21 @@ class ChannelHistory(models.Model): """ Model for tracking certain actions performed on a channel """ - channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE) - actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE) + + channel = models.ForeignKey( + "Channel", + null=False, + blank=False, + related_name="history", + on_delete=models.CASCADE, + ) + actor = models.ForeignKey( + "User", + null=False, + blank=False, + related_name="channel_history", + on_delete=models.CASCADE, + ) performed = models.DateTimeField(default=timezone.now) action = models.CharField(max_length=50, choices=channel_history.choices) @@ -1141,7 +1343,11 @@ def prune(cls): Prunes history records by keeping the most recent actions for each channel and type, and deleting all other older actions """ - keep_ids = cls.objects.distinct("channel_id", "action").order_by("channel_id", "action", "-performed").values_list("id", flat=True) + keep_ids = ( + cls.objects.distinct("channel_id", "action") + .order_by("channel_id", "action", "-performed") + .values_list("id", flat=True) + ) cls.objects.exclude(id__in=keep_ids).delete() class Meta: @@ -1149,7 +1355,9 @@ class Meta: verbose_name_plural = "Channel histories" indexes = [ - models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME), + models.Index( + fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME + ), ] @@ -1157,7 +1365,14 @@ class 
UserHistory(models.Model): """ Model that stores the user's action history. """ - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name="history", on_delete=models.CASCADE) + + user = models.ForeignKey( + settings.AUTH_USER_MODEL, + null=False, + blank=False, + related_name="history", + on_delete=models.CASCADE, + ) action = models.CharField(max_length=32, choices=user_history.choices) performed_at = models.DateTimeField(default=timezone.now) @@ -1172,19 +1387,29 @@ class ChannelSet(models.Model): public = models.BooleanField(default=False, db_index=True) editors = models.ManyToManyField( settings.AUTH_USER_MODEL, - related_name='channel_sets', + related_name="channel_sets", verbose_name="editors", help_text="Users with edit rights", blank=True, ) - secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL) + secret_token = models.ForeignKey( + "SecretToken", + null=True, + blank=True, + related_name="channel_sets", + on_delete=models.SET_NULL, + ) @classmethod def filter_edit_queryset(cls, queryset, user): if user.is_anonymous: return queryset.none() user_id = not user.is_anonymous and user.id - edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id"))) + edit = Exists( + User.channel_sets.through.objects.filter( + user_id=user_id, channelset_id=OuterRef("id") + ) + ) queryset = queryset.annotate(edit=edit) if user.is_admin: return queryset @@ -1207,7 +1432,9 @@ def save(self, *args, **kwargs): def on_create(self): if not self.secret_token: - self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token()) + self.secret_token = SecretToken.objects.create( + token=SecretToken.generate_new_token() + ) def delete(self, *args, **kwargs): super(ChannelSet, self).delete(*args, **kwargs) @@ -1219,20 +1446,28 @@ def delete(self, *args, **kwargs): class ContentTag(models.Model): id = UUIDField(primary_key=True, default=uuid.uuid4) tag_name = models.CharField(max_length=50) - channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL) + channel = models.ForeignKey( + "Channel", + related_name="tags", + blank=True, + null=True, + db_index=True, + on_delete=models.SET_NULL, + ) objects = CustomManager() def __str__(self): return self.tag_name class Meta: - unique_together = ['tag_name', 'channel'] + unique_together = ["tag_name", "channel"] class License(models.Model): """ Normalize the license of ContentNode model """ + license_name = models.CharField(max_length=50) license_url = models.URLField(blank=True) license_description = models.TextField(blank=True) @@ -1247,7 +1482,7 @@ class License(models.Model): @classmethod def validate_name(cls, name): if cls.objects.filter(license_name=name).count() == 0: - raise ValidationError('License `{}` does not exist'.format(name)) + raise ValidationError("License `{}` does not exist".format(name)) def __str__(self): return self.license_name @@ -1263,6 +1498,7 @@ class ContentNode(MPTTModel, models.Model): """ By default, all nodes have a title and can be used as a topic. """ + # Random id used internally on Studio (See `node_id` for id used in Kolibri) id = UUIDField(primary_key=True, default=uuid.uuid4) @@ -1271,18 +1507,26 @@ class ContentNode(MPTTModel, models.Model): # interacts with a piece of content, all substantially similar pieces of # content should be marked as such as well. 
We track these "substantially # similar" types of content by having them have the same content_id. - content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True) + content_id = UUIDField( + primary_key=False, default=uuid.uuid4, editable=False, db_index=True + ) # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False) # TODO: disallow nulls once existing models have been set - original_channel_id = UUIDField(primary_key=False, editable=False, null=True, - db_index=True) # Original channel copied from - source_channel_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate channel copied from + original_channel_id = UUIDField( + primary_key=False, editable=False, null=True, db_index=True + ) # Original channel copied from + source_channel_id = UUIDField( + primary_key=False, editable=False, null=True + ) # Immediate channel copied from # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary) - original_source_node_id = UUIDField(primary_key=False, editable=False, null=True, - db_index=True) - source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate node_id of node copied from + original_source_node_id = UUIDField( + primary_key=False, editable=False, null=True, db_index=True + ) + source_node_id = UUIDField( + primary_key=False, editable=False, null=True + ) # Immediate node_id of node copied from # Fields specific to content generated by Ricecooker source_id = models.CharField(max_length=200, blank=True, null=True) @@ -1290,24 +1534,75 @@ class ContentNode(MPTTModel, models.Model): title = models.CharField(max_length=200, blank=True) description = models.TextField(blank=True) - kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL) - license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL) + kind = models.ForeignKey( + "ContentKind", + related_name="contentnodes", + db_index=True, + null=True, + blank=True, + on_delete=models.SET_NULL, + ) + license = models.ForeignKey( + "License", null=True, blank=True, on_delete=models.SET_NULL + ) license_description = models.CharField(max_length=400, null=True, blank=True) - prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of', - through='PrerequisiteContentRelationship', symmetrical=False, blank=True) - is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship', - symmetrical=False, blank=True) - language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL) - parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE) - tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True) + prerequisite = models.ManyToManyField( + "self", + related_name="is_prerequisite_of", + through="PrerequisiteContentRelationship", + symmetrical=False, + blank=True, + ) + is_related = models.ManyToManyField( + "self", + related_name="relate_to", + through="RelatedContentRelationship", + symmetrical=False, + blank=True, + ) + language = models.ForeignKey( + "Language", + null=True, + blank=True, + related_name="content_language", + on_delete=models.SET_NULL, + ) + parent = 
TreeForeignKey( + "self", + null=True, + blank=True, + related_name="children", + db_index=True, + on_delete=models.CASCADE, + ) + tags = models.ManyToManyField( + ContentTag, symmetrical=False, related_name="tagged_content", blank=True + ) # No longer used - sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order", - help_text="Ascending, lowest number shown first") - copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="", - help_text="Organization of person who holds the essential rights") + sort_order = models.FloatField( + max_length=50, + default=1, + verbose_name="sort order", + help_text="Ascending, lowest number shown first", + ) + copyright_holder = models.CharField( + max_length=200, + null=True, + blank=True, + default="", + help_text="Organization of person who holds the essential rights", + ) # legacy field... - original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates') - cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones') + original_node = TreeForeignKey( + "self", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="duplicates", + ) + cloned_source = TreeForeignKey( + "self", on_delete=models.SET_NULL, null=True, blank=True, related_name="clones" + ) thumbnail_encoding = models.TextField(blank=True, null=True) @@ -1325,14 +1620,31 @@ class ContentNode(MPTTModel, models.Model): - n: n value for M out of N mastery criteria """ extra_fields = JSONField(default=dict, blank=True, null=True) - author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?", - null=True) - aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?", - null=True) - provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?", - null=True) - - role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER) + author = models.CharField( + max_length=200, + blank=True, + default="", + help_text="Who created this content?", + null=True, + ) + aggregator = models.CharField( + max_length=200, + blank=True, + default="", + help_text="Who gathered this content together?", + null=True, + ) + provider = models.CharField( + max_length=200, + blank=True, + default="", + help_text="Who distributed this content?", + null=True, + ) + + role_visibility = models.CharField( + max_length=50, choices=roles.choices, default=roles.LEARNER + ) freeze_authoring_data = models.BooleanField(default=False) # Fields for metadata labels @@ -1350,7 +1662,11 @@ class ContentNode(MPTTModel, models.Model): # A field for storing a suggested duration for the content node # this duration should be in seconds. 
- suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)") + suggested_duration = models.IntegerField( + blank=True, + null=True, + help_text="Suggested duration for the content node (in seconds)", + ) objects = CustomContentNodeTreeManager() @@ -1389,9 +1705,15 @@ def filter_by_pk(cls, pk): if tree_id: query = query.filter(tree_id=tree_id) else: - tree_id = ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True).first() + tree_id = ( + ContentNode.objects.filter(pk=pk) + .values_list("tree_id", flat=True) + .first() + ) if tree_id: - cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None) + cache.set( + CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None + ) query = query.filter(tree_id=tree_id) else: query = query.none() @@ -1429,24 +1751,26 @@ def filter_view_queryset(cls, queryset, user): ) if not user_id: - return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True) + return queryset.annotate( + edit=boolean_val(False), view=boolean_val(False) + ).filter(public=True) edit_cte = PermissionCTE.editable_channels(user_id) view_cte = PermissionCTE.view_only_channels(user_id) - queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate( - edit=edit_cte.exists(cls._permission_filter), - view=view_cte.exists(cls._permission_filter), + queryset = ( + queryset.with_cte(edit_cte) + .with_cte(view_cte) + .annotate( + edit=edit_cte.exists(cls._permission_filter), + view=view_cte.exists(cls._permission_filter), + ) ) if user.is_admin: return queryset - return queryset.filter( - Q(view=True) - | Q(edit=True) - | Q(public=True) - ) + return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True)) @raise_if_unsaved def get_root(self): @@ -1461,12 +1785,12 @@ def get_root_id(self): if self.is_root_node() and self.kind_id != content_kinds.TOPIC: return self - return ContentNode.objects.values_list('pk', flat=True).get( - tree_id=self._mpttfield('tree_id'), + return ContentNode.objects.values_list("pk", flat=True).get( + tree_id=self._mpttfield("tree_id"), parent=None, ) - def get_tree_data(self, levels=float('inf')): + def get_tree_data(self, levels=float("inf")): """ Returns `levels`-deep tree information starting at current node. 
Args: @@ -1484,7 +1808,9 @@ def get_tree_data(self, levels=float('inf')): } children = self.children.all() if levels > 0: - node_data["children"] = [c.get_tree_data(levels=levels - 1) for c in children] + node_data["children"] = [ + c.get_tree_data(levels=levels - 1) for c in children + ] return node_data if self.kind_id == content_kinds.EXERCISE: return { @@ -1497,7 +1823,9 @@ def get_tree_data(self, levels=float('inf')): return { "title": self.title, "kind": self.kind_id, - "file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'], + "file_size": self.files.values("file_size").aggregate( + size=Sum("file_size") + )["size"], "node_id": self.node_id, "studio_id": self.id, } @@ -1505,9 +1833,20 @@ def get_tree_data(self, levels=float('inf')): def get_original_node(self): original_node = self.original_node or self if self.original_channel_id and self.original_source_node_id: - original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id - original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \ - ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self + original_tree_id = ( + Channel.objects.select_related("main_tree") + .get(pk=self.original_channel_id) + .main_tree.tree_id + ) + original_node = ( + ContentNode.objects.filter( + tree_id=original_tree_id, node_id=self.original_source_node_id + ).first() + or ContentNode.objects.filter( + tree_id=original_tree_id, content_id=self.content_id + ).first() + or self + ) return original_node def get_associated_presets(self): @@ -1552,7 +1891,13 @@ def get_channel(self): root = self.get_root() if not root: return None - return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first() + return Channel.objects.filter( + Q(main_tree=root) + | Q(chef_tree=root) + | Q(trash_tree=root) + | Q(staging_tree=root) + | Q(previous_tree=root) + ).first() except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError): return None @@ -1594,10 +1939,7 @@ def get_details(self, channel=None): node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by() - descendants = ( - self.get_descendants() - .values("id") - ) + descendants = self.get_descendants().values("id") # Get resources resources = descendants.exclude(kind=content_kinds.TOPIC).order_by() @@ -1859,21 +2201,24 @@ def get_details(self, channel=None): def has_changes(self): mptt_opts = self._mptt_meta # Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods. 
- blacklist = set([ - 'changed', - 'modified', - 'publishing', - mptt_opts.tree_id_attr, - mptt_opts.left_attr, - mptt_opts.right_attr, - mptt_opts.level_attr, - ]) + blacklist = set( + [ + "changed", + "modified", + "publishing", + mptt_opts.tree_id_attr, + mptt_opts.left_attr, + mptt_opts.right_attr, + mptt_opts.level_attr, + ] + ) original_values = self._field_updates.changed() return any((True for field in original_values if field not in blacklist)) def recalculate_editors_storage(self): from contentcuration.utils.user import calculate_user_storage - for editor in self.files.values_list('uploaded_by_id', flat=True).distinct(): + + for editor in self.files.values_list("uploaded_by_id", flat=True).distinct(): calculate_user_storage(editor) def mark_complete(self): # noqa C901 @@ -1886,37 +2231,56 @@ def mark_complete(self): # noqa C901 errors.append("Missing license") if self.license and self.license.is_custom and not self.license_description: errors.append("Missing license description for custom license") - if self.license and self.license.copyright_holder_required and not self.copyright_holder: + if ( + self.license + and self.license.copyright_holder_required + and not self.copyright_holder + ): errors.append("Missing required copyright holder") - if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists(): + if ( + self.kind_id != content_kinds.EXERCISE + and not self.files.filter(preset__supplementary=False).exists() + ): errors.append("Missing default file") if self.kind_id == content_kinds.EXERCISE: # Check to see if the exercise has at least one assessment item that has: if not self.assessment_items.filter( # Item with non-blank raw data - ~Q(raw_data="") | ( + ~Q(raw_data="") + | ( # A non-blank question - ~Q(question='') + ~Q(question="") # Non-blank answers - & ~Q(answers='[]') + & ~Q(answers="[]") # With either an input question or one answer marked as correct - & (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'"correct":\s*true')) + & ( + Q(type=exercises.INPUT_QUESTION) + | Q(answers__iregex=r'"correct":\s*true') + ) ) ).exists(): - errors.append("No questions with question text and complete answers") + errors.append( + "No questions with question text and complete answers" + ) # Check that it has a mastery model set # Either check for the previous location for the mastery model, or rely on our completion criteria validation # that if it has been set, then it has been set correctly. - criterion = self.extra_fields.get("options", {}).get("completion_criteria") + criterion = self.extra_fields.get("options", {}).get( + "completion_criteria" + ) if not (self.extra_fields.get("mastery_model") or criterion): errors.append("Missing mastery criterion") if criterion: try: - completion_criteria.validate(criterion, kind=content_kinds.EXERCISE) + completion_criteria.validate( + criterion, kind=content_kinds.EXERCISE + ) except completion_criteria.ValidationError: errors.append("Mastery criterion is defined but is invalid") else: - criterion = self.extra_fields and self.extra_fields.get("options", {}).get("completion_criteria", {}) + criterion = self.extra_fields and self.extra_fields.get( + "options", {} + ).get("completion_criteria", {}) if criterion: try: completion_criteria.validate(criterion, kind=self.kind_id) @@ -1930,8 +2294,13 @@ def make_content_id_unique(self): If self is NOT an original contentnode (in other words, a copied contentnode) and a contentnode with same content_id exists then we update self's content_id. 
""" - is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id - node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id) + is_node_original = ( + self.original_source_node_id is None + or self.original_source_node_id == self.node_id + ) + node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter( + content_id=self.content_id + ) if (not is_node_original) and node_same_content_id.exists(): ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex) @@ -1958,9 +2327,7 @@ def move_to(self, target, *args, **kwargs): def set_default_learning_activity(self): if self.learning_activities is None: if self.kind in kind_activity_map: - self.learning_activities = { - kind_activity_map[self.kind]: True - } + self.learning_activities = {kind_activity_map[self.kind]: True} def save(self, skip_lock=False, *args, **kwargs): if self._state.adding: @@ -1988,15 +2355,21 @@ def save(self, skip_lock=False, *args, **kwargs): same_order = old_parent_id == self.parent_id if not same_order: - changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id]))) + changed_ids = list( + filter(lambda x: x is not None, set([old_parent_id, self.parent_id])) + ) else: changed_ids = [] if not same_order and not skip_lock: # Lock the mptt fields for the trees of the old and new parent - with ContentNode.objects.lock_mptt(*ContentNode.objects - .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid]) - .values_list('tree_id', flat=True).distinct()): + with ContentNode.objects.lock_mptt( + *ContentNode.objects.filter( + id__in=[pid for pid in [old_parent_id, self.parent_id] if pid] + ) + .values_list("tree_id", flat=True) + .distinct() + ): super(ContentNode, self).save(*args, **kwargs) # Always write to the database for the parent change updates, as we have # no persistent object references for the original and new parent to modify @@ -2013,7 +2386,7 @@ def save(self, skip_lock=False, *args, **kwargs): save.alters_data = True def delete(self, *args, **kwargs): - parent = self.parent or self._field_updates.changed().get('parent') + parent = self.parent or self._field_updates.changed().get("parent") if parent: parent.changed = True parent.save() @@ -2036,15 +2409,30 @@ def copy_to( excluded_descendants=None, can_edit_source_channel=None, batch_size=None, - progress_tracker=None + progress_tracker=None, ): - return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0] + return self._tree_manager.copy_node( + self, + target, + position, + pk, + mods, + excluded_descendants, + can_edit_source_channel, + batch_size, + progress_tracker, + )[0] def copy(self): return self.copy_to() def is_publishable(self): - return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists() + return ( + self.complete + and self.get_descendants(include_self=True) + .exclude(kind_id=content_kinds.TOPIC) + .exists() + ) class Meta: verbose_name = "Topic" @@ -2058,14 +2446,18 @@ class Meta: class ContentKind(models.Model): - kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices) + kind = models.CharField( + primary_key=True, max_length=200, choices=content_kinds.choices + ) def __str__(self): return self.kind class FileFormat(models.Model): - extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices) + 
extension = models.CharField( + primary_key=True, max_length=40, choices=file_formats.choices + ) mimetype = models.CharField(max_length=200, blank=True) def __str__(self): @@ -2073,7 +2465,9 @@ def __str__(self): class FormatPreset(models.Model): - id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices) + id = models.CharField( + primary_key=True, max_length=150, choices=format_presets.choices + ) readable_name = models.CharField(max_length=400) multi_language = models.BooleanField(default=False) supplementary = models.BooleanField(default=False) @@ -2081,7 +2475,9 @@ class FormatPreset(models.Model): subtitle = models.BooleanField(default=False) display = models.BooleanField(default=True) # Render on client side order = models.IntegerField(default=0) - kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL) + kind = models.ForeignKey( + ContentKind, related_name="format_presets", null=True, on_delete=models.SET_NULL + ) allowed_formats = models.ManyToManyField(FileFormat, blank=True) def __str__(self): @@ -2097,10 +2493,7 @@ def guess_format_preset(cls, filename): _, ext = os.path.splitext(filename) ext = ext.lstrip(".") - f = FormatPreset.objects.filter( - allowed_formats__extension=ext, - display=True - ) + f = FormatPreset.objects.filter(allowed_formats__extension=ext, display=True) return f.first() @classmethod @@ -2122,11 +2515,18 @@ class Language(models.Model): lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True) readable_name = models.CharField(max_length=100, blank=True) native_name = models.CharField(max_length=100, blank=True) - lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0]) + lang_direction = models.CharField( + max_length=3, + choices=languages.LANGUAGE_DIRECTIONS, + default=languages.LANGUAGE_DIRECTIONS[0][0], + ) def ietf_name(self): - return "{code}-{subcode}".format(code=self.lang_code, - subcode=self.lang_subcode) if self.lang_subcode else self.lang_code + return ( + "{code}-{subcode}".format(code=self.lang_code, subcode=self.lang_subcode) + if self.lang_subcode + else self.lang_code + ) def __str__(self): return self.ietf_name() @@ -2136,13 +2536,23 @@ def __str__(self): class AssessmentItem(models.Model): - type = models.CharField(max_length=50, choices=exercises.question_choices, default=exercises.MULTIPLE_SELECTION) + type = models.CharField( + max_length=50, + choices=exercises.question_choices, + default=exercises.MULTIPLE_SELECTION, + ) question = models.TextField(blank=True) hints = models.TextField(default="[]") answers = models.TextField(default="[]") order = models.IntegerField(default=1) - contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True, - db_index=True, on_delete=models.CASCADE) + contentnode = models.ForeignKey( + "ContentNode", + related_name="assessment_items", + blank=True, + null=True, + db_index=True, + on_delete=models.CASCADE, + ) # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False) raw_data = models.TextField(blank=True) @@ -2162,7 +2572,7 @@ class Meta: models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME), ] - unique_together = ['contentnode', 'assessment_id'] + unique_together = ["contentnode", "assessment_id"] _permission_filter = 
Q(tree_id=OuterRef("contentnode__tree_id")) @@ -2197,14 +2607,20 @@ def filter_view_queryset(cls, queryset, user): ) if not user_id: - return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True) + return queryset.annotate( + edit=boolean_val(False), view=boolean_val(False) + ).filter(public=True) edit_cte = PermissionCTE.editable_channels(user_id) view_cte = PermissionCTE.view_only_channels(user_id) - queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate( - edit=edit_cte.exists(cls._permission_filter), - view=view_cte.exists(cls._permission_filter), + queryset = ( + queryset.with_cte(edit_cte) + .with_cte(view_cte) + .annotate( + edit=edit_cte.exists(cls._permission_filter), + view=view_cte.exists(cls._permission_filter), + ) ) if user.is_admin: @@ -2236,8 +2652,14 @@ def delete(self, *args, **kwargs): class SlideshowSlide(models.Model): - contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True, - db_index=True, on_delete=models.CASCADE) + contentnode = models.ForeignKey( + "ContentNode", + related_name="slideshow_slides", + blank=True, + null=True, + db_index=True, + on_delete=models.CASCADE, + ) sort_order = models.FloatField(default=1.0) metadata = JSONField(default=dict) @@ -2246,9 +2668,16 @@ class StagedFile(models.Model): """ Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit """ + checksum = models.CharField(max_length=400, blank=True, db_index=True) file_size = models.IntegerField(blank=True, null=True) - uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE) + uploaded_by = models.ForeignKey( + User, + related_name="staged_files", + blank=True, + null=True, + on_delete=models.CASCADE, + ) FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx" @@ -2268,27 +2697,73 @@ class File(models.Model): The bottom layer of the contentDB schema, defines the basic building brick for content. Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3... 
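    Records are effectively content-addressed: when file_on_disk is supplied,
    save() md5-hashes its chunks to populate checksum, and the string form of
    a File is "<checksum>.<extension>".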
""" + id = UUIDField(primary_key=True, default=uuid.uuid4) checksum = models.CharField(max_length=400, blank=True, db_index=True) file_size = models.IntegerField(blank=True, null=True) - file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500, - blank=True) - contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE) - assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE) - slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE) - file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL) - preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL) - language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL) + file_on_disk = models.FileField( + upload_to=object_storage_name, + storage=default_storage, + max_length=500, + blank=True, + ) + contentnode = models.ForeignKey( + ContentNode, + related_name="files", + blank=True, + null=True, + db_index=True, + on_delete=models.CASCADE, + ) + assessment_item = models.ForeignKey( + AssessmentItem, + related_name="files", + blank=True, + null=True, + db_index=True, + on_delete=models.CASCADE, + ) + slideshow_slide = models.ForeignKey( + SlideshowSlide, + related_name="files", + blank=True, + null=True, + db_index=True, + on_delete=models.CASCADE, + ) + file_format = models.ForeignKey( + FileFormat, + related_name="files", + blank=True, + null=True, + db_index=True, + on_delete=models.SET_NULL, + ) + preset = models.ForeignKey( + FormatPreset, + related_name="files", + blank=True, + null=True, + db_index=True, + on_delete=models.SET_NULL, + ) + language = models.ForeignKey( + Language, related_name="files", blank=True, null=True, on_delete=models.SET_NULL + ) original_filename = models.CharField(max_length=255, blank=True) source_url = models.CharField(max_length=400, blank=True, null=True) - uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL) + uploaded_by = models.ForeignKey( + User, related_name="files", blank=True, null=True, on_delete=models.SET_NULL + ) modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True) duration = models.IntegerField(blank=True, null=True) objects = CustomManager() - _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id")) + _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q( + tree_id=OuterRef("assessment_item__contentnode__tree_id") + ) @classmethod def filter_edit_queryset(cls, queryset, user): @@ -2298,13 +2773,18 @@ def filter_edit_queryset(cls, queryset, user): return queryset.none() cte = PermissionCTE.editable_channels(user_id) - queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter)) + queryset = queryset.with_cte(cte).annotate( + edit=cte.exists(cls._permission_filter) + ) if user.is_admin: return queryset return queryset.filter( - Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True) + Q(edit=True) + | Q( + uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True + ) ) @classmethod @@ 
-2313,22 +2793,34 @@ def filter_view_queryset(cls, queryset, user): queryset = queryset.annotate( public=Exists( - Channel.objects.filter(public=True).filter( + Channel.objects.filter(public=True) + .filter( Q(main_tree__tree_id=OuterRef("contentnode__tree_id")) - | Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id")) - ).values("pk") + | Q( + main_tree__tree_id=OuterRef( + "assessment_item__contentnode__tree_id" + ) + ) + ) + .values("pk") ), ) if not user_id: - return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True) + return queryset.annotate( + edit=boolean_val(False), view=boolean_val(False) + ).filter(public=True) edit_cte = PermissionCTE.editable_channels(user_id) view_cte = PermissionCTE.view_only_channels(user_id) - queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate( - edit=edit_cte.exists(cls._permission_filter), - view=view_cte.exists(cls._permission_filter), + queryset = ( + queryset.with_cte(edit_cte) + .with_cte(view_cte) + .annotate( + edit=edit_cte.exists(cls._permission_filter), + view=view_cte.exists(cls._permission_filter), + ) ) if user.is_admin: @@ -2338,14 +2830,18 @@ def filter_view_queryset(cls, queryset, user): Q(view=True) | Q(edit=True) | Q(public=True) - | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True) + | Q( + uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True + ) ) class Admin: pass def __str__(self): - return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension) + return "{checksum}{extension}".format( + checksum=self.checksum, extension="." + self.file_format.extension + ) def filename(self): """ @@ -2385,7 +2881,9 @@ def save(self, set_by_file_on_disk=True, *args, **kwargs): if self.file_format_id not in dict(file_formats.choices): raise ValidationError("Invalid file_format") - if set_by_file_on_disk and self.file_on_disk: # if file_on_disk is supplied, hash out the file + if ( + set_by_file_on_disk and self.file_on_disk + ): # if file_on_disk is supplied, hash out the file if self.checksum is None or self.checksum == "": md5 = hashlib.md5() for chunk in self.file_on_disk.chunks(): @@ -2395,11 +2893,13 @@ def save(self, set_by_file_on_disk=True, *args, **kwargs): if not self.file_size: self.file_size = self.file_on_disk.size if not self.file_format_id: - ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.') + ext = os.path.splitext(self.file_on_disk.name)[1].lstrip(".") if ext in list(dict(file_formats.choices).keys()): self.file_format_id = ext else: - raise ValueError("Files of type `{}` are not supported.".format(ext)) + raise ValueError( + "Files of type `{}` are not supported.".format(ext) + ) super(File, self).save(*args, **kwargs) @@ -2408,15 +2908,20 @@ def save(self, set_by_file_on_disk=True, *args, **kwargs): class Meta: indexes = [ - models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME), + models.Index( + fields=["checksum", "file_size"], name=FILE_DISTINCT_INDEX_NAME + ), models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME), ] constraints = [ # enforces that duration is null when not a media preset, but the duration may be null for media presets # but if not-null, should be greater than 0 models.CheckConstraint( - check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)), - name=FILE_DURATION_CONSTRAINT + check=( + Q(preset__in=MEDIA_PRESETS, duration__gt=0) + | Q(duration__isnull=True) + ), + 
name=FILE_DURATION_CONSTRAINT, ) ] @@ -2430,13 +2935,17 @@ def auto_delete_file_on_delete(sender, instance, **kwargs): """ # Recalculate storage from contentcuration.utils.user import calculate_user_storage + if instance.uploaded_by_id: calculate_user_storage(instance.uploaded_by_id) def delete_empty_file_reference(checksum, extension): - filename = checksum + '.' + extension - if not File.objects.filter(checksum=checksum).exists() and not Channel.objects.filter(thumbnail=filename).exists(): + filename = checksum + "." + extension + if ( + not File.objects.filter(checksum=checksum).exists() + and not Channel.objects.filter(thumbnail=filename).exists() + ): storage_path = generate_object_storage_name(checksum, filename) if default_storage.exists(storage_path): default_storage.delete(storage_path) @@ -2446,22 +2955,33 @@ class PrerequisiteContentRelationship(models.Model): """ Predefine the prerequisite relationship between two ContentNode objects. """ - target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE) - prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE) + + target_node = models.ForeignKey( + ContentNode, + related_name="%(app_label)s_%(class)s_target_node", + on_delete=models.CASCADE, + ) + prerequisite = models.ForeignKey( + ContentNode, + related_name="%(app_label)s_%(class)s_prerequisite", + on_delete=models.CASCADE, + ) class Meta: - unique_together = ['target_node', 'prerequisite'] + unique_together = ["target_node", "prerequisite"] def clean(self, *args, **kwargs): # self reference exception if self.target_node == self.prerequisite: - raise IntegrityError('Cannot self reference as prerequisite.') + raise IntegrityError("Cannot self reference as prerequisite.") # immediate cyclic exception - if PrerequisiteContentRelationship.objects.using(self._state.db) \ - .filter(target_node=self.prerequisite, prerequisite=self.target_node): + if PrerequisiteContentRelationship.objects.using(self._state.db).filter( + target_node=self.prerequisite, prerequisite=self.target_node + ): raise IntegrityError( - 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!' - % (self.target_node, self.prerequisite)) + "Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!" + % (self.target_node, self.prerequisite) + ) # distant cyclic exception # elif # raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % ( @@ -2474,41 +2994,60 @@ def save(self, *args, **kwargs): super(PrerequisiteContentRelationship, self).save(*args, **kwargs) def __unicode__(self): - return u'%s' % (self.pk) + return "%s" % (self.pk) class RelatedContentRelationship(models.Model): """ Predefine the related relationship between two ContentNode objects. 
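    Unlike PrerequisiteContentRelationship, this relation is treated as
    symmetric: saving B->A when A->B already exists is silently skipped
    instead of raising an IntegrityError.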
""" - contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE) - contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE) + + contentnode_1 = models.ForeignKey( + ContentNode, related_name="%(app_label)s_%(class)s_1", on_delete=models.CASCADE + ) + contentnode_2 = models.ForeignKey( + ContentNode, related_name="%(app_label)s_%(class)s_2", on_delete=models.CASCADE + ) class Meta: - unique_together = ['contentnode_1', 'contentnode_2'] + unique_together = ["contentnode_1", "contentnode_2"] def save(self, *args, **kwargs): # self reference exception if self.contentnode_1 == self.contentnode_2: - raise IntegrityError('Cannot self reference as related.') + raise IntegrityError("Cannot self reference as related.") # handle immediate cyclic - if RelatedContentRelationship.objects.using(self._state.db) \ - .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1): + if RelatedContentRelationship.objects.using(self._state.db).filter( + contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1 + ): return # silently cancel the save super(RelatedContentRelationship, self).save(*args, **kwargs) class Invitation(models.Model): """ Invitation to edit channel """ + id = UUIDField(primary_key=True, default=uuid.uuid4) accepted = models.BooleanField(default=False) declined = models.BooleanField(default=False) revoked = models.BooleanField(default=False) - invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to') + invited = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.SET_NULL, + null=True, + related_name="sent_to", + ) share_mode = models.CharField(max_length=50, default=EDIT_ACCESS) email = models.EmailField(max_length=100, null=True) - sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE) - channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE) + sender = models.ForeignKey( + settings.AUTH_USER_MODEL, + related_name="sent_by", + null=True, + on_delete=models.CASCADE, + ) + channel = models.ForeignKey( + "Channel", null=True, related_name="pending_editors", on_delete=models.CASCADE + ) first_name = models.CharField(max_length=100, blank=True) last_name = models.CharField(max_length=100, blank=True, null=True) @@ -2536,9 +3075,7 @@ def filter_edit_queryset(cls, queryset, user): return queryset return queryset.filter( - Q(email__iexact=user.email) - | Q(sender=user) - | Q(channel__editors=user) + Q(email__iexact=user.email) | Q(sender=user) | Q(channel__editors=user) ).distinct() @classmethod @@ -2562,22 +3099,38 @@ class Change(models.Model): # so that we can validate they have permissions to do so # allow to be null so that we don't lose changes if a user # account is hard deleted. - created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name="changes_by_user") + created_by = models.ForeignKey( + settings.AUTH_USER_MODEL, + null=True, + blank=True, + on_delete=models.SET_NULL, + related_name="changes_by_user", + ) # Almost all changes are related to channels, but some are specific only to users # so we allow this to be nullable for these edge cases. # Indexed by default because it's a ForeignKey field. 
- channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE) + channel = models.ForeignKey( + Channel, null=True, blank=True, on_delete=models.CASCADE + ) # For those changes related to users, store a user value instead of channel # this may be different to created_by, as changes to invitations affect individual users. # Indexed by default because it's a ForeignKey field. - user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name="changes_about_user") + user = models.ForeignKey( + settings.AUTH_USER_MODEL, + null=True, + blank=True, + on_delete=models.CASCADE, + related_name="changes_about_user", + ) # Use client_rev to keep track of changes coming from the client side # but let it be blank or null for changes we generate on the server side client_rev = models.IntegerField(null=True, blank=True) # client_rev numbers are by session, we add the session key here for bookkeeping # to allow a check within the same session to return whether a change has been applied # or not, and hence remove it from the frontend - session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL) + session = models.ForeignKey( + Session, null=True, blank=True, on_delete=models.SET_NULL + ) table = models.CharField(max_length=32) change_type = models.IntegerField() # Use the DRF JSONEncoder class as the encoder here @@ -2612,12 +3165,20 @@ def _create_from_change( ): change_type = data.pop("type") if table is None or table not in ALL_TABLES: - raise TypeError("table is a required argument for creating changes and must be a valid table name") + raise TypeError( + "table is a required argument for creating changes and must be a valid table name" + ) if change_type is None or change_type not in ALL_CHANGES: - raise TypeError("change_type is a required argument for creating changes and must be a valid change type integer") + raise TypeError( + "change_type is a required argument for creating changes and must be a valid change type integer" + ) # Don't let someone mark a change as unpublishable if it's not in the list of tables that make changes that we can publish # also, by definition, publishing is not a publishable change - this probably doesn't matter, but making sense is nice. 
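        # Concretely: a hypothetical call like
        #     Change.create_change({"type": PUBLISHED, "table": "channel", ...})
        # is always stored with unpublishable=True, whatever flag the caller
        # passed ("channel" is a placeholder table name here).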
- unpublishable = unpublishable or table not in PUBLISHABLE_CHANGE_TABLES or change_type == PUBLISHED + unpublishable = ( + unpublishable + or table not in PUBLISHABLE_CHANGE_TABLES + or change_type == PUBLISHED + ) return cls( session_id=session_key, created_by_id=created_by_id, @@ -2632,7 +3193,14 @@ def _create_from_change( ) @classmethod - def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False, unpublishable=False): + def create_changes( + cls, + changes, + created_by_id=None, + session_key=None, + applied=False, + unpublishable=False, + ): change_models = [] for change in changes: change_models.append( @@ -2641,7 +3209,7 @@ def create_changes(cls, changes, created_by_id=None, session_key=None, applied=F session_key=session_key, applied=applied, unpublishable=unpublishable, - **change + **change, ) ) @@ -2649,22 +3217,37 @@ def create_changes(cls, changes, created_by_id=None, session_key=None, applied=F return change_models @classmethod - def create_change(cls, change, created_by_id=None, session_key=None, applied=False, unpublishable=False): - obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, unpublishable=unpublishable, **change) + def create_change( + cls, + change, + created_by_id=None, + session_key=None, + applied=False, + unpublishable=False, + ): + obj = cls._create_from_change( + created_by_id=created_by_id, + session_key=session_key, + applied=applied, + unpublishable=unpublishable, + **change, + ) obj.save() return obj @classmethod def serialize(cls, change): datum = get_attribute(change, ["kwargs"]).copy() - datum.update({ - "server_rev": get_attribute(change, ["server_rev"]), - "table": get_attribute(change, ["table"]), - "type": get_attribute(change, ["change_type"]), - "channel_id": get_attribute(change, ["channel_id"]), - "user_id": get_attribute(change, ["user_id"]), - "created_by_id": get_attribute(change, ["created_by_id"]) - }) + datum.update( + { + "server_rev": get_attribute(change, ["server_rev"]), + "table": get_attribute(change, ["table"]), + "type": get_attribute(change, ["change_type"]), + "channel_id": get_attribute(change, ["channel_id"]), + "user_id": get_attribute(change, ["user_id"]), + "created_by_id": get_attribute(change, ["created_by_id"]), + } + ) return datum def serialize_to_change_dict(self): @@ -2677,22 +3260,31 @@ class CustomTaskMetadata(models.Model): max_length=255, unique=True, ) - user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="tasks", on_delete=models.CASCADE, null=True) + user = models.ForeignKey( + settings.AUTH_USER_MODEL, + related_name="tasks", + on_delete=models.CASCADE, + null=True, + ) channel_id = DjangoUUIDField(db_index=True, null=True, blank=True) - progress = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)]) + progress = models.IntegerField( + null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)] + ) # A hash of the task name and kwargs for identifying repeat tasks signature = models.CharField(null=True, blank=False, max_length=32) date_created = models.DateTimeField( auto_now_add=True, - verbose_name=_('Created DateTime'), - help_text=_('Datetime field when the custom_metadata for task was created in UTC') + verbose_name=_("Created DateTime"), + help_text=_( + "Datetime field when the custom_metadata for task was created in UTC" + ), ) class Meta: indexes = [ models.Index( - fields=['signature'], - name='task_result_signature', + fields=["signature"], 
+ name="task_result_signature", ), ] @@ -2738,7 +3330,9 @@ class Meta: class BaseFeedbackInteractionEvent(models.Model): - feedback_type = models.CharField(max_length=50, choices=feedback.FEEDBACK_TYPE_CHOICES) + feedback_type = models.CharField( + max_length=50, choices=feedback.FEEDBACK_TYPE_CHOICES + ) feedback_reason = models.TextField(max_length=1500) class Meta: diff --git a/contentcuration/contentcuration/node_metadata/cte.py b/contentcuration/contentcuration/node_metadata/cte.py index 36b0241635..29154710ac 100644 --- a/contentcuration/contentcuration/node_metadata/cte.py +++ b/contentcuration/contentcuration/node_metadata/cte.py @@ -34,10 +34,10 @@ def get(self): return self.cte def build(self): - raise NotImplementedError('Build method must create CTE') + raise NotImplementedError("Build method must create CTE") def join(self, query): - raise NotImplementedError('Join method must join query with CTE') + raise NotImplementedError("Join method must join query with CTE") @property def col(self): @@ -47,17 +47,19 @@ def col(self): class LeftContentCTE(MetadataCTE): def join(self, query): cte = self.get() - return cte.join(query, content_id=cte.col.content_id, _join_type=LOUTER).with_cte(cte) + return cte.join( + query, content_id=cte.col.content_id, _join_type=LOUTER + ).with_cte(cte) class TreeMetadataCTE(MetadataCTE): - columns = ['tree_id'] + columns = ["tree_id"] def build(self): - tree_ids = self.query.values('tree_id') + tree_ids = self.query.values("tree_id") return With( ContentNode.objects.filter(tree_id__in=tree_ids).values(*set(self.columns)), - name='tree_cte' + name="tree_cte", ) def join(self, query): @@ -66,13 +68,14 @@ def join(self, query): class AssessmentCountCTE(LeftContentCTE): - columns = ['content_id'] + columns = ["content_id"] def build(self): - q = self.query.filter(kind_id=content_kinds.EXERCISE, assessment_items__deleted=False)\ - .annotate(assessment_count=Count(F('assessment_items__id'), distinct=True)) + q = self.query.filter( + kind_id=content_kinds.EXERCISE, assessment_items__deleted=False + ).annotate(assessment_count=Count(F("assessment_items__id"), distinct=True)) - return With(q.values(*set(self.columns)), name='assessment_count_cte') + return With(q.values(*set(self.columns)), name="assessment_count_cte") class FileMetadataCTE(LeftContentCTE): @@ -83,19 +86,22 @@ def build(self): columns = set(self.columns) files = nodes.values( - 'content_id', - **{column: F('files__{}'.format(column)) for column in columns} + "content_id", + **{column: F("files__{}".format(column)) for column in columns} ).distinct() assessment_files = nodes.values( - 'content_id', - **{column: F('assessment_items__files__{}'.format(column)) for column in columns} + "content_id", + **{ + column: F("assessment_items__files__{}".format(column)) + for column in columns + } ).distinct() - return With(files.union(assessment_files).values(*columns), name='file_cte') + return With(files.union(assessment_files).values(*columns), name="file_cte") class ResourceSizeCTE(LeftContentCTE): - columns = ['content_id'] + columns = ["content_id"] def build(self): """ @@ -103,20 +109,24 @@ def build(self): file records would produce incorrect result for resource sizes due to summing. 
""" files_cte = FileMetadataCTE(self.query) - files_cte.add_columns(('file_size', 'checksum')) + files_cte.add_columns(("file_size", "checksum")) - resource_condition = BooleanComparison(F('kind_id'), '!=', Value(content_kinds.TOPIC)) + resource_condition = BooleanComparison( + F("kind_id"), "!=", Value(content_kinds.TOPIC) + ) - q = files_cte.join(self.query).annotate(resource_size=Sum( - Case( - # aggregate file_size when selected node is not a topic - When( - condition=WhenQ(resource_condition), - then=Coalesce(files_cte.col.file_size, Value(0)), + q = files_cte.join(self.query).annotate( + resource_size=Sum( + Case( + # aggregate file_size when selected node is not a topic + When( + condition=WhenQ(resource_condition), + then=Coalesce(files_cte.col.file_size, Value(0)), + ), + default=Value(0), ), - default=Value(0) - ), - output_field=IntegerField() - )) + output_field=IntegerField(), + ) + ) - return With(q.values(*set(self.columns)), name='resource_size_cte') + return With(q.values(*set(self.columns)), name="resource_size_cte") diff --git a/contentcuration/contentcuration/node_metadata/query.py b/contentcuration/contentcuration/node_metadata/query.py index eb544a1658..221e80f341 100644 --- a/contentcuration/contentcuration/node_metadata/query.py +++ b/contentcuration/contentcuration/node_metadata/query.py @@ -50,7 +50,9 @@ def get(self, node_pk): :return: A dict of metadata for the node identified by `node_pk` """ if self.query is None: - return Metadata(ContentNode.filter_by_pk(pk=node_pk), **self.annotations).get(node_pk) + return Metadata( + ContentNode.filter_by_pk(pk=node_pk), **self.annotations + ).get(node_pk) if self.metadata is None: self.metadata = {} @@ -58,7 +60,7 @@ def get(self, node_pk): # Finally, clear ordering (MPTT adds ordering by default) for row in query: - self.metadata.update({row.pop('id'): row}) + self.metadata.update({row.pop("id"): row}) return self.metadata.get(node_pk) @@ -67,7 +69,7 @@ def build(self): :return: A complete queryset to return the metadata """ if len(self.annotations) == 0: - raise ValueError('No metadata to retrieve') + raise ValueError("No metadata to retrieve") ctes = [] @@ -90,19 +92,26 @@ def build(self): if len(ctes) > 0: for cte in ctes: query = cte.join(query) - annotations.update({ - field_name: annotation.get_annotation(cte) - for field_name, annotation in self.annotations.items() - if isinstance(annotation, MetadataAnnotation) - and annotation.cte and isinstance(cte, annotation.cte) - }) - - annotations.update(**{ - field_name: annotation.get_annotation(None) - if isinstance(annotation, MetadataAnnotation) else annotation - for field_name, annotation in self.annotations.items() - if not isinstance(annotation, MetadataAnnotation) or annotation.cte is None - }) + annotations.update( + { + field_name: annotation.get_annotation(cte) + for field_name, annotation in self.annotations.items() + if isinstance(annotation, MetadataAnnotation) + and annotation.cte + and isinstance(cte, annotation.cte) + } + ) + + annotations.update( + **{ + field_name: annotation.get_annotation(None) + if isinstance(annotation, MetadataAnnotation) + else annotation + for field_name, annotation in self.annotations.items() + if not isinstance(annotation, MetadataAnnotation) + or annotation.cte is None + } + ) # Finally, clear ordering (MPTT adds ordering by default) - return query.values('id').annotate(**annotations).order_by() + return query.values("id").annotate(**annotations).order_by() diff --git 
a/contentcuration/contentcuration/not_production_settings.py b/contentcuration/contentcuration/not_production_settings.py index 8d3bcd3e12..afcc6460bc 100644 --- a/contentcuration/contentcuration/not_production_settings.py +++ b/contentcuration/contentcuration/not_production_settings.py @@ -3,8 +3,8 @@ ALLOWED_HOSTS = ["studio.local", "192.168.31.9", "127.0.0.1", "*"] ACCOUNT_ACTIVATION_DAYS = 7 -EMAIL_BACKEND = 'postmark.django_backend.EmailBackend' -POSTMARK_API_KEY = 'POSTMARK_API_TEST' +EMAIL_BACKEND = "postmark.django_backend.EmailBackend" +POSTMARK_API_KEY = "POSTMARK_API_TEST" POSTMARK_TEST_MODE = True SITE_ID = 2 diff --git a/contentcuration/contentcuration/perftools/objective.py b/contentcuration/contentcuration/perftools/objective.py index 8777e5a089..0405be92d5 100644 --- a/contentcuration/contentcuration/perftools/objective.py +++ b/contentcuration/contentcuration/perftools/objective.py @@ -1,8 +1,12 @@ import sys -# TODO: Investigate more precise timing libraries import time -from contentcuration.models import ContentKind, ContentNode, File +from contentcuration.models import ContentKind +from contentcuration.models import ContentNode +from contentcuration.models import File + +# TODO: Investigate more precise timing libraries + def print_progress(text): sys.stdout.write("\r" + text) @@ -16,15 +20,21 @@ class Objective: """ def __init__(self): - self.topic, topic_created = ContentKind.objects.get_or_create(kind='Topic') - self.root_node = ContentNode.objects.create(title='test_server_perf Root Node', kind=self.topic) + self.topic, topic_created = ContentKind.objects.get_or_create(kind="Topic") + self.root_node = ContentNode.objects.create( + title="test_server_perf Root Node", kind=self.topic + ) def __del__(self): if self.root_node: - raise Exception("Test cleanup not run. Ensure you manually delete root node with id {} and all nodes and files that are connected to it.".format(self.root_node.pk)) + raise Exception( + "Test cleanup not run. Ensure you manually delete root node with id {} and all nodes and files that are connected to it.".format( + self.root_node.pk + ) + ) def cleanup(self): - print("Performing clean up, please wait...") + print("Performing clean up, please wait...") # noqa: T201 try: if self.root_node: files = File.objects.filter(contentnode=self.root_node) @@ -33,9 +43,13 @@ def cleanup(self): self.root_node.delete() self.root_node = None - except Exception as e: + except Exception: if self.root_node: - print("Error in cleanup. Root node with id {} may still exist.".format(self.root_node.pk)) + print( # noqa: T201 + "Error in cleanup. 
Root node with id {} may still exist.".format( + self.root_node.pk + ) + ) raise def create_content_nodes(self, num_nodes=100): @@ -52,11 +66,15 @@ def create_content_nodes(self, num_nodes=100): start = time.time() for i in range(num_nodes): - node = ContentNode.objects.create(title="test_server_perf Node {}".format(i), parent=parent, kind=self.topic) + node = ContentNode.objects.create( + title="test_server_perf Node {}".format(i), + parent=parent, + kind=self.topic, + ) # try to create a multi-level tree structure to better test tree recalc operations if num_nodes > 20: if i % (num_nodes / 10) == 0: - sys.stdout.write('.') + sys.stdout.write(".") sys.stdout.flush() parent = node @@ -76,7 +94,7 @@ def create_files(self, num_files=100): start = time.time() for i in range(num_files): - file_obj = File.objects.create() + _ = File.objects.create() elapsed = time.time() - start if File.objects.count() != current_files + num_files: @@ -100,7 +118,11 @@ def get_object_creation_stats(self, object_type, num_objects=100, num_runs=10): run_times = [] for i in range(num_runs): - print_progress("Creating {} {} objects. Test run {} of {}".format(num_objects, object_type, i+1, num_runs)) + print_progress( + "Creating {} {} objects. Test run {} of {}".format( + num_objects, object_type, i + 1, num_runs + ) + ) run_times.append(creation_func(num_objects)) return self._calc_stats(run_times, num_objects) @@ -116,7 +138,11 @@ def get_object_creation_stats_mptt_delay(self, num_objects=100, num_runs=10): run_times = [] for i in range(num_runs): - print_progress("Creating {} {} objects with delay_mptt_updates. Test run {} of {}".format(num_objects, 'ContentNode', i+1, num_runs)) + print_progress( + "Creating {} {} objects with delay_mptt_updates. Test run {} of {}".format( + num_objects, "ContentNode", i + 1, num_runs + ) + ) with ContentNode.objects.delay_mptt_updates(): run_times.append(self.create_content_nodes(num_objects)) @@ -128,8 +154,10 @@ def get_large_channel_creation_stats(self): num_files = num_nodes * 3 stats = {} - stats['Node creation time'] = self.get_object_creation_stats_mptt_delay(num_nodes, num_runs=1)['min'] - stats['File creation time'] = self.create_files(num_files) + stats["Node creation time"] = self.get_object_creation_stats_mptt_delay( + num_nodes, num_runs=1 + )["min"] + stats["File creation time"] = self.create_files(num_files) return stats @@ -141,8 +169,8 @@ def _calc_stats(self, run_times, num_items): average = total_time / len(run_times) return { - 'min': run_times[0], - 'max': run_times[-1], - 'average': average, - 'per_record_average': average / num_items + "min": run_times[0], + "max": run_times[-1], + "average": average, + "per_record_average": average / num_items, } diff --git a/contentcuration/contentcuration/production_settings.py b/contentcuration/contentcuration/production_settings.py index 969ff4dca4..a00bf43a41 100644 --- a/contentcuration/contentcuration/production_settings.py +++ b/contentcuration/contentcuration/production_settings.py @@ -10,11 +10,11 @@ MEDIA_ROOT = base_settings.STORAGE_ROOT -DEFAULT_FILE_STORAGE = 'contentcuration.utils.gcs_storage.CompositeGCS' +DEFAULT_FILE_STORAGE = "contentcuration.utils.gcs_storage.CompositeGCS" SESSION_ENGINE = "django.contrib.sessions.backends.db" # email settings -EMAIL_BACKEND = 'postmark.django_backend.EmailBackend' +EMAIL_BACKEND = "postmark.django_backend.EmailBackend" POSTMARK_API_KEY = get_secret("EMAIL_CREDENTIALS_POSTMARK_API_KEY") LANGUAGE_CODE = get_secret("LANGUAGE_CODE") or "en" @@ -22,22 +22,22 @@ # Google 
drive settings GOOGLE_STORAGE_REQUEST_SHEET = "1uC1nsJPx_5g6pQT6ay0qciUVya0zUFJ8wIwbsTEh60Y" GOOGLE_FEEDBACK_SHEET = "1aPQ9_zMJgNAMf0Oqr26NChzwSEJz6oQHuPCPKmNRFRQ" -GOOGLE_AUTH_JSON = get_secret("GOOGLE_DRIVE_AUTH_JSON") or base_settings.GOOGLE_AUTH_JSON +GOOGLE_AUTH_JSON = ( + get_secret("GOOGLE_DRIVE_AUTH_JSON") or base_settings.GOOGLE_AUTH_JSON +) # Activate django-prometheus -INSTALLED_APPS = INSTALLED_APPS + ( - "django_prometheus", -) +INSTALLED_APPS = INSTALLED_APPS + ("django_prometheus",) MIDDLEWARE = ( - ("django_prometheus.middleware.PrometheusBeforeMiddleware",) + - MIDDLEWARE + - ("django_prometheus.middleware.PrometheusAfterMiddleware",) + ("django_prometheus.middleware.PrometheusBeforeMiddleware",) + + MIDDLEWARE + + ("django_prometheus.middleware.PrometheusAfterMiddleware",) ) CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.redis.RedisCache" if SITE_READ_ONLY: - CACHES['default']['BACKEND'] = "django_prometheus.cache.backends.locmem.LocMemCache" + CACHES["default"]["BACKEND"] = "django_prometheus.cache.backends.locmem.LocMemCache" DATABASES["default"]["ENGINE"] = "django_prometheus.db.backends.postgresql" diff --git a/contentcuration/contentcuration/serializers.py b/contentcuration/contentcuration/serializers.py index 7b39963c0f..c1a6082402 100644 --- a/contentcuration/contentcuration/serializers.py +++ b/contentcuration/contentcuration/serializers.py @@ -26,14 +26,19 @@ class PublicChannelSerializer(serializers.ModelSerializer): """ Called by the public API, primarily used by Kolibri. Contains information more specific to Kolibri's needs. """ - kind_count = serializers.SerializerMethodField('generate_kind_count') - matching_tokens = serializers.SerializerMethodField('match_tokens') - icon_encoding = serializers.SerializerMethodField('get_thumbnail_encoding') - version_notes = serializers.SerializerMethodField('sort_published_data') + + kind_count = serializers.SerializerMethodField("generate_kind_count") + matching_tokens = serializers.SerializerMethodField("match_tokens") + icon_encoding = serializers.SerializerMethodField("get_thumbnail_encoding") + version_notes = serializers.SerializerMethodField("sort_published_data") def match_tokens(self, channel): - tokens = json.loads(channel.tokens) if hasattr(channel, 'tokens') else [] - return list(channel.secret_tokens.filter(token__in=tokens).values_list('token', flat=True)) + tokens = json.loads(channel.tokens) if hasattr(channel, "tokens") else [] + return list( + channel.secret_tokens.filter(token__in=tokens).values_list( + "token", flat=True + ) + ) def get_thumbnail_encoding(self, channel): """ @@ -44,7 +49,7 @@ def get_thumbnail_encoding(self, channel): if channel.icon_encoding: return channel.icon_encoding if channel.thumbnail_encoding: - base64 = channel.thumbnail_encoding.get('base64') + base64 = channel.thumbnail_encoding.get("base64") if base64: return base64 @@ -54,14 +59,27 @@ def generate_kind_count(self, channel): return channel.published_kind_count and json.loads(channel.published_kind_count) def sort_published_data(self, channel): - data = {int(k): v['version_notes'] for k, v in channel.published_data.items()} + data = {int(k): v["version_notes"] for k, v in channel.published_data.items()} return OrderedDict(sorted(data.items())) class Meta: model = Channel - fields = ('id', 'name', 'language', 'included_languages', 'description', 'total_resource_count', 'version', - 'kind_count', 'published_size', 'last_published', 'icon_encoding', 'matching_tokens', 'public', - 'version_notes') + fields = ( 
+ "id", + "name", + "language", + "included_languages", + "description", + "total_resource_count", + "version", + "kind_count", + "published_size", + "last_published", + "icon_encoding", + "matching_tokens", + "public", + "version_notes", + ) class SimplifiedChannelProbeCheckSerializer(serializers.ModelSerializer): @@ -69,13 +87,14 @@ class SimplifiedChannelProbeCheckSerializer(serializers.ModelSerializer): class Meta: model = Channel - fields = ('id', 'name', 'description', 'thumbnail', 'main_tree') + fields = ("id", "name", "description", "thumbnail", "main_tree") class GetTreeDataSerializer(serializers.Serializer): """ Used by get_*_tree_data endpoints to ontain "lightweight" tree data. """ + channel_id = serializers.CharField(required=True) - tree = serializers.CharField(required=False, default='main') + tree = serializers.CharField(required=False, default="main") node_id = serializers.CharField(required=False) diff --git a/contentcuration/contentcuration/settings.py b/contentcuration/contentcuration/settings.py index a911055032..0f18ed0131 100644 --- a/contentcuration/contentcuration/settings.py +++ b/contentcuration/contentcuration/settings.py @@ -35,29 +35,34 @@ EXPORT_ROOT = "exports" BETA_MODE = os.getenv("STUDIO_BETA_MODE") -RUNNING_TESTS = (sys.argv[1:2] == ['test'] or os.path.basename(sys.argv[0]) == 'pytest') +RUNNING_TESTS = sys.argv[1:2] == ["test"] or os.path.basename(sys.argv[0]) == "pytest" # hardcoding all this info for now. Potential for shared reference with webpack? WEBPACK_LOADER = { - 'DEFAULT': { + "DEFAULT": { # trailing empty string to include trailing / - 'BUNDLE_DIR_NAME': os.path.join('studio', ''), - 'STATS_FILE': os.path.join(BASE_DIR, 'build', 'webpack-stats.json'), + "BUNDLE_DIR_NAME": os.path.join("studio", ""), + "STATS_FILE": os.path.join(BASE_DIR, "build", "webpack-stats.json"), } } -PERMISSION_TEMPLATE_ROOT = os.path.join(BASE_DIR, "contentcuration", "templates", "permissions") +PERMISSION_TEMPLATE_ROOT = os.path.join( + BASE_DIR, "contentcuration", "templates", "permissions" +) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = os.getenv("DJANGO_SECRET_KEY") or '_s0k@&o%m6bzg7s(0p(w6z5xbo%vy%mj+xx(w3mhs=f0ve0+h2' +SECRET_KEY = ( + os.getenv("DJANGO_SECRET_KEY") + or "_s0k@&o%m6bzg7s(0p(w6z5xbo%vy%mj+xx(w3mhs=f0ve0+h2" +) # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = True -SESSION_COOKIE_NAME = 'kolibri_studio_sessionid' +SESSION_COOKIE_NAME = "kolibri_studio_sessionid" ALLOWED_HOSTS = ["*"] # In production, we serve through a file socket, so this is OK. 
@@ -65,74 +70,75 @@ # Application definition INSTALLED_APPS = ( - 'contentcuration.apps.ContentConfig', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.admin', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.sites', - 'django.contrib.staticfiles', - 'rest_framework', - 'django_js_reverse', - 'kolibri_content', - 'readonly', - 'le_utils', - 'rest_framework.authtoken', - 'search', - 'django_s3_storage', - 'webpack_loader', - 'django_filters', - 'django.contrib.postgres', - 'django_celery_results', - 'kolibri_public', - 'automation', + "contentcuration.apps.ContentConfig", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.admin", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.sites", + "django.contrib.staticfiles", + "rest_framework", + "django_js_reverse", + "kolibri_content", + "readonly", + "le_utils", + "rest_framework.authtoken", + "search", + "django_s3_storage", + "webpack_loader", + "django_filters", + "django.contrib.postgres", + "django_celery_results", + "kolibri_public", + "automation", ) SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" REDIS_URL = "redis://:{password}@{endpoint}/".format( password=os.getenv("CELERY_REDIS_PASSWORD") or "", - endpoint=os.getenv("CELERY_BROKER_ENDPOINT") or "localhost:6379") + endpoint=os.getenv("CELERY_BROKER_ENDPOINT") or "localhost:6379", +) CACHE_REDIS_DB = os.getenv("CACHE_REDIS_DB") or "1" CACHES = { - 'default': { - 'BACKEND': 'django_redis.cache.RedisCache', - 'LOCATION': '{url}{db}'.format(url=REDIS_URL, db=CACHE_REDIS_DB), - 'OPTIONS': { - 'CLIENT_CLASS': 'django_redis.client.DefaultClient', - } + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "{url}{db}".format(url=REDIS_URL, db=CACHE_REDIS_DB), + "OPTIONS": { + "CLIENT_CLASS": "django_redis.client.DefaultClient", + }, } } # READ-ONLY SETTINGS # Set STUDIO_INCIDENT_TYPE to a key from contentcuration.utils.incidents to activate -INCIDENT_TYPE = os.getenv('STUDIO_INCIDENT_TYPE') +INCIDENT_TYPE = os.getenv("STUDIO_INCIDENT_TYPE") INCIDENT = INCIDENTS.get(INCIDENT_TYPE) -SITE_READ_ONLY = INCIDENT and INCIDENT['readonly'] +SITE_READ_ONLY = INCIDENT and INCIDENT["readonly"] # If Studio is in readonly mode, it will throw a DatabaseWriteError # Use a local cache to bypass the readonly property if SITE_READ_ONLY: - CACHES['default']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache' - CACHES['default']['LOCATION'] = 'readonly_cache' + CACHES["default"]["BACKEND"] = "django.core.cache.backends.locmem.LocMemCache" + CACHES["default"]["LOCATION"] = "readonly_cache" MIDDLEWARE = ( # 'django.middleware.cache.UpdateCacheMiddleware', - 'contentcuration.middleware.session.KolibriStudioSessionMiddleware', - 'contentcuration.middleware.locale.KolibriStudioLocaleMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.common.BrokenLinkEmailsMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.middleware.http.ConditionalGetMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', - 'django.middleware.security.SecurityMiddleware', - 'contentcuration.middleware.db_readonly.DatabaseReadOnlyMiddleware', + "contentcuration.middleware.session.KolibriStudioSessionMiddleware", + "contentcuration.middleware.locale.KolibriStudioLocaleMiddleware", + 
"django.middleware.common.CommonMiddleware", + "django.middleware.common.BrokenLinkEmailsMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.middleware.http.ConditionalGetMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", + "django.middleware.security.SecurityMiddleware", + "contentcuration.middleware.db_readonly.DatabaseReadOnlyMiddleware", # 'django.middleware.cache.FetchFromCacheMiddleware', ) @@ -142,114 +148,109 @@ ) + MIDDLEWARE SUPPORTED_BROWSERS = [ - 'Chrome', - 'Firefox', - 'Safari', + "Chrome", + "Firefox", + "Safari", ] -HEALTH_CHECK_BROWSERS = [ - 'kube-probe', - 'GoogleHC', - 'Studio-Internal-Prober' -] +HEALTH_CHECK_BROWSERS = ["kube-probe", "GoogleHC", "Studio-Internal-Prober"] REST_FRAMEWORK = { - 'DEFAULT_PERMISSION_CLASSES': ( - 'rest_framework.permissions.IsAuthenticated', - ), - 'DEFAULT_AUTHENTICATION_CLASSES': ( - 'rest_framework.authentication.SessionAuthentication', + "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",), + "DEFAULT_AUTHENTICATION_CLASSES": ( + "rest_framework.authentication.SessionAuthentication", # 'rest_framework.authentication.BasicAuthentication', - 'rest_framework.authentication.TokenAuthentication', - ) + "rest_framework.authentication.TokenAuthentication", + ), } -ROOT_URLCONF = 'contentcuration.urls' +ROOT_URLCONF = "contentcuration.urls" TEMPLATES = [ { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': ['/templates/'], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', - 'readonly.context_processors.readonly', - 'contentcuration.context_processors.site_variables', - 'contentcuration.context_processors.url_tag', + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": ["/templates/"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + "readonly.context_processors.readonly", + "contentcuration.context_processors.site_variables", + "contentcuration.context_processors.url_tag", ], }, }, ] -WSGI_APPLICATION = 'contentcuration.wsgi.application' +WSGI_APPLICATION = "contentcuration.wsgi.application" # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.postgresql_psycopg2', - 'NAME': os.getenv("DATA_DB_NAME") or 'kolibri-studio', + "default": { + "ENGINE": "django.db.backends.postgresql_psycopg2", + "NAME": os.getenv("DATA_DB_NAME") or "kolibri-studio", # For dev purposes only - 'USER': os.getenv('DATA_DB_USER') or 'learningequality', - 'PASSWORD': os.getenv('DATA_DB_PASS') or 'kolibri', - 'HOST': os.getenv('DATA_DB_HOST') or 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. - 'PORT': '', # Set to empty string for default. + "USER": os.getenv("DATA_DB_USER") or "learningequality", + "PASSWORD": os.getenv("DATA_DB_PASS") or "kolibri", + "HOST": os.getenv("DATA_DB_HOST") + or "localhost", # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 
+ "PORT": "", # Set to empty string for default. }, } -IS_CONTENTNODE_TABLE_PARTITIONED = os.getenv("IS_CONTENTNODE_TABLE_PARTITIONED") or False +IS_CONTENTNODE_TABLE_PARTITIONED = ( + os.getenv("IS_CONTENTNODE_TABLE_PARTITIONED") or False +) DATABASE_ROUTERS = [ "kolibri_content.router.ContentDBRouter", ] LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'handlers': { - 'file': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filename': os.getenv('DJANGO_LOG_FILE') or 'django.log' + "version": 1, + "disable_existing_loggers": False, + "handlers": { + "file": { + "level": "DEBUG", + "class": "logging.FileHandler", + "filename": os.getenv("DJANGO_LOG_FILE") or "django.log", }, - 'console': { - 'class': 'logging.StreamHandler', + "console": { + "class": "logging.StreamHandler", }, - 'null': { - 'class': 'logging.NullHandler' - } + "null": {"class": "logging.NullHandler"}, }, - 'loggers': { - 'command': { - 'handlers': ['console'], - 'level': 'DEBUG' if globals().get('DEBUG') else 'INFO', - 'propagate': True, + "loggers": { + "command": { + "handlers": ["console"], + "level": "DEBUG" if globals().get("DEBUG") else "INFO", + "propagate": True, }, - 'django': { - 'handlers': ['file', 'console'], - 'level': 'DEBUG' if globals().get('DEBUG') else 'INFO', - 'propagate': True, + "django": { + "handlers": ["file", "console"], + "level": "DEBUG" if globals().get("DEBUG") else "INFO", + "propagate": True, }, - 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - 'level': 'DEBUG' - } - } + "django.db.backends": { + "handlers": ["null"], + "propagate": False, + "level": "DEBUG", + }, + }, } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ -LANGUAGE_CODE = 'en' +LANGUAGE_CODE = "en" -TIME_ZONE = 'UTC' +TIME_ZONE = "UTC" USE_I18N = True @@ -257,9 +258,7 @@ USE_TZ = True -LOCALE_PATHS = ( - os.path.join(BASE_DIR, 'locale'), -) +LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),) def gettext(s): @@ -267,40 +266,40 @@ def gettext(s): LANGUAGES = ( - ('en', gettext('English')), - ('es-es', gettext('Spanish')), - ('ar', gettext('Arabic')), - ('fr-fr', gettext('French')), - ('pt-br', gettext('Portuguese')), + ("en", gettext("English")), + ("es-es", gettext("Spanish")), + ("ar", gettext("Arabic")), + ("fr-fr", gettext("French")), + ("pt-br", gettext("Portuguese")), # ('en-PT', gettext('English - Pirate')), ) PRODUCTION_SITE_ID = 1 SITE_BY_ID = { - 'master': PRODUCTION_SITE_ID, - 'unstable': 3, - 'hotfixes': 4, + "master": PRODUCTION_SITE_ID, + "unstable": 3, + "hotfixes": 4, } # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ -STATIC_URL = '/static/' +STATIC_URL = "/static/" -STORAGE_URL = '/content/storage/' +STORAGE_URL = "/content/storage/" -CONTENT_DATABASE_URL = '/content/databases/' +CONTENT_DATABASE_URL = "/content/databases/" -CSV_URL = '/content/csvs/' +CSV_URL = "/content/csvs/" -LOGIN_REDIRECT_URL = '/channels/' -LOGIN_URL = '/accounts/' +LOGIN_REDIRECT_URL = "/channels/" +LOGIN_URL = "/accounts/" -AUTH_USER_MODEL = 'contentcuration.User' +AUTH_USER_MODEL = "contentcuration.User" ACCOUNT_ACTIVATION_DAYS = 7 REGISTRATION_OPEN = True -SITE_ID = SITE_BY_ID.get(os.getenv('BRANCH_ENVIRONMENT'), 1) +SITE_ID = SITE_BY_ID.get(os.getenv("BRANCH_ENVIRONMENT"), 1) # Used for serializing datetime objects. 
DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" @@ -309,11 +308,11 @@ def gettext(s): SEND_USER_ACTIVATION_NOTIFICATION_EMAIL = bool( os.getenv("SEND_USER_ACTIVATION_NOTIFICATION_EMAIL") ) -SPACE_REQUEST_EMAIL = 'content@learningequality.org' -REGISTRATION_INFORMATION_EMAIL = 'studio-registrations@learningequality.org' -HELP_EMAIL = 'content@learningequality.org' -DEFAULT_FROM_EMAIL = 'Kolibri Studio ' -POLICY_EMAIL = 'legal@learningequality.org' +SPACE_REQUEST_EMAIL = "content@learningequality.org" +REGISTRATION_INFORMATION_EMAIL = "studio-registrations@learningequality.org" +HELP_EMAIL = "content@learningequality.org" +DEFAULT_FROM_EMAIL = "Kolibri Studio " +POLICY_EMAIL = "legal@learningequality.org" # Used to determine how many days a user # has to undo accidentally deleting account. @@ -321,33 +320,30 @@ def gettext(s): DEFAULT_LICENSE = 1 -SERVER_EMAIL = 'curation-errors@learningequality.org' -ADMINS = [('Errors', SERVER_EMAIL)] +SERVER_EMAIL = "curation-errors@learningequality.org" +ADMINS = [("Errors", SERVER_EMAIL)] DEFAULT_TITLE = "Kolibri Studio" IGNORABLE_404_URLS = [ - re.compile(r'\.(php|cgi)$'), - re.compile(r'^/phpmyadmin/'), - re.compile(r'^/apple-touch-icon.*\.png$'), - re.compile(r'^/favicon\.ico$'), - re.compile(r'^/robots\.txt$'), + re.compile(r"\.(php|cgi)$"), + re.compile(r"^/phpmyadmin/"), + re.compile(r"^/apple-touch-icon.*\.png$"), + re.compile(r"^/favicon\.ico$"), + re.compile(r"^/robots\.txt$"), ] # CELERY CONFIGURATIONS CELERY_REDIS_DB = os.getenv("CELERY_REDIS_DB") or "0" CELERY = { - "broker_url": "{url}{db}".format( - url=REDIS_URL, - db=CELERY_REDIS_DB - ), + "broker_url": "{url}{db}".format(url=REDIS_URL, db=CELERY_REDIS_DB), # with a redis broker, tasks will be re-sent if not completed within the duration of this timeout "broker_transport_options": {"visibility_timeout": 4 * 3600}, "redis_db": CELERY_REDIS_DB, "result_backend": "django-db", "redis_backend_health_check_interval": 600, - "timezone": os.getenv("CELERY_TIMEZONE") or 'Africa/Nairobi', - "accept_content": ['application/json'], + "timezone": os.getenv("CELERY_TIMEZONE") or "Africa/Nairobi", + "accept_content": ["application/json"], "task_serializer": "json", "result_serializer": "json", "result_extended": True, @@ -361,11 +357,11 @@ def gettext(s): ORPHAN_DATE_CLEAN_UP_THRESHOLD = TWO_WEEKS_AGO # CLOUD STORAGE SETTINGS -DEFAULT_FILE_STORAGE = 'django_s3_storage.storage.S3Storage' -AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID') or 'development' -AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY') or 'development' -AWS_S3_BUCKET_NAME = os.getenv('AWS_BUCKET_NAME') or 'content' -AWS_S3_ENDPOINT_URL = os.getenv('AWS_S3_ENDPOINT_URL') or 'http://localhost:9000' +DEFAULT_FILE_STORAGE = "django_s3_storage.storage.S3Storage" +AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID") or "development" +AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY") or "development" +AWS_S3_BUCKET_NAME = os.getenv("AWS_BUCKET_NAME") or "content" +AWS_S3_ENDPOINT_URL = os.getenv("AWS_S3_ENDPOINT_URL") or "http://localhost:9000" AWS_AUTO_CREATE_BUCKET = False AWS_S3_FILE_OVERWRITE = True AWS_S3_BUCKET_AUTH = False @@ -374,7 +370,9 @@ def gettext(s): # defaults to what's inferred from the environment. See # https://cloud.google.com/docs/authentication/production # for how these credentials are inferred automatically. 
-GCS_STORAGE_SERVICE_ACCOUNT_KEY_PATH = os.getenv("GOOGLE_CLOUD_STORAGE_SERVICE_ACCOUNT_CREDENTIALS") +GCS_STORAGE_SERVICE_ACCOUNT_KEY_PATH = os.getenv( + "GOOGLE_CLOUD_STORAGE_SERVICE_ACCOUNT_CREDENTIALS" +) # GOOGLE DRIVE SETTINGS GOOGLE_AUTH_JSON = "credentials/client_secret.json" @@ -401,13 +399,14 @@ def gettext(s): if key: key = key.strip() # strip any possible whitespace or trailing newline -SENTRY_DSN = 'https://{secret}@sentry.io/1252819'.format(secret=key) if key else None +SENTRY_DSN = "https://{secret}@sentry.io/1252819".format(secret=key) if key else None SENTRY_ENVIRONMENT = get_secret("BRANCH_ENVIRONMENT") SENTRY_RELEASE = os.environ.get("RELEASE_COMMIT_SHA") SENTRY_ACTIVE = False if SENTRY_DSN and SENTRY_RELEASE and SENTRY_ENVIRONMENT: import sentry_sdk + # TODO: there are also Celery and Redis integrations, but since they are new # I left them as a separate task so we can spend more time on testing. from sentry_sdk.integrations.django import DjangoIntegration diff --git a/contentcuration/contentcuration/signals.py b/contentcuration/contentcuration/signals.py index e96446569f..c724565306 100644 --- a/contentcuration/contentcuration/signals.py +++ b/contentcuration/contentcuration/signals.py @@ -10,7 +10,7 @@ def set_jit(sender, connection, **kwargs): optimize its use. https://www.postgresql.org/docs/12/runtime-config-query.html#GUC-JIT """ - if connection.vendor == 'postgresql': + if connection.vendor == "postgresql": db_features = DatabaseFeatures(connection) # JIT is new in v11, and for reference this returns True for v11 and following if db_features.is_postgresql_11: diff --git a/contentcuration/contentcuration/tasks.py b/contentcuration/contentcuration/tasks.py index 5ebb81c08e..129cd78302 100644 --- a/contentcuration/contentcuration/tasks.py +++ b/contentcuration/contentcuration/tasks.py @@ -32,7 +32,10 @@ def apply_user_changes_task(self, user_id): :param user_id: The user ID for which to process changes """ from contentcuration.viewsets.sync.base import apply_changes - changes_qs = Change.objects.filter(applied=False, errored=False, user_id=user_id, channel__isnull=True) + + changes_qs = Change.objects.filter( + applied=False, errored=False, user_id=user_id, channel__isnull=True + ) apply_changes(changes_qs) if changes_qs.exists(): self.requeue() @@ -45,7 +48,10 @@ def apply_channel_changes_task(self, channel_id): :param channel_id: The channel ID for which to process changes """ from contentcuration.viewsets.sync.base import apply_changes - changes_qs = Change.objects.filter(applied=False, errored=False, channel_id=channel_id) + + changes_qs = Change.objects.filter( + applied=False, errored=False, channel_id=channel_id + ) apply_changes(changes_qs) if changes_qs.exists(): self.requeue() @@ -53,13 +59,14 @@ def apply_channel_changes_task(self, channel_id): class CustomEmailMessage(EmailMessage): """ - jayoshih: There's an issue with the django postmark backend where - _build_message attempts to attach files as base64. However, - the django EmailMessage attach method makes all content with a text/* - mimetype to be encoded as a string, causing `base64.b64encode(content)` - to fail. This is a workaround to ensure that content is still encoded as - bytes when it comes to encoding the attachment as base64 + jayoshih: There's an issue with the django postmark backend where + _build_message attempts to attach files as base64. 
However, + the django EmailMessage attach method makes all content with a text/* + mimetype to be encoded as a string, causing `base64.b64encode(content)` + to fail. This is a workaround to ensure that content is still encoded as + bytes when it comes to encoding the attachment as base64 """ + def attach(self, filename=None, content=None, mimetype=None): if filename is None: raise AssertionError @@ -76,6 +83,7 @@ def generateusercsv_task(user_id, language=settings.LANGUAGE_CODE): user = User.objects.get(pk=user_id) csv_path = write_user_csv(user) subject = render_to_string("export/user_csv_email_subject.txt", {}) + subject = "".join(subject.splitlines()) message = render_to_string( "export/user_csv_email.txt", { @@ -86,8 +94,10 @@ def generateusercsv_task(user_id, language=settings.LANGUAGE_CODE): }, ) - email = CustomEmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email]) - email.encoding = 'utf-8' + email = CustomEmailMessage( + subject, message, settings.DEFAULT_FROM_EMAIL, [user.email] + ) + email.encoding = "utf-8" email.attach_file(csv_path, mimetype="text/csv") email.send() @@ -115,7 +125,11 @@ def calculate_user_storage_task(user_id): user = User.objects.get(pk=user_id) user.set_space_used() except User.DoesNotExist: - logging.error("Tried to calculate user storage for user with id {} but they do not exist".format(user_id)) + logging.error( + "Tried to calculate user storage for user with id {} but they do not exist".format( + user_id + ) + ) @app.task(name="calculate_resource_size_task") @@ -127,10 +141,21 @@ def calculate_resource_size_task(node_id, channel_id): @app.task(name="sendcustomemails_task") def sendcustomemails_task(subject, message, query): - subject = render_to_string('registration/custom_email_subject.txt', {'subject': subject}) + subject = render_to_string( + "registration/custom_email_subject.txt", {"subject": subject} + ) + subject = "".join(subject.splitlines()) recipients = AdminUserFilter(data=query).qs.distinct() for recipient in recipients: - text = message.format(current_date=time.strftime("%A, %B %d"), current_time=time.strftime("%H:%M %Z"), **recipient.__dict__) - text = render_to_string('registration/custom_email.txt', {'message': text}) - recipient.email_user(subject, text, settings.DEFAULT_FROM_EMAIL, ) + text = message.format( + current_date=time.strftime("%A, %B %d"), + current_time=time.strftime("%H:%M %Z"), + **recipient.__dict__ + ) + text = render_to_string("registration/custom_email.txt", {"message": text}) + recipient.email_user( + subject, + text, + settings.DEFAULT_FROM_EMAIL, + ) diff --git a/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt b/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt index 7304527c22..4c20de29f0 100644 --- a/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt +++ b/contentcuration/contentcuration/templates/permissions/permissions_email_subject.txt @@ -1 +1 @@ -{% load i18n %}{% if share_mode == 'edit' %}{% blocktrans with channel=channel %}You've been invited to edit {{ channel }}{% endblocktrans %}{% else %}{% blocktrans with channel=channel %}You've been invited to view {{ channel }}{% endblocktrans %}{% endif %} \ No newline at end of file +{% load i18n %}{% if share_mode == 'edit' %}{% blocktrans with channel=channel %}You've been invited to edit {{ channel }}{% endblocktrans %}{% else %}{% blocktrans with channel=channel %}You've been invited to view {{ channel }}{% endblocktrans %}{% endif %} 
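
Note on the `subject = "".join(subject.splitlines())` lines added to
contentcuration/contentcuration/tasks.py above: render_to_string() returns the
template file's contents verbatim, so a subject rendered from a *.txt template
ends with a newline, and django.core.mail raises BadHeaderError for any header
value that contains CR or LF. Collapsing the rendered string onto one line
keeps the subject a legal single-line email header. A minimal standalone
sketch of the idea, not part of the patch (the rendered value shown in the
comment is hypothetical):

    from django.template.loader import render_to_string

    subject = render_to_string("export/user_csv_email_subject.txt", {})
    # e.g. "Your exported user CSV is ready\n" -- splitlines() drops every
    # CR/LF boundary, and "".join() reassembles the remaining pieces into a
    # single line that django.core.mail accepts as a header value.
    subject = "".join(subject.splitlines())
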
diff --git a/contentcuration/contentcuration/templates/perseus/exercise.json b/contentcuration/contentcuration/templates/perseus/exercise.json index 44e287e571..4408a80bb1 100644 --- a/contentcuration/contentcuration/templates/perseus/exercise.json +++ b/contentcuration/contentcuration/templates/perseus/exercise.json @@ -1 +1 @@ -{{exercise | safe}} \ No newline at end of file +{{exercise | safe}} diff --git a/contentcuration/contentcuration/templates/registration/custom_email_subject.txt b/contentcuration/contentcuration/templates/registration/custom_email_subject.txt index ad591957af..82fd21ed2a 100644 --- a/contentcuration/contentcuration/templates/registration/custom_email_subject.txt +++ b/contentcuration/contentcuration/templates/registration/custom_email_subject.txt @@ -1 +1 @@ -{% load i18n %} {{ subject }} \ No newline at end of file +{% load i18n %} {{ subject }} diff --git a/contentcuration/contentcuration/templates/registration/registration_information_email.txt b/contentcuration/contentcuration/templates/registration/registration_information_email.txt index 314ec38ae2..b165e8a93e 100644 --- a/contentcuration/contentcuration/templates/registration/registration_information_email.txt +++ b/contentcuration/contentcuration/templates/registration/registration_information_email.txt @@ -11,4 +11,4 @@ Storage Needed: {{information.space_needed}}{% endif %} Location(s): {{information.locations | join:", "}} Heard about us from: {{information.heard_from}} -{% endautoescape %} \ No newline at end of file +{% endautoescape %} diff --git a/contentcuration/contentcuration/templatetags/export_tags.py b/contentcuration/contentcuration/templatetags/export_tags.py index 4d82025abc..4003c4606a 100644 --- a/contentcuration/contentcuration/templatetags/export_tags.py +++ b/contentcuration/contentcuration/templatetags/export_tags.py @@ -12,20 +12,22 @@ THUMBNAIL_DIMENSION = 200 # PDFs where encoding returns None will fail, so use this in case images aren't found -DEFAULT_ENCODING = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" \ - "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" \ - "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" \ - "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" \ - "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" \ - "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" \ - "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" \ - "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" \ - "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" \ - "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" \ - "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" \ - "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" \ - "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" \ - "/aXwDY2vpQfdHLrIAAAAASUVORK5CYII=" +DEFAULT_ENCODING = ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" + "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" + "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" + "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" + "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" + "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" 
+ "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" + "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" + "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" + "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" + "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" + "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" + "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" + "/aXwDY2vpQfdHLrIAAAAASUVORK5CYII=" +) register = template.Library() logmodule.basicConfig() @@ -42,11 +44,15 @@ def encode_base64(value, dimension=THUMBNAIL_DIMENSION): return get_thumbnail_encoding(value, dimension=dimension) except IOError: try: - filepath = os.path.join(settings.STATIC_ROOT, 'img', 'kolibri_placeholder.png') + filepath = os.path.join( + settings.STATIC_ROOT, "img", "kolibri_placeholder.png" + ) - with open(filepath, 'rb') as image_file: + with open(filepath, "rb") as image_file: _, ext = os.path.splitext(value) - return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(image_file.read())) + return "data:image/{};base64,{}".format( + ext[1:], base64.b64encode(image_file.read()) + ) except IOError: logging.warning("Could not find {}".format(value)) return DEFAULT_ENCODING @@ -59,13 +65,15 @@ def encode_static_base64(value, dimension=None): if value.startswith(settings.STATIC_URL): value = os.path.basename(value) - filepath = os.path.join(settings.STATIC_ROOT, 'img', value) + filepath = os.path.join(settings.STATIC_ROOT, "img", value) if dimension: return get_thumbnail_encoding(filepath, dimension=int(dimension)) - with open(filepath, 'rb') as image_file: + with open(filepath, "rb") as image_file: _, ext = os.path.splitext(value) - return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(image_file.read())) + return "data:image/{};base64,{}".format( + ext[1:], base64.b64encode(image_file.read()) + ) except IOError: logging.warning("Could not find {}".format(value)) return DEFAULT_ENCODING diff --git a/contentcuration/contentcuration/templatetags/license_tags.py b/contentcuration/contentcuration/templatetags/license_tags.py index dd68ca07fa..89331cf141 100644 --- a/contentcuration/contentcuration/templatetags/license_tags.py +++ b/contentcuration/contentcuration/templatetags/license_tags.py @@ -8,47 +8,66 @@ LICENSE_MAPPING = None -DESCRIPTION_MAPPING = {"CC BY": _("The Attribution License lets others distribute, " - "remix, tweak, and build upon your work, even commercially, " - "as long as they credit you for the original creation. This " - "is the most accommodating of licenses offered. Recommended " - "for maximum dissemination and use of licensed materials."), - "CC BY-SA": _("The Attribution-ShareAlike License lets others remix," - " tweak, and build upon your work even for commercial purposes," - " as long as they credit you and license their new creations " - "under the identical terms. This license is often compared to " - "\"copyleft\" free and open source software licenses. All new " - "works based on yours will carry the same license, so any " - "derivatives will also allow commercial use. 
This is the " - "license used by Wikipedia, and is recommended for materials " - "that would benefit from incorporating content from Wikipedia " - "and similarly licensed projects."), - "CC BY-ND": _("The Attribution-NoDerivs License allows for redistribution," - " commercial and non-commercial, as long as it is passed along " - "unchanged and in whole, with credit to you."), - "CC BY-NC": _("The Attribution-NonCommercial License lets others remix, " - "tweak, and build upon your work non-commercially, and although " - "their new works must also acknowledge you and be non-commercial, " - "they don't have to license their derivative works on the same terms."), - "CC BY-NC-SA": _("The Attribution-NonCommercial-ShareAlike License lets " - "others remix, tweak, and build upon your work non-commercially, " - "as long as they credit you and license their new creations under " - "the identical terms."), - "CC BY-NC-ND": _("The Attribution-NonCommercial-NoDerivs License is the " - "most restrictive of our six main licenses, only allowing others " - "to download your works and share them with others as long as they " - "credit you, but they can't change them in any way or use them commercially."), - "All Rights Reserved": _("The All Rights Reserved License indicates that " - "the copyright holder reserves, or holds for their own use, all " - "the rights provided by copyright law under one specific copyright treaty."), - "Public Domain": _("Public Domain work has been identified as being free " - "of known restrictions under copyright law, including all related " - "and neighboring rights."), - "Special Permissions": _("Special Permissions is a custom license to use" - " when the current licenses do not apply to the content. The " - "owner of this license is responsible for creating a description " - "of what this license entails."), - } +DESCRIPTION_MAPPING = { + "CC BY": _( + "The Attribution License lets others distribute, " + "remix, tweak, and build upon your work, even commercially, " + "as long as they credit you for the original creation. This " + "is the most accommodating of licenses offered. Recommended " + "for maximum dissemination and use of licensed materials." + ), + "CC BY-SA": _( + "The Attribution-ShareAlike License lets others remix," + " tweak, and build upon your work even for commercial purposes," + " as long as they credit you and license their new creations " + "under the identical terms. This license is often compared to " + '"copyleft" free and open source software licenses. All new ' + "works based on yours will carry the same license, so any " + "derivatives will also allow commercial use. This is the " + "license used by Wikipedia, and is recommended for materials " + "that would benefit from incorporating content from Wikipedia " + "and similarly licensed projects." + ), + "CC BY-ND": _( + "The Attribution-NoDerivs License allows for redistribution," + " commercial and non-commercial, as long as it is passed along " + "unchanged and in whole, with credit to you." + ), + "CC BY-NC": _( + "The Attribution-NonCommercial License lets others remix, " + "tweak, and build upon your work non-commercially, and although " + "their new works must also acknowledge you and be non-commercial, " + "they don't have to license their derivative works on the same terms." 
+ ), + "CC BY-NC-SA": _( + "The Attribution-NonCommercial-ShareAlike License lets " + "others remix, tweak, and build upon your work non-commercially, " + "as long as they credit you and license their new creations under " + "the identical terms." + ), + "CC BY-NC-ND": _( + "The Attribution-NonCommercial-NoDerivs License is the " + "most restrictive of our six main licenses, only allowing others " + "to download your works and share them with others as long as they " + "credit you, but they can't change them in any way or use them commercially." + ), + "All Rights Reserved": _( + "The All Rights Reserved License indicates that " + "the copyright holder reserves, or holds for their own use, all " + "the rights provided by copyright law under one specific copyright treaty." + ), + "Public Domain": _( + "Public Domain work has been identified as being free " + "of known restrictions under copyright law, including all related " + "and neighboring rights." + ), + "Special Permissions": _( + "Special Permissions is a custom license to use" + " when the current licenses do not apply to the content. The " + "owner of this license is responsible for creating a description " + "of what this license entails." + ), +} @register.filter(is_safe=True) @@ -56,7 +75,9 @@ def get_license_url(value): global LICENSE_MAPPING if not LICENSE_MAPPING: - LICENSE_MAPPING = {lic.license_name: lic.license_url for lic in License.objects.all()} + LICENSE_MAPPING = { + lic.license_name: lic.license_url for lic in License.objects.all() + } return LICENSE_MAPPING.get(value) @@ -64,4 +85,7 @@ def get_license_url(value): @register.filter(is_safe=True) @stringfilter def get_license_description(value): - return DESCRIPTION_MAPPING.get(value) or License.objects.get(license_name=value).description + return ( + DESCRIPTION_MAPPING.get(value) + or License.objects.get(license_name=value).description + ) diff --git a/contentcuration/contentcuration/templatetags/perseus_tags.py b/contentcuration/contentcuration/templatetags/perseus_tags.py index 3f3c223aec..24cea74df3 100644 --- a/contentcuration/contentcuration/templatetags/perseus_tags.py +++ b/contentcuration/contentcuration/templatetags/perseus_tags.py @@ -1,4 +1,5 @@ import json + from django import template from django.template.defaultfilters import stringfilter diff --git a/contentcuration/contentcuration/templatetags/translation_tags.py b/contentcuration/contentcuration/templatetags/translation_tags.py index c6a71df2e2..5a1fc51b2c 100644 --- a/contentcuration/contentcuration/templatetags/translation_tags.py +++ b/contentcuration/contentcuration/templatetags/translation_tags.py @@ -9,22 +9,24 @@ @register.simple_tag -def render_bundle_css(bundle_name, config='DEFAULT', attrs=''): +def render_bundle_css(bundle_name, config="DEFAULT", attrs=""): """ A tag to conditionally load css depending on whether the page is being rendered for an LTR or RTL language. Using webpack-rtl-plugin, we now have two css files for every bundle. One that just ends in .css for LTR, and the other that ends in .rtl.css for RTL. This will conditionally load the correct one depending on the current language setting. 
""" - bidi = get_language_info(get_language())['bidi'] - files = utils.get_files(bundle_name, extension='css', config=config) + bidi = get_language_info(get_language())["bidi"] + files = utils.get_files(bundle_name, extension="css", config=config) if bidi: - files = [x for x in files if x['name'].endswith('rtl.css')] + files = [x for x in files if x["name"].endswith("rtl.css")] else: - files = [x for x in files if not x['name'].endswith('rtl.css')] + files = [x for x in files if not x["name"].endswith("rtl.css")] tags = [] for chunk in files: - tags.append(( - '' - ).format(chunk['url'], attrs)) - return mark_safe('\n'.join(tags)) + tags.append( + ('').format( + chunk["url"], attrs + ) + ) + return mark_safe("\n".join(tags)) diff --git a/contentcuration/contentcuration/tests/db/test_advisory_lock.py b/contentcuration/contentcuration/tests/db/test_advisory_lock.py index 63a3650b46..14009f8392 100644 --- a/contentcuration/contentcuration/tests/db/test_advisory_lock.py +++ b/contentcuration/contentcuration/tests/db/test_advisory_lock.py @@ -8,7 +8,9 @@ from django.db import transaction from django.test.testcases import SimpleTestCase -from django_concurrent_tests.management.commands.concurrent_call_wrapper import use_test_databases +from django_concurrent_tests.management.commands.concurrent_call_wrapper import ( + use_test_databases, +) from mock import mock from mock import patch from pytest import mark @@ -25,35 +27,197 @@ # flake8: noqa -@mark.parametrize("key1, key2, unlock, session, shared, wait, expected_query", [ - # transaction level - (1, None, False, False, False, True, "SELECT pg_advisory_xact_lock(%s) AS lock;"), - (3, None, False, False, True, True, "SELECT pg_advisory_xact_lock_shared(%s) AS lock;"), - (4, None, False, False, True, False, "SELECT pg_try_advisory_xact_lock_shared(%s) AS lock;"), - (5, None, False, False, False, False, "SELECT pg_try_advisory_xact_lock(%s) AS lock;"), - (6, 1, False, False, False, True, "SELECT pg_advisory_xact_lock(%s, %s) AS lock;"), - (7, 2, False, False, True, True, "SELECT pg_advisory_xact_lock_shared(%s, %s) AS lock;"), - (8, 3, False, False, True, False, "SELECT pg_try_advisory_xact_lock_shared(%s, %s) AS lock;"), - (9, 4, False, False, False, False, "SELECT pg_try_advisory_xact_lock(%s, %s) AS lock;"), - - # session level - (10, None, False, True, False, True, "SELECT pg_advisory_lock(%s) AS lock;"), - (11, None, True, True, False, True, "SELECT pg_advisory_unlock(%s) AS lock;"), - (12, None, False, True, True, True, "SELECT pg_advisory_lock_shared(%s) AS lock;"), - (13, None, True, True, True, True, "SELECT pg_advisory_unlock_shared(%s) AS lock;"), - (14, None, False, True, False, False, "SELECT pg_try_advisory_lock(%s) AS lock;"), - (15, None, True, True, False, False, "SELECT pg_try_advisory_unlock(%s) AS lock;"), - (16, None, False, True, True, False, "SELECT pg_try_advisory_lock_shared(%s) AS lock;"), - (17, None, True, True, True, False, "SELECT pg_try_advisory_unlock_shared(%s) AS lock;"), - (18, 1, False, True, False, True, "SELECT pg_advisory_lock(%s, %s) AS lock;"), - (19, 2, True, True, False, True, "SELECT pg_advisory_unlock(%s, %s) AS lock;"), - (20, 3, False, True, True, True, "SELECT pg_advisory_lock_shared(%s, %s) AS lock;"), - (21, 4, True, True, True, True, "SELECT pg_advisory_unlock_shared(%s, %s) AS lock;"), - (22, 5, False, True, False, False, "SELECT pg_try_advisory_lock(%s, %s) AS lock;"), - (23, 6, True, True, False, False, "SELECT pg_try_advisory_unlock(%s, %s) AS lock;"), - (24, 7, False, True, True, False, 
"SELECT pg_try_advisory_lock_shared(%s, %s) AS lock;"), - (25, 8, True, True, True, False, "SELECT pg_try_advisory_unlock_shared(%s, %s) AS lock;"), -]) +@mark.parametrize( + "key1, key2, unlock, session, shared, wait, expected_query", + [ + # transaction level + ( + 1, + None, + False, + False, + False, + True, + "SELECT pg_advisory_xact_lock(%s) AS lock;", + ), + ( + 3, + None, + False, + False, + True, + True, + "SELECT pg_advisory_xact_lock_shared(%s) AS lock;", + ), + ( + 4, + None, + False, + False, + True, + False, + "SELECT pg_try_advisory_xact_lock_shared(%s) AS lock;", + ), + ( + 5, + None, + False, + False, + False, + False, + "SELECT pg_try_advisory_xact_lock(%s) AS lock;", + ), + ( + 6, + 1, + False, + False, + False, + True, + "SELECT pg_advisory_xact_lock(%s, %s) AS lock;", + ), + ( + 7, + 2, + False, + False, + True, + True, + "SELECT pg_advisory_xact_lock_shared(%s, %s) AS lock;", + ), + ( + 8, + 3, + False, + False, + True, + False, + "SELECT pg_try_advisory_xact_lock_shared(%s, %s) AS lock;", + ), + ( + 9, + 4, + False, + False, + False, + False, + "SELECT pg_try_advisory_xact_lock(%s, %s) AS lock;", + ), + # session level + (10, None, False, True, False, True, "SELECT pg_advisory_lock(%s) AS lock;"), + (11, None, True, True, False, True, "SELECT pg_advisory_unlock(%s) AS lock;"), + ( + 12, + None, + False, + True, + True, + True, + "SELECT pg_advisory_lock_shared(%s) AS lock;", + ), + ( + 13, + None, + True, + True, + True, + True, + "SELECT pg_advisory_unlock_shared(%s) AS lock;", + ), + ( + 14, + None, + False, + True, + False, + False, + "SELECT pg_try_advisory_lock(%s) AS lock;", + ), + ( + 15, + None, + True, + True, + False, + False, + "SELECT pg_try_advisory_unlock(%s) AS lock;", + ), + ( + 16, + None, + False, + True, + True, + False, + "SELECT pg_try_advisory_lock_shared(%s) AS lock;", + ), + ( + 17, + None, + True, + True, + True, + False, + "SELECT pg_try_advisory_unlock_shared(%s) AS lock;", + ), + (18, 1, False, True, False, True, "SELECT pg_advisory_lock(%s, %s) AS lock;"), + (19, 2, True, True, False, True, "SELECT pg_advisory_unlock(%s, %s) AS lock;"), + ( + 20, + 3, + False, + True, + True, + True, + "SELECT pg_advisory_lock_shared(%s, %s) AS lock;", + ), + ( + 21, + 4, + True, + True, + True, + True, + "SELECT pg_advisory_unlock_shared(%s, %s) AS lock;", + ), + ( + 22, + 5, + False, + True, + False, + False, + "SELECT pg_try_advisory_lock(%s, %s) AS lock;", + ), + ( + 23, + 6, + True, + True, + False, + False, + "SELECT pg_try_advisory_unlock(%s, %s) AS lock;", + ), + ( + 24, + 7, + False, + True, + True, + False, + "SELECT pg_try_advisory_lock_shared(%s, %s) AS lock;", + ), + ( + 25, + 8, + True, + True, + True, + False, + "SELECT pg_try_advisory_unlock_shared(%s, %s) AS lock;", + ), + ], +) def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query): with patch("contentcuration.db.advisory_lock.connection") as conn: cursor = mock.Mock() @@ -61,7 +225,9 @@ def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query) conn.in_atomic_block.return_value = not session cursor.execute.return_value = True - with execute_lock(key1, key2=key2, unlock=unlock, session=session, shared=shared, wait=wait) as c: + with execute_lock( + key1, key2=key2, unlock=unlock, session=session, shared=shared, wait=wait + ) as c: assert c == cursor expected_params = [key1] @@ -73,22 +239,27 @@ def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query) assert params == expected_params -@mark.parametrize("unlock, 
in_atomic_block", [ - (False, False), - (True, False), - (True, True), -]) +@mark.parametrize( + "unlock, in_atomic_block", + [ + (False, False), + (True, False), + (True, True), + ], +) def test_execute_lock__not_implemented(unlock, in_atomic_block): with patch("contentcuration.db.advisory_lock.connection") as conn: conn.in_atomic_block = in_atomic_block with raises(NotImplementedError): - with execute_lock(99, key2=99, unlock=unlock, session=False, shared=False, wait=False): + with execute_lock( + 99, key2=99, unlock=unlock, session=False, shared=False, wait=False + ): pass -START_SIGNAL = 'START_SIGNAL' -END_SIGNAL = 'END_SIGNAL' +START_SIGNAL = "START_SIGNAL" +END_SIGNAL = "END_SIGNAL" SLEEP_SEC = 0.1 @@ -126,6 +297,7 @@ class AdvisoryLockDatabaseTest(SimpleTestCase): """ Test case that creates simultaneous locking situations """ + # this test manages its own transactions allow_database_queries = True diff --git a/contentcuration/contentcuration/tests/helpers.py b/contentcuration/contentcuration/tests/helpers.py index b0eeecb4a3..2635e79f56 100644 --- a/contentcuration/contentcuration/tests/helpers.py +++ b/contentcuration/contentcuration/tests/helpers.py @@ -51,4 +51,5 @@ def mock_class_instance(target): class MockClass(target_cls): def __new__(cls, *args, **kwargs): return mock.Mock(spec_set=cls) + return MockClass() diff --git a/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py b/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py index caec5669ae..acfabcffc9 100644 --- a/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py +++ b/contentcuration/contentcuration/tests/test_assessment_item_keypair_constraint_migration_.py @@ -6,16 +6,15 @@ class TestForwardAssessmentItemKeypairConstraint(StudioTestCase): - def test_prevent_two_identical_keypairs(self): contentnode = cc.ContentNode.objects.create(kind_id=exercise(), extra_fields={}) contentnode.save() - item1 = cc.AssessmentItem.objects.create(assessment_id='abc') + item1 = cc.AssessmentItem.objects.create(assessment_id="abc") item1.contentnode = contentnode item1.save() - item2 = cc.AssessmentItem.objects.create(assessment_id='abc') + item2 = cc.AssessmentItem.objects.create(assessment_id="abc") item2.contentnode = contentnode with pytest.raises(Exception) as execinfo: item2.save() - assert 'duplicate key value violates unique constraint' in str(execinfo.value) + assert "duplicate key value violates unique constraint" in str(execinfo.value) diff --git a/contentcuration/contentcuration/tests/test_asynctask.py b/contentcuration/contentcuration/tests/test_asynctask.py index 9521fc6adf..79b239099b 100644 --- a/contentcuration/contentcuration/tests/test_asynctask.py +++ b/contentcuration/contentcuration/tests/test_asynctask.py @@ -95,11 +95,14 @@ def _celery_task_worker(): # clear the "fixups" which would mess up the connection to the DB app.fixups = [] app._fixups = [] - app.worker_main(argv=[ - "worker", - "--task-events", - "--concurrency", "1", - ]) + app.worker_main( + argv=[ + "worker", + "--task-events", + "--concurrency", + "1", + ] + ) def _return_celery_task_object(task_id): @@ -114,6 +117,7 @@ class AsyncTaskTestCase(TransactionTestCase): This MUST use `serialized_rollback` due to DB transaction isolation interactions between the pytest framework and running the Celery worker in another thread """ + serialized_rollback = True @classmethod @@ -166,7 +170,9 @@ def test_asynctask_reports_success(self): 
self.assertEqual(celery_task_result.task_name, "test_task") self.assertEqual(async_result.status, states.SUCCESS) self.assertEqual(TaskResult.objects.get(task_id=async_result.id).result, "42") - self.assertEqual(TaskResult.objects.get(task_id=async_result.id).status, states.SUCCESS) + self.assertEqual( + TaskResult.objects.get(task_id=async_result.id).status, states.SUCCESS + ) def test_asynctask_reports_error(self): """ @@ -196,7 +202,9 @@ def test_only_create_async_task_creates_task_entry(self): async_result = plain_test_task.apply() result = self._wait_for(async_result) self.assertEquals(result, 42) - self.assertEquals(TaskResult.objects.filter(task_id=async_result.task_id).count(), 0) + self.assertEquals( + TaskResult.objects.filter(task_id=async_result.task_id).count(), 0 + ) @pytest.mark.skip(reason="This test is flaky on Github Actions") def test_fetch_or_enqueue_task(self): @@ -264,4 +272,4 @@ def test_revoke_task(self): try: TaskResult.objects.get(task_id=async_result.task_id, status=states.REVOKED) except TaskResult.DoesNotExist: - self.fail('Missing revoked task result') + self.fail("Missing revoked task result") diff --git a/contentcuration/contentcuration/tests/test_channel_model.py b/contentcuration/contentcuration/tests/test_channel_model.py index 1ea51a2bd6..fd65c5753d 100755 --- a/contentcuration/contentcuration/tests/test_channel_model.py +++ b/contentcuration/contentcuration/tests/test_channel_model.py @@ -157,7 +157,9 @@ def setUp(self): super(GetAllChannelsTestCase, self).setUp() # create 10 channels for comparison - self.channels = [Channel.objects.create(actor_id=self.admin_user.id) for _ in range(10)] + self.channels = [ + Channel.objects.create(actor_id=self.admin_user.id) for _ in range(10) + ] def test_returns_all_channels_in_the_db(self): """ @@ -176,7 +178,9 @@ class ChannelSetTestCase(BaseAPITestCase): def setUp(self): super(ChannelSetTestCase, self).setUp() self.channelset = mixer.blend(ChannelSet, editors=[self.user]) - self.channels = [Channel.objects.create(actor_id=self.user.id) for _ in range(10)] + self.channels = [ + Channel.objects.create(actor_id=self.user.id) for _ in range(10) + ] for chann in self.channels: chann.secret_tokens.add(self.channelset.secret_token) chann.editors.add(self.user) @@ -272,7 +276,9 @@ class ChannelMetadataSaveTestCase(StudioTestCase): def setUp(self): super(ChannelMetadataSaveTestCase, self).setUp() - self.channels = [Channel.objects.create(actor_id=self.admin_user.id) for _ in range(5)] + self.channels = [ + Channel.objects.create(actor_id=self.admin_user.id) for _ in range(5) + ] for c in self.channels: c.main_tree.changed = False c.main_tree.save() diff --git a/contentcuration/contentcuration/tests/test_chef_pipeline.py b/contentcuration/contentcuration/tests/test_chef_pipeline.py index 51ee683209..26f98dcb67 100644 --- a/contentcuration/contentcuration/tests/test_chef_pipeline.py +++ b/contentcuration/contentcuration/tests/test_chef_pipeline.py @@ -193,7 +193,9 @@ def test_add_node_with_tags(self): node_data = node_json( {"kind": "video", "license": cc.License.objects.all()[0].license_name} ) - unique_title = "This is a title that we can almost certainly find uniquely later" + unique_title = ( + "This is a title that we can almost certainly find uniquely later" + ) node_data["tags"] = ["test"] node_data["title"] = unique_title response = self.post( diff --git a/contentcuration/contentcuration/tests/test_completion_criteria.py b/contentcuration/contentcuration/tests/test_completion_criteria.py index 
6af5cdcfa6..a0daec10d7 100644 --- a/contentcuration/contentcuration/tests/test_completion_criteria.py +++ b/contentcuration/contentcuration/tests/test_completion_criteria.py @@ -15,15 +15,28 @@ def test_validate__success__empty(self): validate({}) def test_validate__fail__model(self): - with self.assertRaisesRegex(ValidationError, "model 'does not exist' is not one of"): + with self.assertRaisesRegex( + ValidationError, "model 'does not exist' is not one of" + ): validate({"model": "does not exist"}) def test_validate__fail__threshold(self): - with self.assertRaisesRegex(ValidationError, "object doesn't satisfy 'anyOf' conditions"): + with self.assertRaisesRegex( + ValidationError, "object doesn't satisfy 'anyOf' conditions" + ): validate({"model": completion_criteria.PAGES, "threshold": "not a number"}) def test_validate__content_kind(self): with self.assertRaisesRegex(ValidationError, "is invalid for content kind"): - validate({"model": completion_criteria.PAGES, "threshold": 1}, kind=content_kinds.EXERCISE) + validate( + {"model": completion_criteria.PAGES, "threshold": 1}, + kind=content_kinds.EXERCISE, + ) with self.assertRaisesRegex(ValidationError, "is invalid for content kind"): - validate({"model": completion_criteria.MASTERY, "threshold": {"mastery_model": mastery_criteria.DO_ALL}}, kind=content_kinds.DOCUMENT) + validate( + { + "model": completion_criteria.MASTERY, + "threshold": {"mastery_model": mastery_criteria.DO_ALL}, + }, + kind=content_kinds.DOCUMENT, + ) diff --git a/contentcuration/contentcuration/tests/test_contentnodes.py b/contentcuration/contentcuration/tests/test_contentnodes.py index 383861a2c3..420ff69b2b 100644 --- a/contentcuration/contentcuration/tests/test_contentnodes.py +++ b/contentcuration/contentcuration/tests/test_contentnodes.py @@ -180,12 +180,26 @@ def test_get_node_details(self): # assert format of list fields, including that they do not contain invalid data list_fields = [ - "kind_count", "languages", "accessible_languages", "licenses", "tags", "original_channels", - "authors", "aggregators", "providers", "copyright_holders" + "kind_count", + "languages", + "accessible_languages", + "licenses", + "tags", + "original_channels", + "authors", + "aggregators", + "providers", + "copyright_holders", ] for field in list_fields: - self.assertIsInstance(details.get(field), list, f"Field '{field}' isn't a list") - self.assertEqual(len(details[field]), len([value for value in details[field] if value]), f"List field '{field}' has falsy values") + self.assertIsInstance( + details.get(field), list, f"Field '{field}' isn't a list" + ) + self.assertEqual( + len(details[field]), + len([value for value in details[field] if value]), + f"List field '{field}' has falsy values", + ) class NodeOperationsTestCase(StudioTestCase): @@ -824,7 +838,9 @@ def test_resync_after_more_subs_added(self): def _create_video_node(self, title, parent, withsubs=False): data = dict( - kind_id="video", title=title, node_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + kind_id="video", + title=title, + node_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", ) video_node = testdata.node(data, parent=parent) @@ -869,7 +885,9 @@ def _setup_original_and_deriative_nodes(self): # Setup derivative channel self.new_channel = Channel.objects.create( - name="derivative of teschannel", source_id="lkajs", actor_id=self.admin_user.id + name="derivative of teschannel", + source_id="lkajs", + actor_id=self.admin_user.id, ) self.new_channel.save() self.new_channel.main_tree = self._create_empty_tree() @@ -939,7 +957,7 @@ 
class NodeCompletionTestCase(StudioTestCase): }, "model": completion_criteria.MASTERY, } - } + }, } def setUp(self): @@ -960,30 +978,52 @@ def test_create_topic_set_complete_parent_no_title(self): def test_create_topic_set_complete_parent_title(self): channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.TOPIC, parent=channel.main_tree) + new_obj = ContentNode( + title="yes", kind_id=content_kinds.TOPIC, parent=channel.main_tree + ) new_obj.save() new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_video_set_complete_no_license(self): channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree) + new_obj = ContentNode( + title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_set_complete_custom_license_no_description(self): - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=custom_licenses[0], copyright_holder="Some person") + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=custom_licenses[0], + copyright_holder="Some person", + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_set_complete_custom_license_with_description(self): - custom_licenses = list(License.objects.filter(is_custom=True).values_list("pk", flat=True)) + custom_licenses = list( + License.objects.filter(is_custom=True).values_list("pk", flat=True) + ) channel = testdata.channel() new_obj = ContentNode( title="yes", @@ -991,50 +1031,109 @@ def test_create_video_set_complete_custom_license_with_description(self): parent=channel.main_tree, license_id=custom_licenses[0], license_description="don't do this!", - copyright_holder="Some person" + copyright_holder="Some person", ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) - def test_create_video_set_complete_copyright_holder_required_no_copyright_holder(self): - required_holder = list(License.objects.filter(copyright_holder_required=True, is_custom=False).values_list("pk", flat=True)) + def test_create_video_set_complete_copyright_holder_required_no_copyright_holder( + self, + ): + required_holder = list( + License.objects.filter( + copyright_holder_required=True, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, 
license_id=required_holder[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=required_holder[0], + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_set_complete_copyright_holder_required_copyright_holder(self): - required_holder = list(License.objects.filter(copyright_holder_required=True, is_custom=False).values_list("pk", flat=True)) + required_holder = list( + License.objects.filter( + copyright_holder_required=True, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=required_holder[0], copyright_holder="Some person") + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=required_holder[0], + copyright_holder="Some person", + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_video_no_files(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=licenses[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=licenses[0], + ) new_obj.save() new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_thumbnail_only(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.VIDEO, parent=channel.main_tree, license_id=licenses[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.VIDEO, + parent=channel.main_tree, + license_id=licenses[0], + ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_THUMBNAIL, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_THUMBNAIL, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_invalid_completion_criterion(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() new_obj = ContentNode( title="yes", @@ -1052,121 +1151,252 @@ def test_create_video_invalid_completion_criterion(self): }, "model": completion_criteria.MASTERY, } - } + }, }, ) new_obj.save() - 
File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_no_assessment_items(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_invalid_assessment_item_no_question(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, answers='[{"correct": true, "text": "answer"}]' + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_invalid_assessment_item_no_answers(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question") + AssessmentItem.objects.create( + contentnode=new_obj, question="This is a question" + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_invalid_assessment_item_no_correct_answers(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], 
+ extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": false, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": false, "text": "answer"}]', + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_valid_assessment_item_no_correct_answers_input(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() AssessmentItem.objects.create( contentnode=new_obj, question="This is a question", - answers="[{\"correct\": false, \"text\": \"answer\"}]", - type=exercises.INPUT_QUESTION + answers='[{"correct": false, "text": "answer"}]', + type=exercises.INPUT_QUESTION, ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_valid_assessment_items(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_valid_assessment_items_raw_data(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.new_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.new_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, raw_data="{\"question\": {}}") + AssessmentItem.objects.create(contentnode=new_obj, raw_data='{"question": {}}') new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_no_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + 
License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0]) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_exercise_old_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields=self.old_extra_fields) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields=self.old_extra_fields, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertTrue(new_obj.complete) def test_create_exercise_bad_new_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() - new_obj = ContentNode(title="yes", kind_id=content_kinds.EXERCISE, parent=channel.main_tree, license_id=licenses[0], extra_fields={ - "randomize": False, - "options": { - "completion_criteria": { - "threshold": { - "mastery_model": exercises.M_OF_N, - "n": 5, - }, - "model": completion_criteria.MASTERY, - } - } - }) + new_obj = ContentNode( + title="yes", + kind_id=content_kinds.EXERCISE, + parent=channel.main_tree, + license_id=licenses[0], + extra_fields={ + "randomize": False, + "options": { + "completion_criteria": { + "threshold": { + "mastery_model": exercises.M_OF_N, + "n": 5, + }, + "model": completion_criteria.MASTERY, + } + }, + }, + ) new_obj.save() - AssessmentItem.objects.create(contentnode=new_obj, question="This is a question", answers="[{\"correct\": true, \"text\": \"answer\"}]") + AssessmentItem.objects.create( + contentnode=new_obj, + question="This is a question", + answers='[{"correct": true, "text": "answer"}]', + ) new_obj.mark_complete() self.assertFalse(new_obj.complete) def test_create_video_null_extra_fields(self): - licenses = list(License.objects.filter(copyright_holder_required=False, is_custom=False).values_list("pk", flat=True)) + licenses = list( + License.objects.filter( + copyright_holder_required=False, is_custom=False + ).values_list("pk", flat=True) + ) channel = testdata.channel() new_obj = ContentNode( title="yes", @@ -1177,7 +1407,11 @@ def test_create_video_null_extra_fields(self): 
extra_fields=None, ) new_obj.save() - File.objects.create(contentnode=new_obj, preset_id=format_presets.VIDEO_HIGH_RES, checksum=uuid.uuid4().hex) + File.objects.create( + contentnode=new_obj, + preset_id=format_presets.VIDEO_HIGH_RES, + checksum=uuid.uuid4().hex, + ) try: new_obj.mark_complete() except AttributeError: diff --git a/contentcuration/contentcuration/tests/test_createchannel.py b/contentcuration/contentcuration/tests/test_createchannel.py index 06d7f3c393..ec28381e6b 100644 --- a/contentcuration/contentcuration/tests/test_createchannel.py +++ b/contentcuration/contentcuration/tests/test_createchannel.py @@ -57,11 +57,17 @@ def setUp(self): super(CreateChannelTestCase, self).setUpBase() self.topic = models.ContentKind.objects.get(kind="topic") self.license = models.License.objects.all()[0] - self.fileinfo_audio = create_studio_file("abc", preset='audio', ext='mp3') - self.fileinfo_video = create_studio_file("def", preset='high_res_video', ext='mp4') - self.fileinfo_video_webm = create_studio_file("ghi", preset='high_res_video', ext='webm') - self.fileinfo_document = create_studio_file("jkl", preset='document', ext='pdf') - self.fileinfo_exercise = create_studio_file("mno", preset='exercise', ext='perseus') + self.fileinfo_audio = create_studio_file("abc", preset="audio", ext="mp3") + self.fileinfo_video = create_studio_file( + "def", preset="high_res_video", ext="mp4" + ) + self.fileinfo_video_webm = create_studio_file( + "ghi", preset="high_res_video", ext="webm" + ) + self.fileinfo_document = create_studio_file("jkl", preset="document", ext="pdf") + self.fileinfo_exercise = create_studio_file( + "mno", preset="exercise", ext="perseus" + ) def create_channel(self): create_channel_url = str(reverse_lazy("api_create_channel")) diff --git a/contentcuration/contentcuration/tests/test_decorators.py b/contentcuration/contentcuration/tests/test_decorators.py index 2c795716d7..e1a6ded135 100644 --- a/contentcuration/contentcuration/tests/test_decorators.py +++ b/contentcuration/contentcuration/tests/test_decorators.py @@ -20,4 +20,6 @@ def do_test(): mock_task.fetch_or_enqueue.assert_not_called() do_test() - mock_task.fetch_or_enqueue.assert_called_once_with(self.user, user_id=self.user.id) + mock_task.fetch_or_enqueue.assert_called_once_with( + self.user, user_id=self.user.id + ) diff --git a/contentcuration/contentcuration/tests/test_exportchannel.py b/contentcuration/contentcuration/tests/test_exportchannel.py index f4143b53b1..57599c0942 100644 --- a/contentcuration/contentcuration/tests/test_exportchannel.py +++ b/contentcuration/contentcuration/tests/test_exportchannel.py @@ -25,11 +25,12 @@ from .base import StudioTestCase from .helpers import clear_tasks -from .testdata import channel, tree +from .testdata import channel from .testdata import create_studio_file from .testdata import node as create_node from .testdata import slideshow from .testdata import thumbnail_bytes +from .testdata import tree from contentcuration import models as cc from contentcuration.models import CustomTaskMetadata from contentcuration.utils.celery.tasks import generate_task_signature @@ -53,11 +54,10 @@ def description(): class ExportChannelTestCase(StudioTestCase): - @classmethod def setUpClass(cls): super(ExportChannelTestCase, cls).setUpClass() - cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database') + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") cls.patch_copy_db.start() @classmethod @@ -74,39 +74,57 @@ def setUp(self): 
self.content_channel.save() # Add some incomplete nodes to ensure they don't get published. - new_node = create_node({'kind_id': 'topic', 'title': 'Incomplete topic', 'children': []}) + new_node = create_node( + {"kind_id": "topic", "title": "Incomplete topic", "children": []} + ) new_node.complete = False new_node.parent = self.content_channel.main_tree new_node.save() - new_video = create_node({'kind_id': 'video', 'title': 'Incomplete video', 'children': []}) + new_video = create_node( + {"kind_id": "video", "title": "Incomplete video", "children": []} + ) new_video.complete = False new_video.parent = new_node new_video.save() # Add a complete node within an incomplete node to ensure it's excluded. - new_video = create_node({'kind_id': 'video', 'title': 'Complete video', 'children': []}) + new_video = create_node( + {"kind_id": "video", "title": "Complete video", "children": []} + ) new_video.complete = True new_video.parent = new_node new_video.save() # Add a node with tags greater than 30 chars to ensure they get excluded. - new_video = create_node({'kind_id': 'video', 'tags': [{'tag_name': 'kolbasdasdasrissadasdwzxcztudio'}, {'tag_name': 'kolbasdasdasrissadasdwzxcztudi'}, - {'tag_name': 'kolbasdasdasrissadasdwzxc'}], 'title': 'kolibri tag test', 'children': []}) + new_video = create_node( + { + "kind_id": "video", + "tags": [ + {"tag_name": "kolbasdasdasrissadasdwzxcztudio"}, + {"tag_name": "kolbasdasdasrissadasdwzxcztudi"}, + {"tag_name": "kolbasdasdasrissadasdwzxc"}, + ], + "title": "kolibri tag test", + "children": [], + } + ) new_video.complete = True new_video.parent = self.content_channel.main_tree new_video.save() # Add a node to test completion criteria. extra_fields = { - "options": { - "completion_criteria": { - "model": "time", - "threshold": 20 - } - } + "options": {"completion_criteria": {"model": "time", "threshold": 20}} } - new_video = create_node({'kind_id': 'video', 'title': 'Completion criteria test', 'extra_fields': extra_fields, 'children': []}) + new_video = create_node( + { + "kind_id": "video", + "title": "Completion criteria test", + "extra_fields": extra_fields, + "children": [], + } + ) new_video.complete = True new_video.parent = self.content_channel.main_tree new_video.save() @@ -120,29 +138,41 @@ def setUp(self): "m": 1, "n": 2, "mastery_model": exercises.M_OF_N, - } + }, } } } current_exercise = cc.ContentNode.objects.filter(kind_id="exercise").first() - new_exercise = create_node({'kind_id': 'exercise', 'title': 'Mastery test', 'extra_fields': extra_fields}) + new_exercise = create_node( + { + "kind_id": "exercise", + "title": "Mastery test", + "extra_fields": extra_fields, + } + ) new_exercise.complete = True new_exercise.parent = current_exercise.parent new_exercise.save() - bad_container = create_node({'kind_id': 'topic', 'title': 'Bad topic container', 'children': []}) + bad_container = create_node( + {"kind_id": "topic", "title": "Bad topic container", "children": []} + ) bad_container.complete = True bad_container.parent = self.content_channel.main_tree bad_container.save() # exercise without mastery model, but marked as complete - broken_exercise = create_node({'kind_id': 'exercise', 'title': 'Bad mastery test', 'extra_fields': {}}) + broken_exercise = create_node( + {"kind_id": "exercise", "title": "Bad mastery test", "extra_fields": {}} + ) broken_exercise.complete = True broken_exercise.parent = bad_container broken_exercise.save() - thumbnail_data = create_studio_file(thumbnail_bytes, preset="exercise_thumbnail", ext="png") + thumbnail_data = 
create_studio_file( + thumbnail_bytes, preset="exercise_thumbnail", ext="png" + ) file_obj = thumbnail_data["db_file"] file_obj.contentnode = new_exercise file_obj.save() @@ -152,17 +182,25 @@ def setUp(self): ai.save() legacy_extra_fields = { - 'mastery_model': exercises.M_OF_N, - 'randomize': True, - 'm': 1, - 'n': 2 + "mastery_model": exercises.M_OF_N, + "randomize": True, + "m": 1, + "n": 2, } - legacy_exercise = create_node({'kind_id': 'exercise', 'title': 'Legacy Mastery test', 'extra_fields': legacy_extra_fields}) + legacy_exercise = create_node( + { + "kind_id": "exercise", + "title": "Legacy Mastery test", + "extra_fields": legacy_extra_fields, + } + ) legacy_exercise.complete = True legacy_exercise.parent = current_exercise.parent legacy_exercise.save() - thumbnail_data = create_studio_file(thumbnail_bytes, preset="exercise_thumbnail", ext="png") + thumbnail_data = create_studio_file( + thumbnail_bytes, preset="exercise_thumbnail", ext="png" + ) file_obj = thumbnail_data["db_file"] file_obj.contentnode = legacy_exercise file_obj.save() @@ -174,12 +212,12 @@ def setUp(self): first_topic = self.content_channel.main_tree.get_descendants().first() # Add a publishable topic to ensure it does not inherit but that its children do - new_node = create_node({'kind_id': 'topic', 'title': 'Disinherited topic'}) + new_node = create_node({"kind_id": "topic", "title": "Disinherited topic"}) new_node.complete = True new_node.parent = first_topic new_node.save() - new_video = create_node({'kind_id': 'video', 'title': 'Inheriting video'}) + new_video = create_node({"kind_id": "video", "title": "Inheriting video"}) new_video.complete = True new_video.parent = new_node new_video.save() @@ -223,7 +261,9 @@ def setUp(self): first_topic_first_child.save() set_channel_icon_encoding(self.content_channel) - self.tempdb = create_content_database(self.content_channel, True, self.admin_user.id, True) + self.tempdb = create_content_database( + self.content_channel, True, self.admin_user.id, True + ) set_active_content_database(self.tempdb) @@ -249,7 +289,9 @@ def test_contentnode_license_data(self): for node in nodes: if node.license: self.assertEqual(node.license_name, node.license.license_name) - self.assertEqual(node.license_description, node.license.license_description) + self.assertEqual( + node.license_description, node.license.license_description + ) def test_contentnode_incomplete_not_published(self): kolibri_nodes = kolibri_models.ContentNode.objects.all() @@ -272,10 +314,15 @@ def test_contentnode_incomplete_not_published(self): assert kolibri_nodes.filter(pk=node.node_id).count() == 0 # bad exercise node should not be published (technically incomplete) - assert kolibri_models.ContentNode.objects.filter(title='Bad mastery test').count() == 0 + assert ( + kolibri_models.ContentNode.objects.filter(title="Bad mastery test").count() + == 0 + ) def test_tags_greater_than_30_excluded(self): - tag_node = kolibri_models.ContentNode.objects.filter(title='kolibri tag test').first() + tag_node = kolibri_models.ContentNode.objects.filter( + title="kolibri tag test" + ).first() published_tags = tag_node.tags.all() assert published_tags.count() == 2 @@ -283,19 +330,25 @@ def test_tags_greater_than_30_excluded(self): assert len(t.tag_name) <= 30 def test_duration_override_on_completion_criteria_time(self): - completion_criteria_node = kolibri_models.ContentNode.objects.filter(title='Completion criteria test').first() - non_completion_criteria_node = kolibri_models.ContentNode.objects.filter(title='kolibri tag 
test').first() + completion_criteria_node = kolibri_models.ContentNode.objects.filter( + title="Completion criteria test" + ).first() + non_completion_criteria_node = kolibri_models.ContentNode.objects.filter( + title="kolibri tag test" + ).first() assert completion_criteria_node.duration == 20 assert non_completion_criteria_node.duration == 100 def test_completion_criteria_set(self): - completion_criteria_node = kolibri_models.ContentNode.objects.filter(title='Completion criteria test').first() + completion_criteria_node = kolibri_models.ContentNode.objects.filter( + title="Completion criteria test" + ).first() - self.assertEqual(completion_criteria_node.options["completion_criteria"], { - "model": "time", - "threshold": 20 - }) + self.assertEqual( + completion_criteria_node.options["completion_criteria"], + {"model": "time", "threshold": 20}, + ) def test_contentnode_channel_id_data(self): channel = kolibri_models.ChannelMetadata.objects.first() @@ -313,31 +366,39 @@ def test_contentnode_file_checksum_data(self): def test_contentnode_file_extension_data(self): files = kolibri_models.File.objects.all() assert files.count() > 0 - for file in files.prefetch_related('local_file'): + for file in files.prefetch_related("local_file"): self.assertEqual(file.extension, file.local_file.extension) def test_contentnode_file_size_data(self): files = kolibri_models.File.objects.all() assert files.count() > 0 - for file in files.prefetch_related('local_file'): + for file in files.prefetch_related("local_file"): self.assertEqual(file.file_size, file.local_file.file_size) def test_channel_icon_encoding(self): self.assertIsNotNone(self.content_channel.icon_encoding) def test_assessment_metadata(self): - for i, exercise in enumerate(kolibri_models.ContentNode.objects.filter(kind="exercise")): + for i, exercise in enumerate( + kolibri_models.ContentNode.objects.filter(kind="exercise") + ): asm = exercise.assessmentmetadata.first() self.assertTrue(isinstance(asm.assessment_item_ids, list)) mastery = asm.mastery_model self.assertTrue(isinstance(mastery, dict)) - self.assertEqual(mastery["type"], exercises.DO_ALL if i == 0 else exercises.M_OF_N) + self.assertEqual( + mastery["type"], exercises.DO_ALL if i == 0 else exercises.M_OF_N + ) self.assertEqual(mastery["m"], 3 if i == 0 else 1) self.assertEqual(mastery["n"], 3 if i == 0 else 2) def test_inherited_language(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id)[1:]: + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + )[1:]: if child.kind == "topic": self.assertIsNone(child.lang_id) self.assertEqual(child.children.first().lang_id, "fr") @@ -345,108 +406,164 @@ def test_inherited_language(self): self.assertEqual(child.lang_id, "fr") def test_inherited_language_no_overwrite(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() self.assertEqual(first_child.lang_id, "sw") def test_inherited_category(self): - first_topic_node_id = 
self.content_channel.main_tree.get_descendants().first().node_id
-        for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id)[1:]:
+        first_topic_node_id = (
+            self.content_channel.main_tree.get_descendants().first().node_id
+        )
+        for child in kolibri_models.ContentNode.objects.filter(
+            parent_id=first_topic_node_id
+        )[1:]:
             if child.kind == "topic":
                 self.assertIsNone(child.categories)
-                self.assertEqual(child.children.first().categories, subjects.MATHEMATICS)
+                self.assertEqual(
+                    child.children.first().categories, subjects.MATHEMATICS
+                )
             else:
                 self.assertEqual(child.categories, subjects.MATHEMATICS)
 
     def test_inherited_category_no_overwrite(self):
-        first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id
-        first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first()
+        first_topic_node_id = (
+            self.content_channel.main_tree.get_descendants().first().node_id
+        )
+        first_child = kolibri_models.ContentNode.objects.filter(
+            parent_id=first_topic_node_id
+        ).first()
         self.assertEqual(first_child.categories, subjects.ALGEBRA)
 
     def test_inherited_needs(self):
-        first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id
-        for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id)[1:]:
+        first_topic_node_id = (
+            self.content_channel.main_tree.get_descendants().first().node_id
+        )
+        for child in kolibri_models.ContentNode.objects.filter(
+            parent_id=first_topic_node_id
+        )[1:]:
             if child.kind == "topic":
                 self.assertIsNone(child.learner_needs)
-                self.assertEqual(child.children.first().learner_needs, needs.PRIOR_KNOWLEDGE)
+                self.assertEqual(
+                    child.children.first().learner_needs, needs.PRIOR_KNOWLEDGE
+                )
             else:
                 self.assertEqual(child.learner_needs, needs.PRIOR_KNOWLEDGE)
 
     def test_inherited_needs_no_overwrite(self):
-        first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id
-        first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first()
+        first_topic_node_id = (
+            self.content_channel.main_tree.get_descendants().first().node_id
+        )
+        first_child = kolibri_models.ContentNode.objects.filter(
+            parent_id=first_topic_node_id
+        ).first()
         self.assertEqual(first_child.learner_needs, needs.FOR_BEGINNERS)
 
     def test_topics_no_accessibility_label(self):
-        first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id
+        first_topic_node_id = (
+            self.content_channel.main_tree.get_descendants().first().node_id
+        )
         topic = kolibri_models.ContentNode.objects.get(id=first_topic_node_id)
         self.assertIsNone(topic.accessibility_labels)
 
     def test_child_no_inherit_accessibility_label(self):
-        first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id
-        first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first()
+        first_topic_node_id = (
+            self.content_channel.main_tree.get_descendants().first().node_id
+        )
+        first_child = kolibri_models.ContentNode.objects.filter(
+            parent_id=first_topic_node_id
+        ).first()
         # Should only be the accessibility labels we set on the child directly, not any parent ones.
- self.assertEqual(first_child.accessibility_labels, accessibility_categories.CAPTIONS_SUBTITLES) + self.assertEqual( + first_child.accessibility_labels, + accessibility_categories.CAPTIONS_SUBTITLES, + ) def test_inherited_grade_levels(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id): + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ): if child.kind == "topic": self.assertIsNone(child.grade_levels) - self.assertEqual(child.children.first().grade_levels, levels.LOWER_SECONDARY) + self.assertEqual( + child.children.first().grade_levels, levels.LOWER_SECONDARY + ) else: self.assertEqual(child.grade_levels, levels.LOWER_SECONDARY) def test_inherited_resource_types(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - for child in kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id): + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + for child in kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ): if child.kind == "topic": self.assertIsNone(child.resource_types) - self.assertEqual(child.children.first().resource_types, resource_type.LESSON_PLAN) + self.assertEqual( + child.children.first().resource_types, resource_type.LESSON_PLAN + ) else: self.assertEqual(child.resource_types, resource_type.LESSON_PLAN) def test_topics_no_learning_activity(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) topic = kolibri_models.ContentNode.objects.get(id=first_topic_node_id) self.assertIsNone(topic.learning_activities) def test_child_no_inherit_learning_activity(self): - first_topic_node_id = self.content_channel.main_tree.get_descendants().first().node_id - first_child = kolibri_models.ContentNode.objects.filter(parent_id=first_topic_node_id).first() + first_topic_node_id = ( + self.content_channel.main_tree.get_descendants().first().node_id + ) + first_child = kolibri_models.ContentNode.objects.filter( + parent_id=first_topic_node_id + ).first() # Should only be the learning activities we set on the child directly, not any parent ones. 
         self.assertEqual(first_child.learning_activities, learning_activities.LISTEN)
 
     def test_publish_no_modify_exercise_extra_fields(self):
         exercise = cc.ContentNode.objects.get(title="Mastery test")
-        self.assertEqual(exercise.extra_fields["options"]["completion_criteria"]["threshold"], {
-            "m": 1,
-            "n": 2,
-            "mastery_model": exercises.M_OF_N,
-        })
-        published_exercise = kolibri_models.ContentNode.objects.get(title="Mastery test")
-        self.assertEqual(published_exercise.options["completion_criteria"]["threshold"], {
-            "m": 1,
-            "n": 2,
-            "mastery_model": exercises.M_OF_N,
-        })
+        self.assertEqual(
+            exercise.extra_fields["options"]["completion_criteria"]["threshold"],
+            {
+                "m": 1,
+                "n": 2,
+                "mastery_model": exercises.M_OF_N,
+            },
+        )
+        published_exercise = kolibri_models.ContentNode.objects.get(
+            title="Mastery test"
+        )
+        self.assertEqual(
+            published_exercise.options["completion_criteria"]["threshold"],
+            {
+                "m": 1,
+                "n": 2,
+                "mastery_model": exercises.M_OF_N,
+            },
+        )
 
     def test_publish_no_modify_legacy_exercise_extra_fields(self):
         current_exercise = cc.ContentNode.objects.get(title="Legacy Mastery test")
-        self.assertEqual(current_exercise.extra_fields, {
-            'mastery_model': exercises.M_OF_N,
-            'randomize': True,
-            'm': 1,
-            'n': 2
-        })
+        self.assertEqual(
+            current_exercise.extra_fields,
+            {"mastery_model": exercises.M_OF_N, "randomize": True, "m": 1, "n": 2},
+        )
 
 
 class EmptyChannelTestCase(StudioTestCase):
-
     @classmethod
     def setUpClass(cls):
         super(EmptyChannelTestCase, cls).setUpClass()
-        cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database')
+        cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database")
         cls.patch_copy_db.start()
 
     @classmethod
@@ -468,7 +585,7 @@ class ChannelExportUtilityFunctionTestCase(StudioTestCase):
     @classmethod
     def setUpClass(cls):
         super(ChannelExportUtilityFunctionTestCase, cls).setUpClass()
-        cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database')
+        cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database")
         cls.patch_copy_db.start()
 
     @classmethod
@@ -481,10 +598,9 @@ def setUp(self):
         fh, output_db = tempfile.mkstemp(suffix=".sqlite3")
         self.output_db = output_db
         set_active_content_database(self.output_db)
-        call_command("migrate",
-                     "content",
-                     database=get_active_content_database(),
-                     no_input=True)
+        call_command(
+            "migrate", "content", database=get_active_content_database(), no_input=True
+        )
 
     def tearDown(self):
         # Clean up database connection after the test
@@ -501,23 +617,39 @@ def test_convert_channel_thumbnail_empty_thumbnail(self):
         self.assertEqual("", convert_channel_thumbnail(channel))
 
     def test_convert_channel_thumbnail_static_thumbnail(self):
-        channel = cc.Channel.objects.create(thumbnail="/static/kolibri_flapping_bird.png", actor_id=self.admin_user.id)
+        channel = cc.Channel.objects.create(
+            thumbnail="/static/kolibri_flapping_bird.png", actor_id=self.admin_user.id
+        )
         self.assertEqual("", convert_channel_thumbnail(channel))
 
     def test_convert_channel_thumbnail_encoding_valid(self):
         channel = cc.Channel.objects.create(
-            thumbnail="/content/kolibri_flapping_bird.png", thumbnail_encoding={"base64": "flappy_bird"}, actor_id=self.admin_user.id)
+            thumbnail="/content/kolibri_flapping_bird.png",
+            thumbnail_encoding={"base64": "flappy_bird"},
+            actor_id=self.admin_user.id,
+        )
         self.assertEqual("flappy_bird", convert_channel_thumbnail(channel))
 
     def test_convert_channel_thumbnail_encoding_invalid(self):
-        with patch("contentcuration.utils.publish.get_thumbnail_encoding", return_value="this is a test"):
-            channel = cc.Channel.objects.create(thumbnail="/content/kolibri_flapping_bird.png", thumbnail_encoding={}, actor_id=self.admin_user.id)
+        with patch(
+            "contentcuration.utils.publish.get_thumbnail_encoding",
+            return_value="this is a test",
+        ):
+            channel = cc.Channel.objects.create(
+                thumbnail="/content/kolibri_flapping_bird.png",
+                thumbnail_encoding={},
+                actor_id=self.admin_user.id,
+            )
             self.assertEqual("this is a test", convert_channel_thumbnail(channel))
 
     def test_create_slideshow_manifest(self):
-        ccnode = cc.ContentNode.objects.create(kind_id=slideshow(), extra_fields={}, complete=True)
+        ccnode = cc.ContentNode.objects.create(
+            kind_id=slideshow(), extra_fields={}, complete=True
+        )
         create_slideshow_manifest(ccnode)
-        manifest_collection = cc.File.objects.filter(contentnode=ccnode, preset_id=u"slideshow_manifest")
+        manifest_collection = cc.File.objects.filter(
+            contentnode=ccnode, preset_id=u"slideshow_manifest"
+        )
         assert len(manifest_collection) == 1
 
 
@@ -525,7 +657,7 @@ class ChannelExportPrerequisiteTestCase(StudioTestCase):
     @classmethod
     def setUpClass(cls):
         super(ChannelExportPrerequisiteTestCase, cls).setUpClass()
-        cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database')
+        cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database")
         cls.patch_copy_db.start()
 
     def setUp(self):
@@ -533,10 +665,9 @@ def setUp(self):
         fh, output_db = tempfile.mkstemp(suffix=".sqlite3")
         self.output_db = output_db
         set_active_content_database(self.output_db)
-        call_command("migrate",
-                     "content",
-                     database=get_active_content_database(),
-                     no_input=True)
+        call_command(
+            "migrate", "content", database=get_active_content_database(), no_input=True
+        )
 
     def tearDown(self):
         # Clean up database connection after the test
@@ -549,10 +680,14 @@ def tearDown(self):
 
     def test_nonexistent_prerequisites(self):
         channel = cc.Channel.objects.create(actor_id=self.admin_user.id)
-        node1 = cc.ContentNode.objects.create(kind_id="exercise", parent_id=channel.main_tree.pk, complete=True)
+        node1 = cc.ContentNode.objects.create(
+            kind_id="exercise", parent_id=channel.main_tree.pk, complete=True
+        )
         exercise = cc.ContentNode.objects.create(kind_id="exercise", complete=True)
 
-        cc.PrerequisiteContentRelationship.objects.create(target_node=exercise, prerequisite=node1)
+        cc.PrerequisiteContentRelationship.objects.create(
+            target_node=exercise, prerequisite=node1
+        )
 
         map_prerequisites(node1)
 
@@ -564,7 +699,7 @@ def test_fill_published_fields(self):
         fill_published_fields(channel, version_notes)
         self.assertTrue(channel.published_data)
         self.assertIsNotNone(channel.published_data.get(0))
-        self.assertEqual(channel.published_data[0]['version_notes'], version_notes)
+        self.assertEqual(channel.published_data[0]["version_notes"], version_notes)
 
 
 class PublishFailCleansUpTaskObjects(StudioTestCase):
@@ -573,12 +708,14 @@ def setUp(self):
 
     def test_failed_task_objects_cleaned_up_when_publishing(self):
         channel_id = self.channel.id
-        task_name = 'export-channel'
+        task_name = "export-channel"
         task_id = uuid.uuid4().hex
-        pk = 'ab684452f2ad4ba6a1426d6410139f60'
-        table = 'channel'
-        task_kwargs = json.dumps({'pk': pk, 'table': table})
-        signature = generate_task_signature(task_name, task_kwargs=task_kwargs, channel_id=channel_id)
+        pk = "ab684452f2ad4ba6a1426d6410139f60"
+        table = "channel"
+        task_kwargs = json.dumps({"pk": pk, "table": table})
+        signature = generate_task_signature(
+            task_name, task_kwargs=task_kwargs, channel_id=channel_id
+        )
TaskResult.objects.create( task_id=task_id, @@ -587,10 +724,7 @@ def test_failed_task_objects_cleaned_up_when_publishing(self): ) CustomTaskMetadata.objects.create( - task_id=task_id, - channel_id=channel_id, - user=self.user, - signature=signature + task_id=task_id, channel_id=channel_id, user=self.user, signature=signature ) assert TaskResult.objects.filter(task_id=task_id).exists() @@ -599,15 +733,20 @@ def test_failed_task_objects_cleaned_up_when_publishing(self): with create_change_tracker(pk, table, channel_id, self.user, task_name): assert not TaskResult.objects.filter(task_id=task_id).exists() assert not CustomTaskMetadata.objects.filter(task_id=task_id).exists() - new_task_result = TaskResult.objects.filter(task_name=task_name, status=states.STARTED).first() - new_custom_task_metadata = CustomTaskMetadata.objects.get(channel_id=channel_id, user=self.user, signature=signature) + new_task_result = TaskResult.objects.filter( + task_name=task_name, status=states.STARTED + ).first() + new_custom_task_metadata = CustomTaskMetadata.objects.get( + channel_id=channel_id, user=self.user, signature=signature + ) assert new_custom_task_metadata.task_id == new_task_result.task_id + class PublishStagingTreeTestCase(StudioTestCase): @classmethod def setUpClass(cls): super(PublishStagingTreeTestCase, cls).setUpClass() - cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database') + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") cls.mock_save_export = cls.patch_copy_db.start() @classmethod @@ -619,10 +758,10 @@ def setUp(self): super(PublishStagingTreeTestCase, self).setUp() self.channel_version = 3 - self.incomplete_video_in_staging = 'Incomplete video in staging tree' - self.complete_video_in_staging = 'Complete video in staging tree' - self.incomplete_video_in_main = 'Incomplete video in main tree' - self.complete_video_in_main = 'Complete video in main tree' + self.incomplete_video_in_staging = "Incomplete video in staging tree" + self.complete_video_in_staging = "Complete video in staging tree" + self.incomplete_video_in_main = "Incomplete video in main tree" + self.complete_video_in_main = "Complete video in main tree" self.content_channel = channel() self.content_channel.staging_tree = tree() @@ -630,28 +769,44 @@ def setUp(self): self.content_channel.save() # Incomplete node should be excluded. - new_node = create_node({'kind_id': 'video', 'title': self.incomplete_video_in_staging, 'children': []}) + new_node = create_node( + { + "kind_id": "video", + "title": self.incomplete_video_in_staging, + "children": [], + } + ) new_node.complete = False new_node.parent = self.content_channel.staging_tree new_node.published = False new_node.save() # Complete node should be included. - new_video = create_node({'kind_id': 'video', 'title': self.complete_video_in_staging, 'children': []}) + new_video = create_node( + { + "kind_id": "video", + "title": self.complete_video_in_staging, + "children": [], + } + ) new_video.complete = True new_video.parent = self.content_channel.staging_tree new_node.published = False new_video.save() # Incomplete node in main_tree. - new_node = create_node({'kind_id': 'video', 'title': self.incomplete_video_in_main, 'children': []}) + new_node = create_node( + {"kind_id": "video", "title": self.incomplete_video_in_main, "children": []} + ) new_node.complete = False new_node.parent = self.content_channel.main_tree new_node.published = False new_node.save() # Complete node in main_tree. 
- new_node = create_node({'kind_id': 'video', 'title': self.complete_video_in_main, 'children': []}) + new_node = create_node( + {"kind_id": "video", "title": self.complete_video_in_main, "children": []} + ) new_node.complete = True new_node.parent = self.content_channel.main_tree new_node.published = False @@ -667,7 +822,7 @@ def run_publish_channel(self): send_email=False, progress_tracker=None, language="fr", - use_staging_tree=True + use_staging_tree=True, ) def test_none_staging_tree(self): @@ -715,7 +870,9 @@ def test_staging_tree_used_for_publish(self): set_active_content_database(self.tempdb) nodes = kolibri_models.ContentNode.objects.all() - self.assertEqual(nodes.filter(title=self.incomplete_video_in_staging).count(), 0) + self.assertEqual( + nodes.filter(title=self.incomplete_video_in_staging).count(), 0 + ) self.assertEqual(nodes.filter(title=self.complete_video_in_staging).count(), 1) self.assertEqual(nodes.filter(title=self.incomplete_video_in_main).count(), 0) self.assertEqual(nodes.filter(title=self.complete_video_in_main).count(), 0) diff --git a/contentcuration/contentcuration/tests/test_format_preset_model.py b/contentcuration/contentcuration/tests/test_format_preset_model.py index 555b43937d..465070fa02 100644 --- a/contentcuration/contentcuration/tests/test_format_preset_model.py +++ b/contentcuration/contentcuration/tests/test_format_preset_model.py @@ -3,7 +3,6 @@ class GetPresetTestCase(StudioTestCase): - def test_accepts_string(self): """ Check that if we pass in a string, we won't error out. @@ -29,7 +28,6 @@ def test_returns_none_if_called_with_nonexistent_preset(self): class GuessFormatPresetTestCase(StudioTestCase): - def test_accepts_string(self): """ Make sure we don't raise an error if we pass a string. diff --git a/contentcuration/contentcuration/tests/test_forms.py b/contentcuration/contentcuration/tests/test_forms.py index e004edf75b..7b2e7f1157 100644 --- a/contentcuration/contentcuration/tests/test_forms.py +++ b/contentcuration/contentcuration/tests/test_forms.py @@ -9,16 +9,11 @@ class ForgotPasswordFormTest(StudioAPITestCase): def setUp(self): self.request = mock.Mock() - self.data = dict( - email="tester@tester.com" - ) + self.data = dict(email="tester@tester.com") self.form = ForgotPasswordForm(data=self.data) self.form.full_clean() self.form.get_activation_key = mock.Mock() - self.extra_email_context = dict( - site="LE", - domain="test.learningequality.org" - ) + self.extra_email_context = dict(site="LE", domain="test.learningequality.org") @mock.patch("contentcuration.forms.PasswordResetForm.save") def test_save__active(self, parent_save): @@ -26,12 +21,12 @@ def test_save__active(self, parent_save): self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) parent_save.assert_called_once_with( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) @mock.patch("contentcuration.forms.render_to_string") @@ -42,64 +37,58 @@ def test_save__inactive(self, email_user, render_to_string): user.save() self.form.get_activation_key.return_value = "activation key" - render_to_string.side_effect = [ - "Subject", - "Message" - ] + render_to_string.side_effect = ["Subject", "Message"] self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) context = { - 'activation_key': "activation 
key", - 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS, - 'site': "LE", - 'user': user, - 'domain': "test.learningequality.org", + "activation_key": "activation key", + "expiration_days": settings.ACCOUNT_ACTIVATION_DAYS, + "site": "LE", + "user": user, + "domain": "test.learningequality.org", } render_to_string.assert_any_call( - 'registration/password_reset_subject.txt', - context + "registration/password_reset_subject.txt", context ) render_to_string.assert_any_call( - 'registration/activation_needed_email.txt', - context + "registration/activation_needed_email.txt", context + ) + email_user.assert_called_once_with( + "Subject", "Message", settings.DEFAULT_FROM_EMAIL ) - email_user.assert_called_once_with("Subject", "Message", settings.DEFAULT_FROM_EMAIL) @mock.patch("contentcuration.forms.render_to_string") @mock.patch("contentcuration.forms.User.email_user") def test_save__inactive__no_password(self, email_user, render_to_string): user = testdata.user("tester@tester.com") user.is_active = False - user.password = '' + user.password = "" user.save() - render_to_string.side_effect = [ - "Subject", - "Message" - ] + render_to_string.side_effect = ["Subject", "Message"] self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) self.form.get_activation_key.assert_not_called() context = { - 'site': "LE", - 'user': user, - 'domain': "test.learningequality.org", + "site": "LE", + "user": user, + "domain": "test.learningequality.org", } render_to_string.assert_any_call( - 'registration/password_reset_subject.txt', - context + "registration/password_reset_subject.txt", context ) render_to_string.assert_any_call( - 'registration/registration_needed_email.txt', - context + "registration/registration_needed_email.txt", context + ) + email_user.assert_called_once_with( + "Subject", "Message", settings.DEFAULT_FROM_EMAIL ) - email_user.assert_called_once_with("Subject", "Message", settings.DEFAULT_FROM_EMAIL) @mock.patch("contentcuration.forms.render_to_string") @mock.patch("contentcuration.forms.User.email_user") @@ -108,7 +97,7 @@ def test_save__missing(self, parent_save, email_user, render_to_string): self.form.save( request=self.request, extra_email_context=self.extra_email_context, - from_email="another@tester.com" + from_email="another@tester.com", ) parent_save.assert_not_called() self.form.get_activation_key.assert_not_called() diff --git a/contentcuration/contentcuration/tests/test_gcs_storage.py b/contentcuration/contentcuration/tests/test_gcs_storage.py index 165877f9ac..a58420873e 100755 --- a/contentcuration/contentcuration/tests/test_gcs_storage.py +++ b/contentcuration/contentcuration/tests/test_gcs_storage.py @@ -21,7 +21,9 @@ def setUp(self): self.blob_class = mock.create_autospec(Blob) self.blob_obj = self.blob_class("blob", "blob") self.mock_client = mock.create_autospec(Client) - self.storage = GoogleCloudStorage(client=self.mock_client(), bucket_name="bucket") + self.storage = GoogleCloudStorage( + client=self.mock_client(), bucket_name="bucket" + ) self.content = BytesIO(b"content") def test_calls_upload_from_file(self): @@ -41,7 +43,9 @@ def test_calls_upload_from_file_with_a_file_object_and_content_type(self): self.storage.save("myfile.jpg", self.content, blob_object=self.blob_obj) # Check that we pass self.content file_object to upload_from_file - self.blob_obj.upload_from_file.assert_called_once_with(self.content, content_type="image/jpeg") + 
self.blob_obj.upload_from_file.assert_called_once_with( + self.content, content_type="image/jpeg" + ) def test_checks_does_not_upload_file_if_empty(self): """ @@ -71,7 +75,10 @@ def test_uploads_cache_control_private_if_content_database(self): assert "private" in self.blob_obj.cache_control @mock.patch("contentcuration.utils.gcs_storage.BytesIO") - @mock.patch("contentcuration.utils.gcs_storage.GoogleCloudStorage._is_file_empty", return_value=False) + @mock.patch( + "contentcuration.utils.gcs_storage.GoogleCloudStorage._is_file_empty", + return_value=False, + ) def test_gzip_if_content_database(self, bytesio_mock, file_empty_mock): """ Check that if we're uploading a gzipped content database and @@ -92,6 +99,7 @@ class RandomFileSchema: """ A schema for a file we're about to upload. """ + contents = str filename = str @@ -99,7 +107,9 @@ def setUp(self): self.blob_class = mock.create_autospec(Blob) self.blob_obj = self.blob_class("blob", "blob") self.mock_client = mock.create_autospec(Client) - self.storage = GoogleCloudStorage(client=self.mock_client(), bucket_name="bucket") + self.storage = GoogleCloudStorage( + client=self.mock_client(), bucket_name="bucket" + ) self.local_file = mixer.blend(self.RandomFileSchema) def test_raises_error_if_mode_is_not_rb(self): @@ -147,8 +157,13 @@ def setUp(self): self.mock_anon_bucket = bucket_cls(self.mock_anon_client, "bucket") self.mock_anon_client.get_bucket.return_value = self.mock_anon_bucket - with mock.patch("contentcuration.utils.gcs_storage._create_default_client", return_value=self.mock_default_client), \ - mock.patch("contentcuration.utils.gcs_storage.Client.create_anonymous_client", return_value=self.mock_anon_client): + with mock.patch( + "contentcuration.utils.gcs_storage._create_default_client", + return_value=self.mock_default_client, + ), mock.patch( + "contentcuration.utils.gcs_storage.Client.create_anonymous_client", + return_value=self.mock_anon_client, + ): self.storage = CompositeGCS() def test_get_writeable_backend(self): @@ -207,8 +222,13 @@ def test_url(self): mock_blob = self.blob_cls("blob", "blob") self.mock_default_bucket.get_blob.return_value = mock_blob mock_blob.public_url = "https://storage.googleapis.com/bucket/blob" - self.assertEqual(self.storage.url("blob"), "https://storage.googleapis.com/bucket/blob") + self.assertEqual( + self.storage.url("blob"), "https://storage.googleapis.com/bucket/blob" + ) def test_get_created_time(self): self.mock_default_bucket.get_blob.return_value = self.blob_cls("blob", "blob") - self.assertEqual(self.storage.get_created_time("blob"), self.blob_cls.return_value.time_created) + self.assertEqual( + self.storage.get_created_time("blob"), + self.blob_cls.return_value.time_created, + ) diff --git a/contentcuration/contentcuration/tests/test_models.py b/contentcuration/contentcuration/tests/test_models.py index 57ee0438b5..2fb728e4a3 100644 --- a/contentcuration/contentcuration/tests/test_models.py +++ b/contentcuration/contentcuration/tests/test_models.py @@ -42,25 +42,25 @@ def object_storage_name_tests(): "no_extension", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "vtt", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.vtt" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.vtt", # expected ), ( "no_extension", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4", # expected ), ( "has_extension.txt", # filename 
"8818ed27d0a84b016eb7907b5b4766c4", # checksum "vtt", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt", # expected ), ( "has_extension.txt", # filename "8818ed27d0a84b016eb7907b5b4766c4", # checksum "", # file_format_id - "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt" # expected + "storage/8/8/8818ed27d0a84b016eb7907b5b4766c4.txt", # expected ), ] @@ -71,22 +71,26 @@ def test_object_storage_name(object_storage_name_tests): actual_name = object_storage_name(test_file, filename) - assert actual_name == expected_name, \ - "Storage names don't match: Expected: '{}' Actual '{}'".format(expected_name, - actual_name) + assert ( + actual_name == expected_name + ), "Storage names don't match: Expected: '{}' Actual '{}'".format( + expected_name, actual_name + ) def test_generate_object_storage_name(object_storage_name_tests): for filename, checksum, file_format_id, expected_name in object_storage_name_tests: - default_ext = '' + default_ext = "" if file_format_id: - default_ext = '.{}'.format(file_format_id) + default_ext = ".{}".format(file_format_id) actual_name = generate_object_storage_name(checksum, filename, default_ext) - assert actual_name == expected_name, \ - "Storage names don't match: Expected: '{}' Actual '{}'".format(expected_name, - actual_name) + assert ( + actual_name == expected_name + ), "Storage names don't match: Expected: '{}' Actual '{}'".format( + expected_name, actual_name + ) def create_contentnode(parent_id): @@ -101,21 +105,15 @@ def create_contentnode(parent_id): def create_assessment_item(parent_id): - return AssessmentItem.objects.create( - contentnode=create_contentnode(parent_id) - ) + return AssessmentItem.objects.create(contentnode=create_contentnode(parent_id)) def create_assessment_item_file(parent_id): - return File.objects.create( - assessment_item=create_assessment_item(parent_id) - ) + return File.objects.create(assessment_item=create_assessment_item(parent_id)) def create_file(parent_id): - return File.objects.create( - contentnode=create_contentnode(parent_id) - ) + return File.objects.create(contentnode=create_contentnode(parent_id)) class PermissionQuerysetTestCase(StudioTestCase): @@ -139,12 +137,18 @@ def forbidden_user(self): return user def assertQuerysetContains(self, queryset, **filters): - self.assertGreater(queryset.filter(**filters).count(), 0, - "Queryset does not contain objects for: {}".format(filters)) + self.assertGreater( + queryset.filter(**filters).count(), + 0, + "Queryset does not contain objects for: {}".format(filters), + ) def assertQuerysetDoesNotContain(self, queryset, **filters): - self.assertEqual(queryset.filter(**filters).count(), 0, - "Queryset contains objects for: {}".format(filters)) + self.assertEqual( + queryset.filter(**filters).count(), + 0, + "Queryset contains objects for: {}".format(filters), + ) class ChannelTestCase(PermissionQuerysetTestCase): @@ -155,7 +159,9 @@ def base_queryset(self): def test_filter_view_queryset__public_channel(self): channel = self.public_channel - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetContains(queryset, pk=channel.id) user = testdata.user() @@ -168,7 +174,9 @@ def test_filter_view_queryset__public_channel__deleted(self): channel.deleted = True channel.save(actor_id=self.admin_user.id) - queryset = Channel.filter_view_queryset(self.base_queryset, 
user=self.forbidden_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -179,13 +187,17 @@ def test_filter_view_queryset__public_channel__deleted(self): def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetContains(queryset, pk=channel.id) def test_filter_view_queryset__private_channel(self): channel = testdata.channel() - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -207,13 +219,17 @@ def test_filter_view_queryset__private_channel__pending_editor(self): def test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() - queryset = Channel.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) def test_filter_edit_queryset__public_channel(self): channel = self.public_channel - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -228,13 +244,17 @@ def test_filter_edit_queryset__public_channel(self): def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) user = testdata.user() @@ -249,7 +269,9 @@ def test_filter_edit_queryset__private_channel(self): def test_filter_edit_queryset__private_channel__anonymous(self): channel = testdata.channel() - queryset = Channel.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = Channel.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=channel.id) def test_get_server_rev(self): @@ -267,11 +289,13 @@ def create_change(server_rev, applied): kwargs={}, ) - Change.objects.bulk_create([ - create_change(1, True), - create_change(2, True), - create_change(3, False), - ]) + Change.objects.bulk_create( + [ + create_change(1, True), + create_change(2, True), + create_change(3, False), + ] + ) self.assertEqual(channel.get_server_rev(), 2) @@ -285,7 +309,9 @@ def test_filter_view_queryset__public_channel(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = 
ContentNode.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetContains(queryset, pk=contentnode.id) @@ -299,7 +325,9 @@ def test_filter_view_queryset__public_channel__anonymous(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetContains(queryset, pk=contentnode.id) @@ -307,7 +335,9 @@ def test_filter_view_queryset__private_channel(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -321,7 +351,9 @@ def test_filter_view_queryset__private_channel__anonymous(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -336,7 +368,9 @@ def test_filter_view_queryset__orphan_tree(self): def test_filter_view_queryset__orphan_tree__anonymous(self): contentnode = create_contentnode(settings.ORPHANAGE_ROOT_ID) - queryset = ContentNode.filter_view_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_view_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -344,7 +378,9 @@ def test_filter_edit_queryset__public_channel(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -363,7 +399,9 @@ def test_filter_edit_queryset__public_channel__anonymous(self): channel = self.public_channel contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.anonymous_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.anonymous_user + ) self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID) self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id) @@ -371,7 +409,9 @@ def test_filter_edit_queryset__private_channel(self): channel = testdata.channel() contentnode = create_contentnode(channel.main_tree_id) - queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.forbidden_user) + queryset = ContentNode.filter_edit_queryset( + self.base_queryset, user=self.forbidden_user + ) 
         self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID)
         self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id)
 
@@ -390,7 +430,9 @@ def test_filter_edit_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         contentnode = create_contentnode(channel.main_tree_id)
 
-        queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = ContentNode.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
 
         self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID)
         self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id)
@@ -405,7 +447,9 @@ def test_filter_edit_queryset__orphan_tree(self):
 
     def test_filter_edit_queryset__orphan_tree__anonymous(self):
         contentnode = create_contentnode(settings.ORPHANAGE_ROOT_ID)
-        queryset = ContentNode.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = ContentNode.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
 
         self.assertQuerysetDoesNotContain(queryset, pk=settings.ORPHANAGE_ROOT_ID)
         self.assertQuerysetDoesNotContain(queryset, pk=contentnode.id)
@@ -437,7 +481,9 @@ def test_filter_by_pk__sets_cache(self):
 
         with self.settings(IS_CONTENTNODE_TABLE_PARTITIONED=True):
             node = ContentNode.filter_by_pk(pk=contentnode.id).first()
-            tree_id_from_cache = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=contentnode.id))
+            tree_id_from_cache = cache.get(
+                CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=contentnode.id)
+            )
             self.assertEqual(node.tree_id, tree_id_from_cache)
 
     def test_filter_by_pk__doesnot_query_db_when_cache_hit(self):
@@ -466,9 +512,13 @@ def test_filter_by_pk__tree_id_updated_on_move(self):
         sourcenode.move_to(targetnode, "last-child")
 
         after_move_sourcenode = ContentNode.filter_by_pk(sourcenode.id).first()
-        tree_id_from_cache = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=sourcenode.id))
+        tree_id_from_cache = cache.get(
+            CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=sourcenode.id)
+        )
 
-        self.assertEqual(after_move_sourcenode.tree_id, testchannel.trash_tree.tree_id)
+        self.assertEqual(
+            after_move_sourcenode.tree_id, testchannel.trash_tree.tree_id
+        )
         self.assertEqual(tree_id_from_cache, testchannel.trash_tree.tree_id)
 
     def test_make_content_id_unique(self):
@@ -506,7 +556,9 @@ def test_filter_view_queryset__public_channel(self):
         channel = self.public_channel
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = AssessmentItem.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetContains(queryset, pk=assessment_item.id)
 
         user = testdata.user()
@@ -518,14 +570,18 @@ def test_filter_view_queryset__public_channel__anonymous(self):
         channel = self.public_channel
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = AssessmentItem.filter_view_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetContains(queryset, pk=assessment_item.id)
 
     def test_filter_view_queryset__private_channel(self):
         channel = testdata.channel()
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = AssessmentItem.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id)
 
         user = testdata.user()
@@ -537,14 +593,18 @@ def test_filter_view_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_view_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = AssessmentItem.filter_view_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id)
 
     def test_filter_edit_queryset__public_channel(self):
         channel = self.public_channel
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = AssessmentItem.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id)
 
         user = testdata.user()
@@ -560,14 +620,18 @@ def test_filter_edit_queryset__public_channel__anonymous(self):
         channel = self.public_channel
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = AssessmentItem.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id)
 
     def test_filter_edit_queryset__private_channel(self):
         channel = testdata.channel()
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = AssessmentItem.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id)
 
         user = testdata.user()
@@ -583,7 +647,9 @@ def test_filter_edit_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         assessment_item = create_assessment_item(channel.main_tree_id)
 
-        queryset = AssessmentItem.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = AssessmentItem.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_item.id)
 
 
@@ -596,7 +662,9 @@ def test_filter_view_queryset__public_channel(self):
         channel = self.public_channel
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetContains(queryset, pk=node_file.id)
 
         user = testdata.user()
@@ -608,14 +676,18 @@ def test_filter_view_queryset__public_channel__anonymous(self):
         channel = self.public_channel
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetContains(queryset, pk=node_file.id)
 
     def test_filter_view_queryset__private_channel(self):
         channel = testdata.channel()
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
         user = testdata.user()
@@ -627,14 +699,18 @@ def test_filter_view_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
     def test_filter_view_queryset__uploaded_by(self):
         user = testdata.user()
         node_file = File.objects.create(uploaded_by=user)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
         queryset = File.filter_view_queryset(self.base_queryset, user=user)
@@ -644,7 +720,9 @@ def test_filter_edit_queryset__public_channel(self):
         channel = self.public_channel
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
         user = testdata.user()
@@ -660,14 +738,18 @@ def test_filter_edit_queryset__public_channel__anonymous(self):
         channel = self.public_channel
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
     def test_filter_edit_queryset__private_channel(self):
         channel = testdata.channel()
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
         user = testdata.user()
@@ -683,14 +765,18 @@ def test_filter_edit_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         node_file = create_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
     def test_filter_edit_queryset__uploaded_by(self):
         user = testdata.user()
         node_file = File.objects.create(uploaded_by=user)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=node_file.id)
 
         queryset = File.filter_edit_queryset(self.base_queryset, user=user)
@@ -733,7 +819,7 @@ def test_invalid_file_format(self):
 
             File.objects.create(
                 contentnode=create_contentnode(channel.main_tree_id),
                 preset_id=format_presets.EPUB,
-                file_format_id='pptx',
+                file_format_id="pptx",
             )
 
@@ -746,7 +832,9 @@ def test_filter_view_queryset__public_channel(self):
         channel = self.public_channel
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetContains(queryset, pk=assessment_file.id)
 
         user = testdata.user()
@@ -758,14 +846,18 @@ def test_filter_view_queryset__public_channel__anonymous(self):
         channel = self.public_channel
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetContains(queryset, pk=assessment_file.id)
 
     def test_filter_view_queryset__private_channel(self):
         channel = testdata.channel()
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id)
 
         user = testdata.user()
@@ -777,14 +869,18 @@ def test_filter_view_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_view_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_view_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id)
 
     def test_filter_edit_queryset__public_channel(self):
         channel = self.public_channel
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id)
 
         user = testdata.user()
@@ -800,14 +896,18 @@ def test_filter_edit_queryset__public_channel__anonymous(self):
         channel = self.public_channel
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id)
 
     def test_filter_edit_queryset__private_channel(self):
         channel = testdata.channel()
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.forbidden_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.forbidden_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id)
 
         user = testdata.user()
@@ -823,12 +923,14 @@ def test_filter_edit_queryset__private_channel__anonymous(self):
         channel = testdata.channel()
         assessment_file = create_assessment_item_file(channel.main_tree_id)
 
-        queryset = File.filter_edit_queryset(self.base_queryset, user=self.anonymous_user)
+        queryset = File.filter_edit_queryset(
+            self.base_queryset, user=self.anonymous_user
+        )
         self.assertQuerysetDoesNotContain(queryset, pk=assessment_file.id)
 
 
 class UserTestCase(StudioTestCase):
-    def _create_user(self, email, password='password', is_active=True):
+    def _create_user(self, email, password="password", is_active=True):
         user = User.objects.create(email=email)
         user.set_password(password)
         user.is_active = is_active
@@ -840,15 +942,20 @@ def _setup_user_related_data(self):
         user_b = self._create_user("b@tester.com")
 
         # Create a sole editor non-public channel.
-        sole_editor_channel = Channel.objects.create(name="sole-editor", actor_id=user_a.id)
+        sole_editor_channel = Channel.objects.create(
+            name="sole-editor", actor_id=user_a.id
+        )
         sole_editor_channel.editors.add(user_a)
 
         # Create sole-editor channel nodes.
         for i in range(0, 3):
-            testdata.node({
-                "title": "sole-editor-channel-node",
-                "kind_id": "video",
-            }, parent=sole_editor_channel.main_tree)
+            testdata.node(
+                {
+                    "title": "sole-editor-channel-node",
+                    "kind_id": "video",
+                },
+                parent=sole_editor_channel.main_tree,
+            )
 
         # Create a sole editor public channel.
         public_channel = testdata.channel("public")
@@ -917,7 +1024,9 @@ def test_delete(self):
         # Sets is_active to False?
         self.assertEqual(user.is_active, False)
         # Creates user history?
-        user_delete_history = UserHistory.objects.filter(user_id=user.id, action=user_history.DELETION).first()
+        user_delete_history = UserHistory.objects.filter(
+            user_id=user.id, action=user_history.DELETION
+        ).first()
         self.assertIsNotNone(user_delete_history)
 
     def test_recover(self):
@@ -930,7 +1039,9 @@ def test_recover(self):
         # Keeps is_active to False?
         self.assertEqual(user.is_active, False)
         # Creates user history?
-        user_recover_history = UserHistory.objects.filter(user_id=user.id, action=user_history.RECOVERY).first()
+        user_recover_history = UserHistory.objects.filter(
+            user_id=user.id, action=user_history.RECOVERY
+        ).first()
         self.assertIsNotNone(user_recover_history)
 
     def test_hard_delete_user_related_data(self):
@@ -945,7 +1056,11 @@ def test_hard_delete_user_related_data(self):
         self.assertTrue(Channel.objects.filter(name="public").exists())
 
         # Deletes all user related invitations.
-        self.assertFalse(Invitation.objects.filter(Q(sender_id=user.id) | Q(invited_id=user.id)).exists())
+        self.assertFalse(
+            Invitation.objects.filter(
+                Q(sender_id=user.id) | Q(invited_id=user.id)
+            ).exists()
+        )
 
         # Deletes sole-editor channelsets.
         self.assertFalse(ChannelSet.objects.filter(name="sole-editor").exists())
@@ -955,10 +1070,16 @@ def test_hard_delete_user_related_data(self):
         self.assertTrue(ChannelSet.objects.filter(name="public").exists())
 
         # All contentnodes of sole-editor channel points to ORPHANGE ROOT NODE?
-        self.assertFalse(ContentNode.objects.filter(~Q(parent_id=settings.ORPHANAGE_ROOT_ID)
-                                                    & Q(title="sole-editor-channel-node")).exists())
+        self.assertFalse(
+            ContentNode.objects.filter(
+                ~Q(parent_id=settings.ORPHANAGE_ROOT_ID)
+                & Q(title="sole-editor-channel-node")
+            ).exists()
+        )
         # Creates user history?
-        user_hard_delete_history = UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).first()
+        user_hard_delete_history = UserHistory.objects.filter(
+            user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION
+        ).first()
         self.assertIsNotNone(user_hard_delete_history)
 
     def test_get_server_rev(self):
@@ -975,11 +1096,13 @@ def create_change(server_rev, applied):
                 kwargs={},
             )
 
-        Change.objects.bulk_create([
-            create_change(1, True),
-            create_change(2, True),
-            create_change(3, False),
-        ])
+        Change.objects.bulk_create(
+            [
+                create_change(1, True),
+                create_change(2, True),
+                create_change(3, False),
+            ]
+        )
 
         self.assertEqual(user.get_server_rev(), 2)
 
@@ -990,29 +1113,40 @@ def setUp(self):
         self.channel = testdata.channel()
 
     def test_mark_channel_created(self):
-        self.assertEqual(1, self.channel.history.filter(action=channel_history.CREATION).count())
+        self.assertEqual(
+            1, self.channel.history.filter(action=channel_history.CREATION).count()
+        )
 
     def test_mark_channel_deleted(self):
         self.assertEqual(0, self.channel.deletion_history.count())
         self.channel.deleted = True
         self.channel.save(actor_id=self.admin_user.id)
-        self.assertEqual(1, self.channel.deletion_history.filter(actor=self.admin_user).count())
+        self.assertEqual(
+            1, self.channel.deletion_history.filter(actor=self.admin_user).count()
+        )
 
     def test_mark_channel_recovered(self):
-        self.assertEqual(0, self.channel.history.filter(actor=self.admin_user, action=channel_history.RECOVERY).count())
+        self.assertEqual(
+            0,
+            self.channel.history.filter(
+                actor=self.admin_user, action=channel_history.RECOVERY
+            ).count(),
+        )
         self.channel.deleted = True
         self.channel.save(actor_id=self.admin_user.id)
         self.channel.deleted = False
         self.channel.save(actor_id=self.admin_user.id)
-        self.assertEqual(1, self.channel.history.filter(actor=self.admin_user, action=channel_history.RECOVERY).count())
+        self.assertEqual(
+            1,
+            self.channel.history.filter(
+                actor=self.admin_user, action=channel_history.RECOVERY
+            ).count(),
+        )
 
     def test_prune(self):
         i = 10
         now = timezone.now()
-        channels = [
-            self.channel,
-            testdata.channel()
-        ]
+        channels = [self.channel, testdata.channel()]
         last_history_ids = []
 
         ChannelHistory.objects.all().delete()
@@ -1033,11 +1167,12 @@ def test_prune(self):
         self.assertEqual(20, ChannelHistory.objects.count())
         ChannelHistory.prune()
         self.assertEqual(2, ChannelHistory.objects.count())
-        self.assertEqual(2, ChannelHistory.objects.filter(id__in=last_history_ids).count())
+        self.assertEqual(
+            2, ChannelHistory.objects.filter(id__in=last_history_ids).count()
+        )
 
 
 class FeedbackModelTests(StudioTestCase):
-
     @classmethod
     def setUpClass(cls):
         super(FeedbackModelTests, cls).setUpClass()
@@ -1048,25 +1183,34 @@ def setUp(self):
 
     def _create_base_feedback_data(self, context, contentnode_id, content_id):
         base_feedback_data = {
-            'context': context,
-            'contentnode_id': contentnode_id,
-            'content_id': content_id,
+            "context": context,
+            "contentnode_id": contentnode_id,
+            "content_id": content_id,
         }
         return base_feedback_data
 
     def _create_recommendation_event(self):
         channel = testdata.channel()
-        node_where_import_was_initiated = testdata.node({"kind_id": content_kinds.TOPIC, "title": "recomendations provided here"})
+        node_where_import_was_initiated = testdata.node(
+            {"kind_id": content_kinds.TOPIC, "title": "recomendations provided here"}
+        )
        base_feedback_data = self._create_base_feedback_data(
-            {'model_version': 1, 'breadcrums': "#Title#->Random"},
+            {"model_version": 1, "breadcrums": "#Title#->Random"},
             node_where_import_was_initiated.id,
-            node_where_import_was_initiated.content_id
+            node_where_import_was_initiated.content_id,
         )
         recommendations_event = RecommendationsEvent.objects.create(
             user=self.user,
             target_channel_id=channel.id,
             time_hidden=timezone.now(),
-            content=[{'content_id': str(uuid.uuid4()), 'node_id': str(uuid.uuid4()), 'channel_id': str(uuid.uuid4()), 'score': 4}],
+            content=[
+                {
+                    "content_id": str(uuid.uuid4()),
+                    "node_id": str(uuid.uuid4()),
+                    "channel_id": str(uuid.uuid4()),
+                    "score": 4,
+                }
+            ],
             **base_feedback_data
         )
 
@@ -1074,52 +1218,67 @@ def test_create_flag_feedback_event(self):
         channel = testdata.channel("testchannel")
-        flagged_node = testdata.node({"kind_id": content_kinds.TOPIC, "title": "SuS ContentNode"})
+        flagged_node = testdata.node(
+            {"kind_id": content_kinds.TOPIC, "title": "SuS ContentNode"}
+        )
         base_feedback_data = self._create_base_feedback_data(
-            {'spam': 'Spam or misleading'},
-            flagged_node.id,
-            flagged_node.content_id
+            {"spam": "Spam or misleading"}, flagged_node.id, flagged_node.content_id
         )
         flag_feedback_event = FlagFeedbackEvent.objects.create(
-            user=self.user,
-            target_channel_id=channel.id,
-            **base_feedback_data
+            user=self.user, target_channel_id=channel.id, **base_feedback_data
         )
 
         self.assertEqual(flag_feedback_event.user, self.user)
-        self.assertEqual(flag_feedback_event.context['spam'], 'Spam or misleading')
+        self.assertEqual(flag_feedback_event.context["spam"], "Spam or misleading")
 
     def test_create_recommendations_interaction_event(self):
         # This represents a node that was recommended by the model and was interacted by user!
-        recommended_node = testdata.node({"kind_id": content_kinds.TOPIC, "title": "This node was recommended by the model"})
+        recommended_node = testdata.node(
+            {
+                "kind_id": content_kinds.TOPIC,
+                "title": "This node was recommended by the model",
+            }
+        )
         base_feedback_data = self._create_base_feedback_data(
             {"comment": "explicit reason given by user why he rejected this node!"},
             recommended_node.id,
-            recommended_node.content_id
-            )
+            recommended_node.content_id,
+        )
         fk = self._create_recommendation_event().id
         rec_interaction_event = RecommendationsInteractionEvent.objects.create(
-            feedback_type='rejected',
-            feedback_reason='some predefined reasons like (not related)',
+            feedback_type="rejected",
+            feedback_reason="some predefined reasons like (not related)",
             recommendation_event_id=fk,
             **base_feedback_data
         )
 
-        self.assertEqual(rec_interaction_event.feedback_type, 'rejected')
-        self.assertEqual(rec_interaction_event.feedback_reason, 'some predefined reasons like (not related)')
+        self.assertEqual(rec_interaction_event.feedback_type, "rejected")
+        self.assertEqual(
+            rec_interaction_event.feedback_reason,
+            "some predefined reasons like (not related)",
+        )
 
     def test_create_recommendations_event(self):
         channel = testdata.channel()
-        node_where_import_was_initiated = testdata.node({"kind_id": content_kinds.TOPIC, "title": "recomendations provided here"})
+        node_where_import_was_initiated = testdata.node(
+            {"kind_id": content_kinds.TOPIC, "title": "recomendations provided here"}
+        )
         base_feedback_data = self._create_base_feedback_data(
-            {'model_version': 1, 'breadcrums': "#Title#->Random"},
+            {"model_version": 1, "breadcrums": "#Title#->Random"},
             node_where_import_was_initiated.id,
-            node_where_import_was_initiated.content_id
+            node_where_import_was_initiated.content_id,
         )
         recommendations_event = RecommendationsEvent.objects.create(
             user=self.user,
             target_channel_id=channel.id,
             time_hidden=timezone.now(),
-            content=[{'content_id': str(uuid.uuid4()), 'node_id': str(uuid.uuid4()), 'channel_id': str(uuid.uuid4()), 'score': 4}],
+            content=[
+                {
+                    "content_id": str(uuid.uuid4()),
+                    "node_id": str(uuid.uuid4()),
+                    "channel_id": str(uuid.uuid4()),
+                    "score": 4,
+                }
+            ],
             **base_feedback_data
         )
 
         self.assertEqual(len(recommendations_event.content), 1)
-        self.assertEqual(recommendations_event.content[0]['score'], 4)
+        self.assertEqual(recommendations_event.content[0]["score"], 4)
diff --git a/contentcuration/contentcuration/tests/test_parser.py b/contentcuration/contentcuration/tests/test_parser.py
index 9579db7904..68892df16e 100644
--- a/contentcuration/contentcuration/tests/test_parser.py
+++ b/contentcuration/contentcuration/tests/test_parser.py
@@ -42,16 +42,20 @@ def number_tests():
 
 
 def json_tests():
     return [
         ("{'a': 'b'}", {"a": "b"}),  # Test single quotes -> double quotes
-        ("{\"a\": False}", {"a": False}),  # Test False -> false
-        ("{\"a\": True}", {"a": True}),  # Test True -> true
+        ('{"a": False}', {"a": False}),  # Test False -> false
+        ('{"a": True}', {"a": True}),  # Test True -> true
     ]
 
 
 def test_numbers(number_tests):
     for val1, val2 in number_tests:
-        assert extract_value(val1) == val2, "Numbers don't match: {} != {}".format(val1, val2)
+        assert extract_value(val1) == val2, "Numbers don't match: {} != {}".format(
+            val1, val2
+        )
 
 
 def test_jsons(json_tests):
     for val1, val2 in json_tests:
-        assert load_json_string(val1) == val2, "JSONs don't match: {} != {}".format(val1, val2)
+        assert load_json_string(val1) == val2, "JSONs don't match: {} != {}".format(
+            val1, val2
+        )
diff --git a/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py b/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py
index 96382e25af..745ba4a5d2 100644
--- a/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py
+++ b/contentcuration/contentcuration/tests/test_rectify_source_field_migraiton_command.py
@@ -15,7 +15,6 @@
 
 
 class TestRectifyMigrationCommand(StudioAPITestCase):
-
     @classmethod
     def setUpClass(cls):
         super(TestRectifyMigrationCommand, cls).setUpClass()
@@ -36,7 +35,7 @@ def setUp(self):
             license_description=self.license_description_original,
             original_channel_id=None,
             source_channel_id=None,
-            author="old author"
+            author="old author",
         )
         self.user = testdata.user()
         self.original_channel.editors.add(self.user)
@@ -82,15 +81,21 @@ def create_source_channel_and_contentnode(self):
         return source_node, source_channel
 
     def run_migrations(self):
-        call_command('rectify_incorrect_contentnode_source_fields')
+        call_command("rectify_incorrect_contentnode_source_fields")
 
     def test_two_node_case(self):
-        base_node, base_channel = self.create_base_channel_and_contentnode(self.original_contentnode, self.original_channel)
+        base_node, base_channel = self.create_base_channel_and_contentnode(
+            self.original_contentnode, self.original_channel
+        )
 
         publish_channel(self.user.id, Channel.objects.get(pk=base_channel.pk).id)
         # main_tree node still has changed=true even after the publish
-        for node in Channel.objects.get(pk=base_channel.pk).main_tree.get_family().filter(changed=True):
+        for node in (
+            Channel.objects.get(pk=base_channel.pk)
+            .main_tree.get_family()
+            .filter(changed=True)
+        ):
             node.changed = False
             # This should probably again change the changed=true but suprisingly it doesnot
             # Meaning the changed boolean doesnot change for the main_tree no matter what we do
@@ -98,17 +103,28 @@ def test_two_node_case(self):
             node.save()
 
         ContentNode.objects.filter(pk=base_node.pk).update(
-            modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
+            modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
         )
         self.run_migrations()
         updated_base_node = ContentNode.objects.get(pk=base_node.pk)
-        self.assertEqual(updated_base_node.license_description, self.original_contentnode.license_description)
-        self.assertEqual(Channel.objects.get(pk=base_channel.id).main_tree.get_family().filter(changed=True).exists(), True)
+        self.assertEqual(
+            updated_base_node.license_description,
+            self.original_contentnode.license_description,
+        )
+        self.assertEqual(
+            Channel.objects.get(pk=base_channel.id)
+            .main_tree.get_family()
+            .filter(changed=True)
+            .exists(),
+            True,
+        )
 
     def test_three_node_case_implicit(self):
         source_node, source_channel = self.create_source_channel_and_contentnode()
-        base_node, base_channel = self.create_base_channel_and_contentnode(source_node, source_channel)
+        base_node, base_channel = self.create_base_channel_and_contentnode(
+            source_node, source_channel
+        )
         source_node.aggregator = "Nami"
         source_node.save()
         # Implicit case
@@ -119,12 +135,16 @@ def test_three_node_case_implicit(self):
 
         publish_channel(self.user.id, Channel.objects.get(pk=base_channel.pk).id)
 
-        for node in Channel.objects.get(pk=base_channel.pk).main_tree.get_family().filter(changed=True):
+        for node in (
+            Channel.objects.get(pk=base_channel.pk)
+            .main_tree.get_family()
+            .filter(changed=True)
+        ):
             node.changed = False
             node.save()
 
         ContentNode.objects.filter(pk=base_node.pk).update(
-            modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
+            modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
         )
 
         ContentNode.objects.filter(pk=source_node.pk).update(
@@ -134,25 +154,43 @@ def test_three_node_case_implicit(self):
         self.run_migrations()
         updated_base_node = ContentNode.objects.get(pk=base_node.pk)
         updated_source_node = ContentNode.objects.get(pk=source_node.pk)
-        self.assertEqual(updated_base_node.license_description, self.original_contentnode.license_description)
-        self.assertEqual(updated_source_node.license_description, self.original_contentnode.license_description)
-        self.assertEqual(Channel.objects.get(pk=base_channel.id).main_tree.get_family().filter(changed=True).exists(), True)
+        self.assertEqual(
+            updated_base_node.license_description,
+            self.original_contentnode.license_description,
+        )
+        self.assertEqual(
+            updated_source_node.license_description,
+            self.original_contentnode.license_description,
+        )
+        self.assertEqual(
+            Channel.objects.get(pk=base_channel.id)
+            .main_tree.get_family()
+            .filter(changed=True)
+            .exists(),
+            True,
+        )
 
     def test_three_node_case_explicit(self):
         source_node, source_channel = self.create_source_channel_and_contentnode()
-        base_node, base_channel = self.create_base_channel_and_contentnode(source_node, source_channel)
+        base_node, base_channel = self.create_base_channel_and_contentnode(
+            source_node, source_channel
+        )
         source_node.license_description = "luffy"
         base_node.license_description = "zoro"
         base_node.save()
         source_node.save()
         publish_channel(self.user.id, Channel.objects.get(pk=base_channel.pk).id)
 
-        for node in Channel.objects.get(pk=base_channel.pk).main_tree.get_family().filter(changed=True):
+        for node in (
+            Channel.objects.get(pk=base_channel.pk)
+            .main_tree.get_family()
+            .filter(changed=True)
+        ):
             node.changed = False
             node.save()
 
         ContentNode.objects.filter(pk=base_node.pk).update(
-            modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
+            modified=datetime.datetime(2023, 7, 5, tzinfo=timezone.utc)
         )
 
         ContentNode.objects.filter(pk=source_node.pk).update(
@@ -162,6 +200,18 @@ def test_three_node_case_explicit(self):
         self.run_migrations()
         updated_base_node = ContentNode.objects.get(pk=base_node.pk)
         updated_source_node = ContentNode.objects.get(pk=source_node.pk)
-        self.assertEqual(updated_base_node.license_description, self.original_contentnode.license_description)
-        self.assertEqual(updated_source_node.license_description, self.original_contentnode.license_description)
-        self.assertEqual(Channel.objects.get(pk=base_channel.id).main_tree.get_family().filter(changed=True).exists(), True)
+        self.assertEqual(
+            updated_base_node.license_description,
+            self.original_contentnode.license_description,
+        )
+        self.assertEqual(
+            updated_source_node.license_description,
+            self.original_contentnode.license_description,
+        )
+        self.assertEqual(
+            Channel.objects.get(pk=base_channel.id)
+            .main_tree.get_family()
+            .filter(changed=True)
+            .exists(),
+            True,
+        )
diff --git a/contentcuration/contentcuration/tests/test_restore_channel.py b/contentcuration/contentcuration/tests/test_restore_channel.py
index a4d1e13a39..6c5e1500ff 100644
--- a/contentcuration/contentcuration/tests/test_restore_channel.py
+++ b/contentcuration/contentcuration/tests/test_restore_channel.py
@@ -23,58 +23,61 @@
 thumbnail_path = "/content/thumbnail.png"
 
 ASSESSMENT_DATA = {
-    'input-question-test': {
-        'template': 'perseus/input_question.json',
-        'type': exercises.INPUT_QUESTION,
-        'question': "Input question",
-        'question_images': [{"name": "test.jpg", "width": 12.71, "height": 12.12}],
-        'hints': [{'hint': 'Hint 1'}],
-        'answers': [
-            {'answer': '1', 'correct': True, 'images': []},
-            {'answer': '2', 'correct': True, 'images': []}
+    "input-question-test": {
+        "template": "perseus/input_question.json",
+        "type": exercises.INPUT_QUESTION,
+        "question": "Input question",
+        "question_images": [{"name": "test.jpg", "width": 12.71, "height": 12.12}],
+        "hints": [{"hint": "Hint 1"}],
+        "answers": [
+            {"answer": "1", "correct": True, "images": []},
+            {"answer": "2", "correct": True, "images": []},
         ],
-        'order': 0
+        "order": 0,
     },
-    'multiple-selection-test': {
-        'template': 'perseus/multiple_selection.json',
-        'type': exercises.MULTIPLE_SELECTION,
-        'question': "Multiple selection question",
-        'question_images': [],
-        'hints': [],
-        'answers': [
-            {'answer': 'A', 'correct': True, 'images': []},
-            {'answer': 'B', 'correct': True, 'images': []},
-            {'answer': 'C', 'correct': False, 'images': []},
+    "multiple-selection-test": {
+        "template": "perseus/multiple_selection.json",
+        "type": exercises.MULTIPLE_SELECTION,
+        "question": "Multiple selection question",
+        "question_images": [],
+        "hints": [],
+        "answers": [
+            {"answer": "A", "correct": True, "images": []},
+            {"answer": "B", "correct": True, "images": []},
+            {"answer": "C", "correct": False, "images": []},
         ],
-        'multiple_select': True,
-        'order': 1,
-        'randomize': False
+        "multiple_select": True,
+        "order": 1,
+        "randomize": False,
     },
-    'single-selection-test': {
-        'template': 'perseus/multiple_selection.json',
-        'type': exercises.SINGLE_SELECTION,
-        'question': "Single select question",
-        'question_images': [],
-        'hints': [{'hint': 'Hint test'}],
-        'answers': [
-            {'answer': 'Correct answer', 'correct': True, 'images': []},
-            {'answer': 'Incorrect answer', 'correct': False, 'images': []},
+    "single-selection-test": {
+        "template": "perseus/multiple_selection.json",
+        "type": exercises.SINGLE_SELECTION,
+ "question": "Single select question", + "question_images": [], + "hints": [{"hint": "Hint test"}], + "answers": [ + {"answer": "Correct answer", "correct": True, "images": []}, + {"answer": "Incorrect answer", "correct": False, "images": []}, ], - 'multiple_select': False, - 'order': 2, - 'randomize': True + "multiple_select": False, + "order": 2, + "randomize": True, + }, + "perseus-question-test": { + "template": "perseus/perseus_question.json", + "type": exercises.PERSEUS_QUESTION, + "order": 3, + "raw_data": "{}", }, - 'perseus-question-test': { - 'template': 'perseus/perseus_question.json', - 'type': exercises.PERSEUS_QUESTION, - 'order': 3, - 'raw_data': '{}' - } } class ChannelRestoreUtilityFunctionTestCase(StudioTestCase): - @patch("contentcuration.utils.import_tools.write_to_thumbnail_file", return_value=thumbnail_path) + @patch( + "contentcuration.utils.import_tools.write_to_thumbnail_file", + return_value=thumbnail_path, + ) def setUp(self, thumb_mock): self.id = uuid.uuid4().hex self.name = "test name" @@ -108,7 +111,9 @@ def test_restore_channel_thumbnail(self): self.assertEqual(self.channel.thumbnail, thumbnail_path) def test_restore_channel_thumbnail_encoding(self): - self.assertEqual(self.channel.thumbnail_encoding["base64"], self.thumbnail_encoding) + self.assertEqual( + self.channel.thumbnail_encoding["base64"], self.thumbnail_encoding + ) def test_restore_channel_version(self): self.assertEqual(self.channel.version, self.version) @@ -117,63 +122,69 @@ def test_restore_channel_version(self): class PerseusRestoreTestCase(StudioTestCase): def setUp(self): super(PerseusRestoreTestCase, self).setUp() - image_path = generate_object_storage_name('test', 'test.png') - default_storage.save(image_path, BytesIO(b'test')) + image_path = generate_object_storage_name("test", "test.png") + default_storage.save(image_path, BytesIO(b"test")) def test_process_content(self): tests = [ + {"content": "test 1", "output": "test 1", "images": {}}, { - "content": 'test 1', - "output": 'test 1', - 'images': {} + "content": "test 2 ![test](${☣ LOCALPATH}/images/test.png)", + "output": "test 2 ![test](${☣ CONTENTSTORAGE}/test.png)", + "images": {}, }, { - "content": 'test 2 ![test](${☣ LOCALPATH}/images/test.png)', - "output": 'test 2 ![test](${☣ CONTENTSTORAGE}/test.png)', - 'images': {} + "content": "test 3 ![](${☣ LOCALPATH}/images/test.png)", + "output": "test 3 ![](${☣ CONTENTSTORAGE}/test.png =50x50)", + "images": { + "${☣ LOCALPATH}/images/test.png": {"width": 50, "height": 50} + }, }, { - "content": 'test 3 ![](${☣ LOCALPATH}/images/test.png)', - "output": 'test 3 ![](${☣ CONTENTSTORAGE}/test.png =50x50)', - 'images': { - '${☣ LOCALPATH}/images/test.png': { - 'width': 50, - 'height': 50 - } - } + "content": "test 4 ![](${☣ LOCALPATH}/images/test.png) ![](${☣ LOCALPATH}/images/test.png)", + "output": "test 4 ![](${☣ CONTENTSTORAGE}/test.png) ![](${☣ CONTENTSTORAGE}/test.png)", + "images": {}, }, { - "content": 'test 4 ![](${☣ LOCALPATH}/images/test.png) ![](${☣ LOCALPATH}/images/test.png)', - "output": 'test 4 ![](${☣ CONTENTSTORAGE}/test.png) ![](${☣ CONTENTSTORAGE}/test.png)', - 'images': {} + "content": "test 5  $\\sqrt{36}+\\frac{1}{2}$ ", + "output": "test 5 $$\\sqrt{36}+\\frac{1}{2}$$", + "images": {}, }, { - "content": 'test 5  $\\sqrt{36}+\\frac{1}{2}$ ', - "output": 'test 5 $$\\sqrt{36}+\\frac{1}{2}$$', - 'images': {} + "content": "test 6 $\\frac{1}{2}$ $\\frac{3}{2}$", + "output": "test 6 $$\\frac{1}{2}$$ $$\\frac{3}{2}$$", + "images": {}, }, - { - "content": 'test 6 
$\\frac{1}{2}$ $\\frac{3}{2}$', - "output": 'test 6 $$\\frac{1}{2}$$ $$\\frac{3}{2}$$', - 'images': {} - } ] for test in tests: result = process_content(test, mixer.blend(AssessmentItem)) - self.assertEqual(result, test['output']) + self.assertEqual(result, test["output"]) def test_generate_assessment_item(self): # Run in Spanish to ensure we are properly creating JSON with non-localized numbers activate("es-es") for assessment_id, data in list(ASSESSMENT_DATA.items()): - assessment_data = json.loads(render_to_string(data['template'], data).encode('utf-8', "ignore")) - assessment_item = generate_assessment_item(assessment_id, data['order'], data['type'], assessment_data) - self.assertEqual(assessment_item.type, data['type']) - self.assertEqual(assessment_item.question, data.get('question', '')) - self.assertEqual(assessment_item.randomize, bool(data.get('randomize'))) - self.assertEqual(assessment_item.raw_data, data.get('raw_data', '')) + assessment_data = json.loads( + render_to_string(data["template"], data).encode("utf-8", "ignore") + ) + assessment_item = generate_assessment_item( + assessment_id, data["order"], data["type"], assessment_data + ) + self.assertEqual(assessment_item.type, data["type"]) + self.assertEqual(assessment_item.question, data.get("question", "")) + self.assertEqual(assessment_item.randomize, bool(data.get("randomize"))) + self.assertEqual(assessment_item.raw_data, data.get("raw_data", "")) for hint in json.loads(assessment_item.hints): - self.assertTrue(any(h for h in data['hints'] if h['hint'] == hint['hint'])) + self.assertTrue( + any(h for h in data["hints"] if h["hint"] == hint["hint"]) + ) for answer in json.loads(assessment_item.answers): - self.assertTrue(any(a for a in data['answers'] if a['answer'] == str(answer['answer']) and a['correct'] == answer['correct'])) + self.assertTrue( + any( + a + for a in data["answers"] + if a["answer"] == str(answer["answer"]) + and a["correct"] == answer["correct"] + ) + ) deactivate() diff --git a/contentcuration/contentcuration/tests/test_secrettoken_model.py b/contentcuration/contentcuration/tests/test_secrettoken_model.py index c799b1a42e..fa0ecfadf3 100755 --- a/contentcuration/contentcuration/tests/test_secrettoken_model.py +++ b/contentcuration/contentcuration/tests/test_secrettoken_model.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - from django.test import TestCase from le_utils import proquint diff --git a/contentcuration/contentcuration/tests/test_serializers.py b/contentcuration/contentcuration/tests/test_serializers.py index 64ec90072a..d8730a2bd8 100644 --- a/contentcuration/contentcuration/tests/test_serializers.py +++ b/contentcuration/contentcuration/tests/test_serializers.py @@ -33,7 +33,9 @@ def ensure_no_querysets_in_serializer(object): class ContentNodeSerializerTestCase(BaseAPITestCase): def setUp(self): super(ContentNodeSerializerTestCase, self).setUp() - self.data = dict(extra_fields=dict(options=dict(modality="QUIZ")), complete=True) + self.data = dict( + extra_fields=dict(options=dict(modality="QUIZ")), complete=True + ) self.node = ContentNode(kind_id=content_kinds.VIDEO) @property @@ -44,7 +46,13 @@ def test_no_completion_criteria(self): self.assertTrue(self.serializer.is_valid()) def test_completion_criteria__valid(self): - self.data["extra_fields"]["options"].update(completion_criteria={"model": "time", "threshold": 10, "learner_managed": True}) + self.data["extra_fields"]["options"].update( + completion_criteria={ + "model": "time", + "threshold": 10, + "learner_managed": True, + } + ) 
         serializer = self.serializer
         serializer.is_valid()
         try:
@@ -53,7 +61,9 @@ def test_completion_criteria__invalid(self):
             self.fail("Completion criteria should be valid")
 
     def test_completion_criteria__invalid(self):
-        self.data["extra_fields"]["options"].update(completion_criteria={"model": "time", "threshold": "test"})
+        self.data["extra_fields"]["options"].update(
+            completion_criteria={"model": "time", "threshold": "test"}
+        )
         serializer = self.serializer
         serializer.is_valid()
         with self.assertRaises(serializers.ValidationError):
@@ -69,17 +79,19 @@ def test_repr_doesnt_evaluate_querysets(self):
             ContentNode.objects.filter(node_id__in=node_ids), many=True
         )
 
-        object = ContentNodeSerializer(
-            ContentNode.objects.get(node_id=node_ids[0])
-        )
+        object = ContentNodeSerializer(ContentNode.objects.get(node_id=node_ids[0]))
 
         # Ensure we don't evaluate querysets when repr is called on a Serializer. See docs for
         # no_field_eval_repr in contentcuration/serializers.py for more info.
         obj_string = repr(object)
-        assert "QuerySet" not in obj_string, "object __repr__ contains queryset: {}".format(obj_string)
+        assert (
+            "QuerySet" not in obj_string
+        ), "object __repr__ contains queryset: {}".format(obj_string)
 
         objs_string = repr(objects)
-        assert "QuerySet" not in objs_string, "objects __repr__ contains queryset: {}".format(objs_string)
+        assert (
+            "QuerySet" not in objs_string
+        ), "objects __repr__ contains queryset: {}".format(objs_string)
 
 
 class ContentDefaultsSerializerTestCase(BaseAPITestCase):
@@ -121,13 +133,25 @@ def test_update(self):
         self.assertEqual(defaults, s.save())
 
     def test_update__merge(self):
-        defaults = dict(author="Buster", aggregator="Aggregators R US", provider="USA",)
+        defaults = dict(
+            author="Buster",
+            aggregator="Aggregators R US",
+            provider="USA",
+        )
 
         s = ContentDefaultsSerializer(
-            defaults, data=dict(author="Duster", provider="Canada",)
+            defaults,
+            data=dict(
+                author="Duster",
+                provider="Canada",
+            ),
         )
 
         self.assertTrue(s.is_valid())
         self.assertEqual(
-            dict(author="Duster", aggregator="Aggregators R US", provider="Canada",),
+            dict(
+                author="Duster",
+                aggregator="Aggregators R US",
+                provider="Canada",
+            ),
             s.save(),
         )
 
@@ -203,32 +227,32 @@ def setUp(self):
 
     def _create_base_feedback_data(self, context, contentnode_id, content_id):
         base_feedback_data = {
-            'context': context,
-            'contentnode_id': contentnode_id,
-            'content_id': content_id,
+            "context": context,
+            "contentnode_id": contentnode_id,
+            "content_id": content_id,
         }
         return base_feedback_data
 
     def test_deserialization_and_validation(self):
         data = {
-            'user': self.user.id,
-            'target_channel_id': str(self.channel.id),
-            'context': {'test_key': 'test_value'},
-            'contentnode_id': str(self.flagged_node.id),
-            'content_id': str(self.flagged_node.content_id),
-            'feedback_type': 'FLAGGED',
-            'feedback_reason': 'Reason1.....'
+ "user": self.user.id, + "target_channel_id": str(self.channel.id), + "context": {"test_key": "test_value"}, + "contentnode_id": str(self.flagged_node.id), + "content_id": str(self.flagged_node.content_id), + "feedback_type": "FLAGGED", + "feedback_reason": "Reason1.....", } serializer = FlagFeedbackEventSerializer(data=data) self.assertTrue(serializer.is_valid(), serializer.errors) instance = serializer.save() - self.assertEqual(instance.context, data['context']) - self.assertEqual(instance.user.id, data['user']) - self.assertEqual(instance.feedback_type, data['feedback_type']) - self.assertEqual(instance.feedback_reason, data['feedback_reason']) + self.assertEqual(instance.context, data["context"]) + self.assertEqual(instance.user.id, data["user"]) + self.assertEqual(instance.feedback_type, data["feedback_type"]) + self.assertEqual(instance.feedback_reason, data["feedback_reason"]) def test_invalid_data(self): - data = {'context': 'invalid'} + data = {"context": "invalid"} serializer = FlagFeedbackEventSerializer(data=data) self.assertFalse(serializer.is_valid()) @@ -254,39 +278,48 @@ def setUp(self): target_channel_id=self.channel.id, content_id=self.node_where_import_is_initiated.content_id, contentnode_id=self.node_where_import_is_initiated.id, - context={'model_version': 1, 'breadcrumbs': "#Title#->Random"}, + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, time_hidden=timezone.now(), - content=[{'content_id': str(uuid.uuid4()), 'node_id': str(uuid.uuid4()), 'channel_id': str(uuid.uuid4()), 'score': 4}] + content=[ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], ) def test_deserialization_and_validation(self): data = { - 'context': {'test_key': 'test_value'}, - 'contentnode_id': str(self.interaction_node.id), - 'content_id': str(self.interaction_node.content_id), - 'feedback_type': 'IGNORED', - 'feedback_reason': '----', - 'recommendation_event_id': str(self.recommendation_event.id) + "context": {"test_key": "test_value"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "IGNORED", + "feedback_reason": "----", + "recommendation_event_id": str(self.recommendation_event.id), } serializer = RecommendationsInteractionEventSerializer(data=data) self.assertTrue(serializer.is_valid(), serializer.errors) instance = serializer.save() - self.assertEqual(instance.context, data['context']) - self.assertEqual(instance.feedback_type, data['feedback_type']) - self.assertEqual(str(instance.recommendation_event_id), data['recommendation_event_id']) + self.assertEqual(instance.context, data["context"]) + self.assertEqual(instance.feedback_type, data["feedback_type"]) + self.assertEqual( + str(instance.recommendation_event_id), data["recommendation_event_id"] + ) def test_invalid_data(self): - data = {'context': 'invalid'} + data = {"context": "invalid"} serializer = RecommendationsInteractionEventSerializer(data=data) self.assertFalse(serializer.is_valid()) data = { - 'context': {'test_key': 'test_value'}, - 'contentnode_id': str(self.interaction_node.id), - 'content_id': str(self.interaction_node.content_id), - 'feedback_type': 'INVALID_TYPE', - 'feedback_reason': '-----', - 'recommendation_event_id': 'invalid-uuid' + "context": {"test_key": "test_value"}, + "contentnode_id": str(self.interaction_node.id), + "content_id": str(self.interaction_node.content_id), + "feedback_type": "INVALID_TYPE", + "feedback_reason": "-----", + 
"recommendation_event_id": "invalid-uuid", } serializer = RecommendationsInteractionEventSerializer(data=data) self.assertFalse(serializer.is_valid()) @@ -305,37 +338,54 @@ def setUp(self): def test_deserialization_and_validation(self): data = { - 'user': self.user.id, - 'target_channel_id': str(self.channel.id), - 'context': {'model_version': 1, 'breadcrumbs': "#Title#->Random"}, - 'contentnode_id': str(self.node_where_import_is_initiated.id), - 'content_id': str(self.node_where_import_is_initiated.content_id), - 'time_hidden': timezone.now().isoformat(), - 'content': [{'content_id': str(uuid.uuid4()), 'node_id': str(uuid.uuid4()), 'channel_id': str(uuid.uuid4()), 'score': 4}] + "user": self.user.id, + "target_channel_id": str(self.channel.id), + "context": {"model_version": 1, "breadcrumbs": "#Title#->Random"}, + "contentnode_id": str(self.node_where_import_is_initiated.id), + "content_id": str(self.node_where_import_is_initiated.content_id), + "time_hidden": timezone.now().isoformat(), + "content": [ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], } serializer = RecommendationsEventSerializer(data=data) self.assertTrue(serializer.is_valid(), serializer.errors) instance = serializer.save() - self.assertEqual(instance.context, data['context']) - self.assertEqual(instance.user.id, data['user']) - self.assertEqual(str(instance.contentnode_id).replace('-', ''), data['contentnode_id'].replace('-', '')) - self.assertEqual(instance.content, data['content']) + self.assertEqual(instance.context, data["context"]) + self.assertEqual(instance.user.id, data["user"]) + self.assertEqual( + str(instance.contentnode_id).replace("-", ""), + data["contentnode_id"].replace("-", ""), + ) + self.assertEqual(instance.content, data["content"]) def test_invalid_data(self): # Test with missing required fields - data = {'context': 'invalid'} + data = {"context": "invalid"} serializer = RecommendationsEventSerializer(data=data) self.assertFalse(serializer.is_valid()) # Test with invalid contentnode_id data = { - 'user': self.user.id, - 'target_channel_id': str(self.channel.id), - 'context': {'model_version': 1, 'breadcrumbs': "#Title#->Random"}, - 'contentnode_id': 'invalid-uuid', - 'content_id': str(self.node_where_import_is_initiated.content_id), - 'time_hidden': timezone.now().isoformat(), - 'content': [{'content_id': str(uuid.uuid4()), 'node_id': str(uuid.uuid4()), 'channel_id': str(uuid.uuid4()), 'score': 4}] + "user": self.user.id, + "target_channel_id": str(self.channel.id), + "context": {"model_version": 1, "breadcrumbs": "#Title#->Random"}, + "contentnode_id": "invalid-uuid", + "content_id": str(self.node_where_import_is_initiated.content_id), + "time_hidden": timezone.now().isoformat(), + "content": [ + { + "content_id": str(uuid.uuid4()), + "node_id": str(uuid.uuid4()), + "channel_id": str(uuid.uuid4()), + "score": 4, + } + ], } serializer = RecommendationsEventSerializer(data=data) self.assertFalse(serializer.is_valid()) diff --git a/contentcuration/contentcuration/tests/test_setlanguage.py b/contentcuration/contentcuration/tests/test_setlanguage.py index 941db98f5d..9654f351c7 100644 --- a/contentcuration/contentcuration/tests/test_setlanguage.py +++ b/contentcuration/contentcuration/tests/test_setlanguage.py @@ -36,7 +36,11 @@ def test_setlang(self): The set_language view can be used to change the session language. 
""" lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -51,7 +55,11 @@ def test_setlang_next_valid(self): """ lang_code = self._get_inactive_language_code() next_url = reverse("channels") - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -66,7 +74,11 @@ def test_setlang_next_invalid(self): """ lang_code = self._get_inactive_language_code() next_url = "/not/a/real/url" - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -79,7 +91,11 @@ def test_setlang_null(self): Test language code set to null which shoul direct to default language "en" """ lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -87,7 +103,11 @@ def test_setlang_null(self): ) self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) lang_code = None - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -101,7 +121,11 @@ def test_setlang_null_next_valid(self): The user is redirected to the "next" argument. 
""" lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -110,7 +134,11 @@ def test_setlang_null_next_valid(self): self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) next_url = reverse("channels") lang_code = None - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -124,7 +152,11 @@ def test_setlang_null_next_invalid(self): The user is redirected to user redirect if the "next" argument is invalid. """ lang_code = self._get_inactive_language_code() - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -133,7 +165,11 @@ def test_setlang_null_next_invalid(self): self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) next_url = "/not/a/real/url" lang_code = None - response = self.client.post(reverse("set_language"), self.set_post_data(lang_code, next_url), content_type='application/json') + response = self.client.post( + reverse("set_language"), + self.set_post_data(lang_code, next_url), + content_type="application/json", + ) self.assertEqual(response.status_code, 200) self.assertEqual( response.content.decode("utf-8"), @@ -146,5 +182,9 @@ def test_setlang_get(self): The set_language view is forbidden to be accessed via GET """ lang_code = self._get_inactive_language_code() - response = self.client.get(reverse("set_language"), params=self.set_post_data(lang_code), content_type='application/json') + response = self.client.get( + reverse("set_language"), + params=self.set_post_data(lang_code), + content_type="application/json", + ) self.assertEqual(type(response), HttpResponseNotAllowed) diff --git a/contentcuration/contentcuration/tests/test_settings.py b/contentcuration/contentcuration/tests/test_settings.py index 48b1b39db6..30f4931db7 100644 --- a/contentcuration/contentcuration/tests/test_settings.py +++ b/contentcuration/contentcuration/tests/test_settings.py @@ -11,7 +11,12 @@ class SettingsTestCase(BaseAPITestCase): def test_username_change(self): - data = json.dumps({"first_name": "New firstname", "last_name": "New lastname", }) + data = json.dumps( + { + "first_name": "New firstname", + "last_name": "New lastname", + } + ) request = self.create_post_request( reverse_lazy("update_user_full_name"), data=data, diff --git a/contentcuration/contentcuration/tests/test_storage_common.py b/contentcuration/contentcuration/tests/test_storage_common.py index 29ad9f59c9..f89534c194 100644 --- a/contentcuration/contentcuration/tests/test_storage_common.py +++ b/contentcuration/contentcuration/tests/test_storage_common.py @@ -16,6 +16,7 @@ from contentcuration.utils.storage_common import determine_content_type from 
contentcuration.utils.storage_common import get_presigned_upload_url from contentcuration.utils.storage_common import UnknownStorageBackendError + # The modules we'll test @@ -77,7 +78,11 @@ def test_raises_error(self): """ with pytest.raises(UnknownStorageBackendError): get_presigned_upload_url( - "nice", "err", 5, 0, storage=self.STORAGE, + "nice", + "err", + 5, + 0, + storage=self.STORAGE, ) @@ -187,12 +192,16 @@ def test_can_upload_file_to_presigned_url(self): # S3 expects a base64-encoded MD5 checksum md5 = hashlib.md5(file_contents) md5_checksum = md5.hexdigest() - md5_checksum_base64 = codecs.encode(codecs.decode(md5_checksum, "hex"), "base64").decode() + md5_checksum_base64 = codecs.encode( + codecs.decode(md5_checksum, "hex"), "base64" + ).decode() filename = "blahfile.jpg" filepath = generate_object_storage_name(md5_checksum, filename) - ret = get_presigned_upload_url(filepath, md5_checksum_base64, 1000, len(file_contents)) + ret = get_presigned_upload_url( + filepath, md5_checksum_base64, 1000, len(file_contents) + ) url = ret["uploadURL"] content_type = ret["mimetype"] @@ -201,6 +210,6 @@ def test_can_upload_file_to_presigned_url(self): data=file, headers={ "Content-Type": content_type, - } + }, ) resp.raise_for_status() diff --git a/contentcuration/contentcuration/tests/test_sync.py b/contentcuration/contentcuration/tests/test_sync.py index f51613c865..8d011cc1db 100644 --- a/contentcuration/contentcuration/tests/test_sync.py +++ b/contentcuration/contentcuration/tests/test_sync.py @@ -36,7 +36,9 @@ class SyncTestCase(StudioTestCase): def setUp(self): super(SyncTestCase, self).setUpBase() - self.derivative_channel = Channel.objects.create(name="testchannel", actor_id=self.admin_user.id) + self.derivative_channel = Channel.objects.create( + name="testchannel", actor_id=self.admin_user.id + ) self.channel.main_tree.copy_to(self.derivative_channel.main_tree) self.derivative_channel.main_tree.refresh_from_db() self.derivative_channel.save() @@ -128,10 +130,11 @@ def test_sync_files_remove(self): """ Tests whether sync_files remove additional files from the copied node or not. 
""" - video_node = (self.channel.main_tree.get_descendants() - .filter(kind_id=content_kinds.VIDEO) - .first() - ) + video_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.VIDEO) + .first() + ) video_node_copy = self.derivative_channel.main_tree.get_descendants().get( source_node_id=video_node.node_id ) @@ -147,7 +150,9 @@ def test_sync_files_remove(self): self.assertEqual(video_node.files.count(), video_node_copy.files.count()) for file in File.objects.filter(contentnode=video_node.id): - self.assertTrue(video_node_copy.files.filter(checksum=file.checksum).exists()) + self.assertTrue( + video_node_copy.files.filter(checksum=file.checksum).exists() + ) def test_sync_assessment_item_add(self): """ @@ -220,29 +225,21 @@ def test_sync_tags_add(self): ) self.assertIsNotNone(target_child) - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) tag = ContentTag.objects.create(tag_name="tagname") contentnode.tags.add(tag) - self.assertNotEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertNotEqual(target_child.tags.count(), contentnode.tags.count()) sync_channel(self.derivative_channel, sync_resource_details=True) self.derivative_channel.main_tree.refresh_from_db() - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) self.assertEqual( - target_child.tags.filter( - tag_name=tag.tag_name - ).count(), + target_child.tags.filter(tag_name=tag.tag_name).count(), 1, ) @@ -267,9 +264,7 @@ def test_sync_tags_add_multiple_tags(self): ) self.assertIsNotNone(target_child) - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) # Create the same tag twice ContentTag.objects.create(tag_name="tagname") @@ -278,23 +273,19 @@ def test_sync_tags_add_multiple_tags(self): contentnode.tags.add(tag) - self.assertNotEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertNotEqual(target_child.tags.count(), contentnode.tags.count()) try: sync_channel(self.derivative_channel, sync_resource_details=True) except Exception as e: - self.fail("Could not run sync_channel without raising exception: {}".format(e)) + self.fail( + "Could not run sync_channel without raising exception: {}".format(e) + ) self.derivative_channel.main_tree.refresh_from_db() - self.assertEqual( - target_child.tags.count(), contentnode.tags.count() - ) + self.assertEqual(target_child.tags.count(), contentnode.tags.count()) self.assertEqual( - target_child.tags.filter( - tag_name=tag.tag_name - ).count(), + target_child.tags.filter(tag_name=tag.tag_name).count(), 1, ) @@ -359,7 +350,9 @@ def test_sync_license_description(self): .first() ) - special_permissions_license = License.objects.get(license_name="Special Permissions") + special_permissions_license = License.objects.get( + license_name="Special Permissions" + ) contentnode.license = special_permissions_license contentnode.license_description = "You cannot use this content on a Thursday" @@ -379,8 +372,13 @@ def test_sync_license_description(self): ) self.assertEqual(target_child.license, special_permissions_license) - self.assertEqual(target_child.license_description, "You cannot use this content on a Thursday") - self.assertEqual(target_child.copyright_holder, "Thursday's child has far to go") + self.assertEqual( + 
target_child.license_description, + "You cannot use this content on a Thursday", + ) + self.assertEqual( + target_child.copyright_holder, "Thursday's child has far to go" + ) def test_sync_channel_other_metadata_labels(self): """ @@ -443,7 +441,8 @@ def setUp(self): def _get_assessmentitem_metadata(self, assessment_id=None, contentnode_id=None): return { "assessment_id": assessment_id or uuid.uuid4().hex, - "contentnode_id": contentnode_id or self.channel.main_tree.get_descendants() + "contentnode_id": contentnode_id + or self.channel.main_tree.get_descendants() .filter(kind_id=content_kinds.EXERCISE) .first() .id, @@ -467,16 +466,25 @@ def _upload_file_to_contentnode(self, file_metadata=None, contentnode_id=None): to point to the contentnode. """ file = file_metadata or self._get_file_metadata() - self.client.post(reverse("file-upload-url"), file, format="json",) + self.client.post( + reverse("file-upload-url"), + file, + format="json", + ) file_from_db = File.objects.get(checksum=file["checksum"]) self.sync_changes( - [generate_update_event( - file_from_db.id, - FILE, - { - "contentnode": contentnode_id or self.channel.main_tree.get_descendants().first().id - }, - channel_id=self.channel.id)],) + [ + generate_update_event( + file_from_db.id, + FILE, + { + "contentnode": contentnode_id + or self.channel.main_tree.get_descendants().first().id + }, + channel_id=self.channel.id, + ) + ], + ) file_from_db.refresh_from_db() return file_from_db @@ -494,19 +502,29 @@ def _create_assessmentitem(self, assessmentitem, channel_id): def test_content_id__becomes_equal_on_channel_sync_assessment_item(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Create a new assessmentitem. self._create_assessmentitem( - assessmentitem=self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id), - channel_id=self.channel.id + assessmentitem=self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ), + channel_id=self.channel.id, ) # Assert after creating a new assessmentitem on copied node, it's content_id is changed. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) # Syncs channel. self.channel.main_tree.refresh_from_db() @@ -519,7 +537,9 @@ def test_content_id__becomes_equal_on_channel_sync_assessment_item(self): # Now after syncing the original and copied node should have same content_id. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__becomes_equal_on_channel_sync_file(self): file = self._upload_file_to_contentnode() @@ -531,7 +551,9 @@ def test_content_id__becomes_equal_on_channel_sync_file(self): # Assert after new file upload, content_id changes. 
file.contentnode.refresh_from_db() file_contentnode_copy.refresh_from_db() - self.assertNotEqual(file.contentnode.content_id, file_contentnode_copy.content_id) + self.assertNotEqual( + file.contentnode.content_id, file_contentnode_copy.content_id + ) # Syncs channel. self.channel.main_tree.refresh_from_db() diff --git a/contentcuration/contentcuration/tests/test_utils.py b/contentcuration/contentcuration/tests/test_utils.py index 82bec7738b..f4924e92ec 100644 --- a/contentcuration/contentcuration/tests/test_utils.py +++ b/contentcuration/contentcuration/tests/test_utils.py @@ -41,7 +41,9 @@ def setUp(self): # Upload some pieces of content, as our test data self.existing_content = "dowereallyexist.jpg" - self.existing_content_path = generate_object_storage_name("dowereallyexist", self.existing_content) + self.existing_content_path = generate_object_storage_name( + "dowereallyexist", self.existing_content + ) storage.save(self.existing_content_path, BytesIO(b"maybe")) def test_returns_empty_if_content_already_exists(self): @@ -61,10 +63,7 @@ def test_returns_file_not_uploaded_yet(self): Test if a list with a nonexistent file passed in to get_file_diff would return that file. """ - files = [ - self.existing_content, - "rando" - ] + files = [self.existing_content, "rando"] assert get_file_diff(files) == ["rando"] @@ -75,8 +74,7 @@ class FileFormatsTestCase(StudioTestCase): def test_unsupported_files_raise_error(self): unsupported_file = File.objects.create( - file_on_disk=ContentFile(b"test"), - checksum='aaa' + file_on_disk=ContentFile(b"test"), checksum="aaa" ) with self.assertRaises(Exception): @@ -91,14 +89,18 @@ def test_guess_format_from_extension(self): for ext in known_extensions: file_with_ext = File.objects.create( - file_on_disk=ContentFile(b"test"), - checksum="aaa" + file_on_disk=ContentFile(b"test"), checksum="aaa" ) try: - file_with_ext.file_on_disk.save("aaa.{}".format(ext), ContentFile("aaa")) + file_with_ext.file_on_disk.save( + "aaa.{}".format(ext), ContentFile("aaa") + ) except Exception as e: - raise type(e)(e.message + " ... (hint: make sure that the version of le-utils you're using has its file formats synced).") + raise type(e)( + e.message + + " ... (hint: make sure that the version of le-utils you're using has its file formats synced)." + ) class LEUtilsListsTestCase(TestCase): @@ -107,38 +109,39 @@ class LEUtilsListsTestCase(TestCase): """ def test_le_utils_has_all_consstants_lists(self): - assert licenses.LICENSELIST, 'licenses.LICENSELIST missing from LE-UTILS!' - assert content_kinds.KINDLIST, 'content_kinds.KINDLIST missing from LE-UTILS!' - assert languages.LANGUAGELIST, 'languages.LANGUAGELIST missing from LE-UTILS!' - assert file_formats.FORMATLIST, 'file_formats.FORMATLIST missing from LE-UTILS!' - assert format_presets.PRESETLIST, 'format_presets.PRESETLIST missing from LE-UTILS!' + assert licenses.LICENSELIST, "licenses.LICENSELIST missing from LE-UTILS!" + assert content_kinds.KINDLIST, "content_kinds.KINDLIST missing from LE-UTILS!" + assert languages.LANGUAGELIST, "languages.LANGUAGELIST missing from LE-UTILS!" + assert file_formats.FORMATLIST, "file_formats.FORMATLIST missing from LE-UTILS!" + assert ( + format_presets.PRESETLIST + ), "format_presets.PRESETLIST missing from LE-UTILS!" def test_le_utils_has_all_choices(self): """Used for django model choices fields to provide validation.""" - assert content_kinds.choices, 'content_kinds.choices missing from LE-UTILS!' - assert format_presets.choices, 'format_presets.choices missing from LE-UTILS!' 
- assert file_formats.choices, 'file_formats.choices missing from LE-UTILS!' + assert content_kinds.choices, "content_kinds.choices missing from LE-UTILS!" + assert format_presets.choices, "format_presets.choices missing from LE-UTILS!" + assert file_formats.choices, "file_formats.choices missing from LE-UTILS!" class LoadConstantsManagementCommandTestCase(TestCase): """ Check `loadconstants` works. """ - models = [ - ContentKind, - FileFormat, - FormatPreset, - Language, - License - ] + + models = [ContentKind, FileFormat, FormatPreset, Language, License] def test_starting_from_empty_db(self): for model in self.models: qset = model.objects.all() - assert len(list(qset)) == 0, 'Constants of type {} already exist.'.format(str(model)) + assert len(list(qset)) == 0, "Constants of type {} already exist.".format( + str(model) + ) def test_models_exist_after_loadconstants(self): call_command("loadconstants") for model in self.models: qset = model.objects.all() - assert len(list(qset)) > 3, 'Only {} constants of type {} created.'.format(len(list(qset)), str(model)) + assert len(list(qset)) > 3, "Only {} constants of type {} created.".format( + len(list(qset)), str(model) + ) diff --git a/contentcuration/contentcuration/tests/test_zipcontentview.py b/contentcuration/contentcuration/tests/test_zipcontentview.py index 7d6056d945..d872a3e5fc 100644 --- a/contentcuration/contentcuration/tests/test_zipcontentview.py +++ b/contentcuration/contentcuration/tests/test_zipcontentview.py @@ -6,10 +6,9 @@ class ZipFileTestCase(StudioTestCase): - def setUp(self): super(ZipFileTestCase, self).setUpBase() - self.zipfile_url = '/zipcontent/' + self.zipfile_url = "/zipcontent/" self.temp_files = [] @@ -18,18 +17,21 @@ def tearDown(self): os.remove(temp_file) def do_create_zip(self): - zip_handle, zip_filename = tempfile.mkstemp(suffix='.zip') + zip_handle, zip_filename = tempfile.mkstemp(suffix=".zip") self.temp_files.append(zip_filename) os.close(zip_handle) - with zipfile.ZipFile(zip_filename, 'w') as zip: - zip.writestr("index.html", "
<html><head></head><body>Hello World!</body></html>")
+        with zipfile.ZipFile(zip_filename, "w") as zip:
+            zip.writestr(
+                "index.html",
+                "<html><head></head><body>Hello World!</body></html>
", + ) return zip_filename def test_invalid_zip(self): temp_file, response = self.upload_temp_file(b"Hello!", ext="zip") - url = '{}{}/'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 500 @@ -37,9 +39,11 @@ def test_valid_zipfile(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 200 @@ -47,9 +51,11 @@ def test_valid_zipfile_file_access(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/index.html'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/index.html".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 200 @@ -57,9 +63,11 @@ def test_valid_zipfile_missing_file(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/iamjustanillusion.txt'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/iamjustanillusion.txt".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 404 @@ -67,8 +75,10 @@ def test_valid_zipfile_access_outside_zip_fails(self): myzip = self.do_create_zip() self.sign_in() - temp_file, response = self.upload_temp_file(open(myzip, 'rb').read(), preset='html5_zip', ext='zip') + temp_file, response = self.upload_temp_file( + open(myzip, "rb").read(), preset="html5_zip", ext="zip" + ) assert response.status_code == 200 - url = '{}{}/../outsidejson.js'.format(self.zipfile_url, temp_file['name']) + url = "{}{}/../outsidejson.js".format(self.zipfile_url, temp_file["name"]) response = self.get(url) assert response.status_code == 404 diff --git a/contentcuration/contentcuration/tests/testdata.py b/contentcuration/contentcuration/tests/testdata.py index ed05189f0d..9c9fdc6068 100644 --- a/contentcuration/contentcuration/tests/testdata.py +++ b/contentcuration/contentcuration/tests/testdata.py @@ -19,63 +19,67 @@ pytestmark = pytest.mark.django_db -thumbnail_bytes = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01\r\n-\xb4\x00\x00\x00\x00IEND\xaeB`\x82' # noqa E501 +thumbnail_bytes = b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01\r\n-\xb4\x00\x00\x00\x00IEND\xaeB`\x82" # noqa E501 def video(): """ Create a video content kind entry. """ - return mixer.blend(cc.ContentKind, kind='video') + return mixer.blend(cc.ContentKind, kind="video") def preset_video(): """ Create a video format preset. 
""" - return mixer.blend(cc.FormatPreset, id='high_res_video', kind=video()) + return mixer.blend(cc.FormatPreset, id="high_res_video", kind=video()) def topic(): """ Create a topic content kind. """ - return mixer.blend(cc.ContentKind, kind='topic') + return mixer.blend(cc.ContentKind, kind="topic") def exercise(): """ Create a exercise content kind. """ - return mixer.blend(cc.ContentKind, kind='exercise') + return mixer.blend(cc.ContentKind, kind="exercise") def slideshow(): """ Returns a slideshow content kind object. """ - return mixer.blend(cc.ContentKind, kind='slideshow') + return mixer.blend(cc.ContentKind, kind="slideshow") def fileformat_perseus(): """ Create a perseus FileFormat entry. """ - return mixer.blend(cc.FileFormat, extension='perseus', mimetype='application/exercise') + return mixer.blend( + cc.FileFormat, extension="perseus", mimetype="application/exercise" + ) def fileformat_mp4(): """ Create an mp4 FileFormat entry. """ - return mixer.blend(cc.FileFormat, extension='mp4', mimetype='application/video') + return mixer.blend(cc.FileFormat, extension="mp4", mimetype="application/video") def license_wtfpl(): """ Create a license object called WTF License. """ - return cc.License.objects.first() or mixer.blend(cc.License, license_name="WTF License") + return cc.License.objects.first() or mixer.blend( + cc.License, license_name="WTF License" + ) def fileobj_video(contents=None): @@ -89,10 +93,12 @@ def fileobj_video(contents=None): logging.warning("input = {}".format(contents)) filecontents = contents else: - filecontents = "".join(random.sample(string.printable, 20)).encode('utf-8') + filecontents = "".join(random.sample(string.printable, 20)).encode("utf-8") logging.warning("contents = {}".format(filecontents)) - temp_file_dict = create_studio_file(filecontents, preset=format_presets.VIDEO_HIGH_RES, ext='mp4') - return temp_file_dict['db_file'] + temp_file_dict = create_studio_file( + filecontents, preset=format_presets.VIDEO_HIGH_RES, ext="mp4" + ) + return temp_file_dict["db_file"] def node_json(data): @@ -102,11 +108,11 @@ def node_json(data): "content_id": "aa480b60a7f4526f886e7df9f4e9b8cc", "description": "Recipes for various dishes.", "author": "Bradley Smoker", - "kind": data['kind'], - "license": data['license'], + "kind": data["kind"], + "license": data["license"], "extra_fields": {}, "files": [], - "questions": [] + "questions": [], } return node_data @@ -115,36 +121,36 @@ def node_json(data): def node(data, parent=None): # noqa: C901 new_node = None # Create topics - if 'node_id' not in data: - data['node_id'] = uuid.uuid4() - if data['kind_id'] == "topic": + if "node_id" not in data: + data["node_id"] = uuid.uuid4() + if data["kind_id"] == "topic": new_node = cc.ContentNode( kind=topic(), parent=parent, - title=data['title'], - node_id=data['node_id'], - content_id=data.get('content_id') or data['node_id'], - sort_order=data.get('sort_order', 1), + title=data["title"], + node_id=data["node_id"], + content_id=data.get("content_id") or data["node_id"], + sort_order=data.get("sort_order", 1), complete=True, ) new_node.save() - if 'children' in data: - for child in data['children']: + if "children" in data: + for child in data["children"]: node(child, parent=new_node) # Create videos - elif data['kind_id'] == "video": + elif data["kind_id"] == "video": new_node = cc.ContentNode( kind=video(), parent=parent, - title=data['title'], - node_id=data['node_id'], + title=data["title"], + node_id=data["node_id"], license=license_wtfpl(), - 
content_id=data.get('content_id') or data['node_id'], - sort_order=data.get('sort_order', 1), + content_id=data.get("content_id") or data["node_id"], + sort_order=data.get("sort_order", 1), complete=True, - extra_fields=data.get('extra_fields'), + extra_fields=data.get("extra_fields"), ) new_node.save() video_file = fileobj_video(contents=b"Video File") @@ -154,43 +160,43 @@ def node(data, parent=None): # noqa: C901 video_file.save() # Create exercises - elif data['kind_id'] == "exercise": + elif data["kind_id"] == "exercise": if "extra_fields" in data: extra_fields = data["extra_fields"] else: extra_fields = { - 'mastery_model': data['mastery_model'], - 'randomize': True, - 'm': data.get('m') or 0, - 'n': data.get('n') or 0 + "mastery_model": data["mastery_model"], + "randomize": True, + "m": data.get("m") or 0, + "n": data.get("n") or 0, } new_node = cc.ContentNode( kind=exercise(), parent=parent, - title=data['title'], - node_id=data['node_id'], + title=data["title"], + node_id=data["node_id"], license=license_wtfpl(), extra_fields=extra_fields, - content_id=data.get('content_id') or data['node_id'], - sort_order=data.get('sort_order', 1), + content_id=data.get("content_id") or data["node_id"], + sort_order=data.get("sort_order", 1), complete=True, ) new_node.save() - for assessment_item in data.get('assessment_items', []): + for assessment_item in data.get("assessment_items", []): ai = cc.AssessmentItem( contentnode=new_node, - assessment_id=assessment_item['assessment_id'], - question=assessment_item['question'], - type=assessment_item['type'], - answers=json.dumps(assessment_item['answers']), - hints=json.dumps(assessment_item.get('hints') or []) + assessment_id=assessment_item["assessment_id"], + question=assessment_item["question"], + type=assessment_item["type"], + answers=json.dumps(assessment_item["answers"]), + hints=json.dumps(assessment_item.get("hints") or []), ) ai.save() - if data.get('tags'): - for tag in data['tags']: - t = cc.ContentTag(tag_name=tag['tag_name']) + if data.get("tags"): + for tag in data["tags"]: + t = cc.ContentTag(tag_name=tag["tag_name"]) t.save() new_node.tags.add(t) new_node.save() @@ -224,13 +230,15 @@ def random_string(chars=10): :param chars: Number of characters in string :return: A string with [chars] random characters. 
""" - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(chars)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(chars) + ) -def user(email='user@test.com', feature_flags=None): +def user(email="user@test.com", feature_flags=None): user, is_new = cc.User.objects.get_or_create(email=email) if is_new: - user.set_password('password') + user.set_password("password") user.is_active = True user.save() if feature_flags is not None: @@ -239,14 +247,19 @@ def user(email='user@test.com', feature_flags=None): return user -def create_temp_file(filebytes, preset='document', ext='pdf', original_filename=None): +def create_temp_file(filebytes, preset="document", ext="pdf", original_filename=None): """Old name for create_studio_file.""" import warnings - warnings.warn('Deprecated function; use create_studio_file instead.', DeprecationWarning) - return create_studio_file(filebytes, preset='document', ext='pdf', original_filename=None) + warnings.warn( + "Deprecated function; use create_studio_file instead.", DeprecationWarning + ) + return create_studio_file( + filebytes, preset="document", ext="pdf", original_filename=None + ) -def create_studio_file(filebytes, preset='document', ext='pdf', original_filename=None): + +def create_studio_file(filebytes, preset="document", ext="pdf", original_filename=None): """ Create a file with contents of `filebytes` and the associated cc.File object for it. :param filebytes: The data to be stored in the file (as bytes) @@ -260,7 +273,7 @@ def create_studio_file(filebytes, preset='document', ext='pdf', original_filenam - db_file (cc.File): a Studio File object saved in DB """ try: - filebytes = filebytes.encode('utf-8') + filebytes = filebytes.encode("utf-8") except: # noqa pass @@ -283,25 +296,27 @@ def create_studio_file(filebytes, preset='document', ext='pdf', original_filenam preset = cc.FormatPreset.objects.get(id=preset) file_format = cc.FileFormat.objects.get(extension=ext) if original_filename is None: - original_filename = 'somefile.' + ext + original_filename = "somefile." + ext # 3. Create a File object - db_file_obj = mixer.blend(cc.File, - checksum=checksum, - file_format=file_format, - preset=preset, - original_filename=original_filename, - file_on_disk=storage_file_path) + db_file_obj = mixer.blend( + cc.File, + checksum=checksum, + file_format=file_format, + preset=preset, + original_filename=original_filename, + file_on_disk=storage_file_path, + ) return { - 'name': os.path.basename(storage_file_path), - 'data': filebytes, - 'file': fileobj, - 'db_file': db_file_obj + "name": os.path.basename(storage_file_path), + "data": filebytes, + "file": fileobj, + "db_file": db_file_obj, } -def create_test_file(filebytes, ext='pdf'): +def create_test_file(filebytes, ext="pdf"): """ Create a temporary file with contents of `filebytes` for use in tests. 
:param filebytes: The data to be stored in the file (as bytes) @@ -321,11 +336,11 @@ def create_test_file(filebytes, ext='pdf'): fileobj.write(filebytes) fileobj.seek(0) return { - 'checksum': checksum, - 'name': os.path.basename(storage_file_path), - 'storagepath': storage_file_path, - 'data': filebytes, - 'file': fileobj + "checksum": checksum, + "name": os.path.basename(storage_file_path), + "storagepath": storage_file_path, + "data": filebytes, + "file": fileobj, } @@ -341,36 +356,31 @@ def create_test_file(filebytes, ext='pdf'): "description": "Practice counting up to 10 objects.", "author": "Khan Academy", "extra_fields": {}, - "exercise_data": { - "m": 5, - "n": 7, - "mastery_model": "m_of_n" - }, + "exercise_data": {"m": 5, "n": 7, "mastery_model": "m_of_n"}, "license": "CC-BY", "files": [], "questions": [ { - 'type': 'single_selection', - 'question': 'What is your quest?', - 'hints': ['Holy', 'Coconuts'], - 'answers': [ - 'To seek the grail', - 'To eat some hail', - 'To spectacularly fail', - 'To post bail' + "type": "single_selection", + "question": "What is your quest?", + "hints": ["Holy", "Coconuts"], + "answers": [ + "To seek the grail", + "To eat some hail", + "To spectacularly fail", + "To post bail", ], - 'files': [ + "files": [ { - 'filename': 'nonexistant.mp4', - 'size': 0, + "filename": "nonexistant.mp4", + "size": 0, } ], - 'source_url': '', - 'raw_data': '', - 'assessment_id': '1' + "source_url": "", + "raw_data": "", + "assessment_id": "1", } - ] - + ], } ] @@ -380,8 +390,10 @@ def fileobj_exercise_image(): Create a generic exercise image file in storage and return a File model pointing to it. """ filecontents = "".join(random.sample(string.printable, 20)) - temp_file_dict = create_studio_file(filecontents, preset=format_presets.EXERCISE_IMAGE, ext='jpg') - return temp_file_dict['db_file'] + temp_file_dict = create_studio_file( + filecontents, preset=format_presets.EXERCISE_IMAGE, ext="jpg" + ) + return temp_file_dict["db_file"] def fileobj_exercise_graphie(): @@ -389,42 +401,51 @@ def fileobj_exercise_graphie(): Create an graphi exercise image file in storage and return a File model pointing to it. 
""" filecontents = "".join(random.sample(string.printable, 20)) - temp_file_dict = create_studio_file(filecontents, preset=format_presets.EXERCISE_GRAPHIE, ext='graphie', original_filename='theoriginalfilename') - return temp_file_dict['db_file'] + temp_file_dict = create_studio_file( + filecontents, + preset=format_presets.EXERCISE_GRAPHIE, + ext="graphie", + original_filename="theoriginalfilename", + ) + return temp_file_dict["db_file"] def base64encoding(): - return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" \ - "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" \ - "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" \ - "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" \ - "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" \ - "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" \ - "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" \ - "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" \ - "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" \ - "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" \ - "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" \ - "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" \ - "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" \ + return ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/" + "9hAAACk0lEQVR4AaWTA7TbbABA8/+zreMdzbYOZtu2bbt4rpPUtvlebbezbdvMvsxmG99740" + "CDF6z4p/G3RYkFLQPGmvj8mx30m7uo1LhNO6ou50r++zrkMoj/cRWUJmIz0gvEDXIVvP/Hbd" + "xRte+chaXe7gDDsP9WwqLJixicgqWwsNrncZFJ2UnmM+Xy1awlqDz/LVsKC6oDtxA0k/B1aD" + "Oi6rMBVVi2ys1Td+qd5NU8ZV0cWEKeWsZ4IKbdn3ikOJTogm9bw1PWw50twAWNFbS9oK1UlX" + "Y337KA6sxwiBb/NIJYM3KrRNOSppD1YNtM9wwHUs+S188M38hXtCKKNSOAM4PmzKCgWQhaNU" + "SiGCIE1DKGYozyJc5EW47ZZ2Ka3U0oNieTbLNjruOHsCO3LvNgq6cZznAHuAICah5DohjDUEG" + "+OciQRsbQlFGKUOvrw9d6uSiiKcu3h9S86F7Me/oMtv/yFVsofaQCYHyhxtcLuFSGNDwatCGI" + "SrZE6EzXIJYkoqILPR0k2oCMo/b1EOpcQqEnjkXPnseOX71uEuqDvQCTAqfjW5fhGkQlWyMQf" + "acZYRHs61jc4HKOJAGXBE+1F1vjdRiwegEstrywB9OYK5zdITZH6xUHTnUADgLcpaBZD1omxCY" + "5m6K7HRaEUDxDZjoyWOs9Xwu/43lbWTUKSfwwzNGfROX2hvg2wGrLjEcGIwTHTHR3sQW0jSEcIN" + "tsnembjYu2z0fKfngHaEXm2jzYmXaUHL7k3H+z6YftOxagZXEXNJ2+eJV3zGF/8RZyWZ6RakH8ad" + "Z9AksmLmz6nO2cy/3vl9+CnJdYZJRmn+x1HsOOh07BkcTF0p/z39hBuoJNuW9U2nF01rngydo/+xr" "/aXwDY2vpQfdHLrIAAAAASUVORK5CYII=" + ) def generated_base64encoding(): - return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA"\ - "C8klEQVR4nKWTSWhVZxiGv/N//3+Ge+49d8gdkphYOyxMGm+p1QQSm40KIgqKoKUuKu0idFMIWRWKC7"\ - "G4sqEDxZUEQciwMsaEihsV0ThAojYmahK8NjXJzXCH3DP955zfRUkWIljwW368z7t6H+nA953wPkf/b"\ - "/DY/q0MACIAUO4bnuTrfwIAwH0X9UTM+OSL7dKb4KFPU9Kh9g8ahBDtAKC8WqO+Ho8ZrucgAIAkhJC6"\ - "zl047vju54js1MzD8eI6vHtfS0va0I44+bmX3DMvXL45V/wom435vndSQfnB04djF6WfzvXt9aXgBxb"\ - "RB6iqNpZWV36ZvD+62PH1gSqf0SEvpGY5wp6Lf/TebtjRkonEE53ctie8cuUoCtJNiAMdOgsPVyU3fUm"\ - "Z/CTOcNf21tbs7D/zjYvLhUaUCP04lc5kdzZ/FmfYSpk8lUpuatNZeJg40EE0IddIHJaE6WC9oj1Kx5Lf"\ - "ZKJxHhipr1aAGWElJEdQOVifTnupWPJEvaKNB6YjS1zkNaHUEtlDP6ongNhQ8ktmFboiT/9dnTYkLZWK"\ - "1wLSEHBHqm6qrp1BVyz7RTNObChF9YSQPSII9SQURdOkXNSU14ICA9RIItlCLNtEywaVIKgEvelcvpUB"\ - "yuVKUKZcVIuCZVGPEEpc8QgLvAkU/7aqhL9Np5PdC6X8i9LL3ChW7OMFRmmFkDFC6eNUNPOrbS19xx3n"\ - "Fhb5NvCDMaIw9TcU0i6yYBZDhnGl7LHZ/it9eevVUq81lx99MZWbnsnN9/SPDCys+Ww2FDGGyEJlDQVpU5"\ - 
"j6OxnMjUwIHvzMLTv0bOT61Z6B7mUAACVeh9FYnbpl81btw6ZmDQCgZ6B76flfN65yy9EE908P5kYmKQDA0"\ - "OK1Ozu9htH7dEqsjyik6O0RVW/KIFM8yzoMABMAAPdg0m1exD/v4t9iY8oAAPfokw34v4JwjcxkQYIAYq5b9"\ + return ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA" + "C8klEQVR4nKWTSWhVZxiGv/N//3+Ge+49d8gdkphYOyxMGm+p1QQSm40KIgqKoKUuKu0idFMIWRWKC7" + "G4sqEDxZUEQciwMsaEihsV0ThAojYmahK8NjXJzXCH3DP955zfRUkWIljwW368z7t6H+nA953wPkf/b" + "/DY/q0MACIAUO4bnuTrfwIAwH0X9UTM+OSL7dKb4KFPU9Kh9g8ahBDtAKC8WqO+Ho8ZrucgAIAkhJC6" + "zl047vju54js1MzD8eI6vHtfS0va0I44+bmX3DMvXL45V/wom435vndSQfnB04djF6WfzvXt9aXgBxb" + "RB6iqNpZWV36ZvD+62PH1gSqf0SEvpGY5wp6Lf/TebtjRkonEE53ctie8cuUoCtJNiAMdOgsPVyU3fUm" + "Z/CTOcNf21tbs7D/zjYvLhUaUCP04lc5kdzZ/FmfYSpk8lUpuatNZeJg40EE0IddIHJaE6WC9oj1Kx5Lf" + "ZKJxHhipr1aAGWElJEdQOVifTnupWPJEvaKNB6YjS1zkNaHUEtlDP6ongNhQ8ktmFboiT/9dnTYkLZWK" + "1wLSEHBHqm6qrp1BVyz7RTNObChF9YSQPSII9SQURdOkXNSU14ICA9RIItlCLNtEywaVIKgEvelcvpUB" + "yuVKUKZcVIuCZVGPEEpc8QgLvAkU/7aqhL9Np5PdC6X8i9LL3ChW7OMFRmmFkDFC6eNUNPOrbS19xx3n" + "Fhb5NvCDMaIw9TcU0i6yYBZDhnGl7LHZ/it9eevVUq81lx99MZWbnsnN9/SPDCys+Ww2FDGGyEJlDQVpU5" + "j6OxnMjUwIHvzMLTv0bOT61Z6B7mUAACVeh9FYnbpl81btw6ZmDQCgZ6B76flfN65yy9EE908P5kYmKQDA0" + "OK1Ozu9htH7dEqsjyik6O0RVW/KIFM8yzoMABMAAPdg0m1exD/v4t9iY8oAAPfokw34v4JwjcxkQYIAYq5b9" "+OJrg1v1uF3yITnGcV5zxcxRYhLZ3rOem9LSe+r82vB1kP1vFwEDQAAAABJRU5ErkJggg==" + ) def srt_subtitle(): diff --git a/contentcuration/contentcuration/tests/utils/__init__.py b/contentcuration/contentcuration/tests/utils/__init__.py index c9d7c65893..b89c103587 100644 --- a/contentcuration/contentcuration/tests/utils/__init__.py +++ b/contentcuration/contentcuration/tests/utils/__init__.py @@ -1,8 +1,10 @@ #!/usr/bin/env python import sys -from .migration_test_case import * # noqa + import pytest from mixer.backend.django import mixer + +from .migration_test_case import * # noqa from contentcuration.models import ContentNode # Mark the test class or function as a slow test, where we avoid running it @@ -10,10 +12,7 @@ # Use py.test --includeslowtests to run these kinds of tests. slowtest = pytest.mark.skipif( "--includeslowtests" not in sys.argv, - reason="Skipping because this test is a slow test." 
+ reason="Skipping because this test is a slow test.", ) -mixer.register( - ContentNode, - extra_fields=lambda: {'a': 1, 'b': 2, 'c': {'d': 3}} -) +mixer.register(ContentNode, extra_fields=lambda: {"a": 1, "b": 2, "c": {"d": 3}}) diff --git a/contentcuration/contentcuration/tests/utils/celery/test_tasks.py b/contentcuration/contentcuration/tests/utils/celery/test_tasks.py index 0b203c41a0..dbe6928cb6 100644 --- a/contentcuration/contentcuration/tests/utils/celery/test_tasks.py +++ b/contentcuration/contentcuration/tests/utils/celery/test_tasks.py @@ -16,7 +16,9 @@ def test_set_total(self): self.assertEqual(200, self.tracker.total) def test_increment(self): - with mock.patch("contentcuration.utils.celery.tasks.ProgressTracker.track") as track: + with mock.patch( + "contentcuration.utils.celery.tasks.ProgressTracker.track" + ) as track: self.tracker.increment() track.assert_called_with(1.0) self.tracker.progress = 1 diff --git a/contentcuration/contentcuration/tests/utils/migration_test_case.py b/contentcuration/contentcuration/tests/utils/migration_test_case.py index c330fdada7..92fa984916 100644 --- a/contentcuration/contentcuration/tests/utils/migration_test_case.py +++ b/contentcuration/contentcuration/tests/utils/migration_test_case.py @@ -1,5 +1,5 @@ -from django.db import connection from django.core import management +from django.db import connection from django.db.migrations.executor import MigrationExecutor from django.test import TransactionTestCase @@ -12,8 +12,11 @@ class MigrationTestCase(TransactionTestCase): app = None def setUp(self): - assert self.migrate_from and self.migrate_to, \ - "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__) + assert ( + self.migrate_from and self.migrate_to + ), "TestCase '{}' must define migrate_from and migrate_to properties".format( + type(self).__name__ + ) migrate_from = [(self.app, self.migrate_from)] migrate_to = [(self.app, self.migrate_to)] diff --git a/contentcuration/contentcuration/tests/utils/test_cache.py b/contentcuration/contentcuration/tests/utils/test_cache.py index d16570648a..6eab570d0f 100644 --- a/contentcuration/contentcuration/tests/utils/test_cache.py +++ b/contentcuration/contentcuration/tests/utils/test_cache.py @@ -31,7 +31,9 @@ def test_size_key(self): self.assertEqual("abcdefghijklmnopqrstuvwxyz:value", self.helper.size_key) def test_modified_key(self): - self.assertEqual("abcdefghijklmnopqrstuvwxyz:modified", self.helper.modified_key) + self.assertEqual( + "abcdefghijklmnopqrstuvwxyz:modified", self.helper.modified_key + ) def test_cache_get(self): self.redis_client.hget.return_value = 123 @@ -42,11 +44,15 @@ def test_cache_get__not_redis(self): self.cache.client = mock.Mock() self.cache.get.return_value = 123 self.assertEqual(123, self.helper.cache_get("test_key")) - self.cache.get.assert_called_once_with("{}:{}".format(self.helper.hash_key, "test_key")) + self.cache.get.assert_called_once_with( + "{}:{}".format(self.helper.hash_key, "test_key") + ) def test_cache_set(self): self.helper.cache_set("test_key", 123) - self.redis_client.hset.assert_called_once_with(self.helper.hash_key, "test_key", 123) + self.redis_client.hset.assert_called_once_with( + self.helper.hash_key, "test_key", 123 + ) def test_cache_set__delete(self): self.helper.cache_set("test_key", None) @@ -55,28 +61,32 @@ def test_cache_set__delete(self): def test_cache_set__not_redis(self): self.cache.client = mock.Mock() self.helper.cache_set("test_key", 123) - 
self.cache.set.assert_called_once_with("{}:{}".format(self.helper.hash_key, "test_key"), 123) + self.cache.set.assert_called_once_with( + "{}:{}".format(self.helper.hash_key, "test_key"), 123 + ) def test_get_size(self): - with mock.patch.object(self.helper, 'cache_get') as cache_get: + with mock.patch.object(self.helper, "cache_get") as cache_get: cache_get.return_value = 123 self.assertEqual(123, self.helper.get_size()) cache_get.assert_called_once_with(self.helper.size_key) def test_set_size(self): - with mock.patch.object(self.helper, 'cache_set') as cache_set: + with mock.patch.object(self.helper, "cache_set") as cache_set: self.helper.set_size(123) cache_set.assert_called_once_with(self.helper.size_key, 123) def test_get_modified(self): - with mock.patch.object(self.helper, 'cache_get') as cache_get: - cache_get.return_value = '2021-01-01 00:00:00' + with mock.patch.object(self.helper, "cache_get") as cache_get: + cache_get.return_value = "2021-01-01 00:00:00" modified = self.helper.get_modified() self.assertIsNotNone(modified) - self.assertEqual('2021-01-01T00:00:00', modified.isoformat()) + self.assertEqual("2021-01-01T00:00:00", modified.isoformat()) cache_get.assert_called_once_with(self.helper.modified_key) def test_set_modified(self): - with mock.patch.object(self.helper, 'cache_set') as cache_set: - self.helper.set_modified('2021-01-01 00:00:00') - cache_set.assert_called_once_with(self.helper.modified_key, '2021-01-01 00:00:00') + with mock.patch.object(self.helper, "cache_set") as cache_set: + self.helper.set_modified("2021-01-01 00:00:00") + cache_set.assert_called_once_with( + self.helper.modified_key, "2021-01-01 00:00:00" + ) diff --git a/contentcuration/contentcuration/tests/utils/test_garbage_collect.py b/contentcuration/contentcuration/tests/utils/test_garbage_collect.py index f67daf8c28..b12c25cd31 100644 --- a/contentcuration/contentcuration/tests/utils/test_garbage_collect.py +++ b/contentcuration/contentcuration/tests/utils/test_garbage_collect.py @@ -170,7 +170,11 @@ def _create_deleted_user_in_past(deletion_datetime, email="test@test.com"): user = create_user(email, "password", "test", "test") user.delete() - user_latest_delete_history = UserHistory.objects.filter(user_id=user.id, action=user_history.DELETION).order_by("-performed_at").first() + user_latest_delete_history = ( + UserHistory.objects.filter(user_id=user.id, action=user_history.DELETION) + .order_by("-performed_at") + .first() + ) user_latest_delete_history.performed_at = deletion_datetime user_latest_delete_history.save() return user @@ -180,28 +184,46 @@ class CleanUpSoftDeletedExpiredUsersTestCase(StudioTestCase): def test_cleanup__all_expired_soft_deleted_users(self): expired_users = [] for i in range(0, 5): - expired_users.append(_create_deleted_user_in_past(deletion_datetime=THREE_MONTHS_AGO, email=f"test-{i}@test.com")) + expired_users.append( + _create_deleted_user_in_past( + deletion_datetime=THREE_MONTHS_AGO, email=f"test-{i}@test.com" + ) + ) clean_up_soft_deleted_users() for user in expired_users: - assert UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).exists() is True + assert ( + UserHistory.objects.filter( + user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION + ).exists() + is True + ) def test_no_cleanup__unexpired_soft_deleted_users(self): two_months_ago = datetime.now() - timedelta(days=63) user = _create_deleted_user_in_past(deletion_datetime=two_months_ago) clean_up_soft_deleted_users() - assert 
UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).exists() is False + assert ( + UserHistory.objects.filter( + user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION + ).exists() + is False + ) def test_no_cleanup__undeleted_users(self): user = create_user("test@test.com", "password", "test", "test") clean_up_soft_deleted_users() assert user.deleted is False - assert UserHistory.objects.filter(user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION).exists() is False + assert ( + UserHistory.objects.filter( + user_id=user.id, action=user_history.RELATED_DATA_HARD_DELETION + ).exists() + is False + ) class CleanUpContentNodesTestCase(StudioTestCase): - def test_delete_all_contentnodes_in_orphanage_tree(self): """ Make sure that by default, all nodes created with a timestamp of 3 months @@ -214,11 +236,17 @@ def test_delete_all_contentnodes_in_orphanage_tree(self): _create_expired_contentnode() # sanity check to see if we have X contentnodes under the garbage tree - assert ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count() == num_contentnodes + assert ( + ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count() + == num_contentnodes + ) # now clean up our contentnodes, and check that our descendant count is indeed 0 now clean_up_contentnodes() - assert ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count() == 0 + assert ( + ContentNode.objects.filter(parent_id=settings.ORPHANAGE_ROOT_ID).count() + == 0 + ) def test_deletes_associated_files(self): @@ -366,15 +394,12 @@ def test_doesnt_delete_file_referenced_by_orphan_and_nonorphan_nodes(self): class CleanUpFeatureFlagsTestCase(StudioTestCase): - def setUp(self): return super(CleanUpFeatureFlagsTestCase, self).setUpBase() def test_clean_up(self): key = "feature_flag_does_not_exist" - self.user.feature_flags = { - key: True - } + self.user.feature_flags = {key: True} self.user.save() clean_up_feature_flags() self.user.refresh_from_db() @@ -382,15 +407,22 @@ def test_clean_up(self): class CleanupTaskTestCase(StudioTestCase): - def setUp(self): - self.pruned_task = TaskResult.objects.create(task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="pruned_task") - self.failed_task = TaskResult.objects.create(task_id=uuid.uuid4().hex, status=states.FAILURE, task_name="failed_task") - self.recent_task = TaskResult.objects.create(task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="recent_task") + self.pruned_task = TaskResult.objects.create( + task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="pruned_task" + ) + self.failed_task = TaskResult.objects.create( + task_id=uuid.uuid4().hex, status=states.FAILURE, task_name="failed_task" + ) + self.recent_task = TaskResult.objects.create( + task_id=uuid.uuid4().hex, status=states.SUCCESS, task_name="recent_task" + ) # `date_done` uses `auto_now`, so manually set it done = datetime.now() - timedelta(days=8) - TaskResult.objects.filter(pk__in=[self.pruned_task.pk, self.failed_task.pk]).update(date_done=done) + TaskResult.objects.filter( + pk__in=[self.pruned_task.pk, self.failed_task.pk] + ).update(date_done=done) # run clean_up_tasks() @@ -413,7 +445,7 @@ def test_recent_task(self): def _create_stale_file(user, modified_date): - checksum = '%32x' % random.getrandbits(16 * 8) + checksum = "%32x" % random.getrandbits(16 * 8) file = File( file_size=5, checksum=checksum, @@ -433,7 +465,6 @@ def _create_stale_file(user, modified_date): class 
CleanupStaleFilesTestCase(StudioTestCase): - def setUp(self): user = self.admin_user diff --git a/contentcuration/contentcuration/tests/utils/test_nodes.py b/contentcuration/contentcuration/tests/utils/test_nodes.py index be43d295dd..75451d9144 100644 --- a/contentcuration/contentcuration/tests/utils/test_nodes.py +++ b/contentcuration/contentcuration/tests/utils/test_nodes.py @@ -27,13 +27,15 @@ def test_get_size(self): def test_get_size__root_node_simplification(self): self.assertEqual(10, self.helper.get_size()) - with mock.patch.object(self.root, 'is_root_node') as is_root_node: + with mock.patch.object(self.root, "is_root_node") as is_root_node: is_root_node.return_value = False self.assertEqual(10, self.helper.get_size()) @pytest.mark.skip def test_modified_since(self): - max_modified = self.helper.queryset.aggregate(max_modified=Max(F('modified')))['max_modified'] + max_modified = self.helper.queryset.aggregate(max_modified=Max(F("modified")))[ + "max_modified" + ] before_max = max_modified - datetime.timedelta(seconds=1) after_max = max_modified + datetime.timedelta(seconds=1) self.assertTrue(self.helper.modified_since(before_max.isoformat())) @@ -49,7 +51,7 @@ def setUp(self): def assertCalculation(self, cache, helper, force=False): helper().get_size.return_value = 456 - now_val = isoparse('2021-01-01T00:00:00') + now_val = isoparse("2021-01-01T00:00:00") with mock.patch("contentcuration.utils.nodes.timezone.now") as now: now.return_value = now_val size, stale = calculate_resource_size(self.node, force=force) @@ -60,7 +62,7 @@ def assertCalculation(self, cache, helper, force=False): def test_cached(self, cache, helper): cache().get_size.return_value = 123 - cache().get_modified.return_value = '2021-01-01 00:00:00' + cache().get_modified.return_value = "2021-01-01 00:00:00" helper().modified_since.return_value = False size, stale = calculate_resource_size(self.node) self.assertEqual(123, size) @@ -69,7 +71,7 @@ def test_cached(self, cache, helper): def test_stale__too_big__no_force(self, cache, helper): self.node.get_descendant_count.return_value = STALE_MAX_CALCULATION_SIZE + 1 cache().get_size.return_value = 123 - cache().get_modified.return_value = '2021-01-01 00:00:00' + cache().get_modified.return_value = "2021-01-01 00:00:00" helper().modified_since.return_value = True size, stale = calculate_resource_size(self.node) self.assertEqual(123, size) @@ -109,10 +111,15 @@ def db_get_size(): helper().get_size.side_effect = db_get_size - with mock.patch("contentcuration.utils.nodes.report_exception") as report_exception, \ - mock.patch("contentcuration.utils.nodes.SLOW_UNFORCED_CALC_THRESHOLD", 1): + with mock.patch( + "contentcuration.utils.nodes.report_exception" + ) as report_exception, mock.patch( + "contentcuration.utils.nodes.SLOW_UNFORCED_CALC_THRESHOLD", 1 + ): self.assertCalculation(cache, helper) - self.assertIsInstance(report_exception.mock_calls[0][1][0], SlowCalculationError) + self.assertIsInstance( + report_exception.mock_calls[0][1][0], SlowCalculationError + ) class CalculateResourceSizeIntegrationTestCase(StudioTestCase): diff --git a/contentcuration/contentcuration/tests/utils/test_recommendations.py b/contentcuration/contentcuration/tests/utils/test_recommendations.py index 6c791f8741..14e8aa53d0 100644 --- a/contentcuration/contentcuration/tests/utils/test_recommendations.py +++ b/contentcuration/contentcuration/tests/utils/test_recommendations.py @@ -29,89 +29,101 @@ def test_backend_initialization(self): class RecommendationsAdapterTestCase(StudioTestCase): - 
@classmethod def setUpClass(cls): super(RecommendationsAdapterTestCase, cls).setUpClass() cls.channel_1 = Channel.objects.create( - id='1234567890abcdef1234567890abcdef', - name='Channel 1', - actor_id=cls.admin_user.id + id="1234567890abcdef1234567890abcdef", + name="Channel 1", + actor_id=cls.admin_user.id, ) cls.channel_2 = Channel.objects.create( - id='abcdef1234567890abcdef1234567890', - name='Channel 2', - actor_id=cls.admin_user.id + id="abcdef1234567890abcdef1234567890", + name="Channel 2", + actor_id=cls.admin_user.id, ) @classmethod def setUpTestData(cls): cls.adapter = RecommendationsAdapter(MagicMock()) cls.request_data = { - 'topics': [ + "topics": [ { - 'id': 'topic_id', - 'title': 'topic_title', - 'description': 'topic_description', - 'language': 'en', - 'ancestors': [ + "id": "topic_id", + "title": "topic_title", + "description": "topic_description", + "language": "en", + "ancestors": [ { - 'id': 'ancestor_id', - 'title': 'ancestor_title', - 'description': 'ancestor_description', + "id": "ancestor_id", + "title": "ancestor_title", + "description": "ancestor_description", } - ] + ], } ], - 'metadata': { - 'channel_id': '00000000000000000000000000000010' - } + "metadata": {"channel_id": "00000000000000000000000000000010"}, } - cls.channel_id = 'test_channel_id' + cls.channel_id = "test_channel_id" cls.resources = [MagicMock(spec=ContentNode)] cls.request = EmbedTopicsRequest( - method='POST', - url='http://test.com', - path='/test/path', - params={'override_threshold': False}, - json=cls.request_data + method="POST", + url="http://test.com", + path="/test/path", + params={"override_threshold": False}, + json=cls.request_data, ) - cls.api_response = BackendResponse(data={ - 'topics': [ - {'id': 'abcdef1234567890abcdef1234567890', 'recommendations': [ + cls.api_response = BackendResponse( + data={ + "topics": [ { - 'id': 'abcdef1234567890abcdef1234567890', - 'channel_id': 'abcdef1234567890abcdef1234567890', - 'rank': 8 - } - ]}, - {'id': '1234567890abcdef1234567890abcdef', 'recommendations': [ + "id": "abcdef1234567890abcdef1234567890", + "recommendations": [ + { + "id": "abcdef1234567890abcdef1234567890", + "channel_id": "abcdef1234567890abcdef1234567890", + "rank": 8, + } + ], + }, { - 'id': '1234567890abcdef1234567890abcdef', - 'channel_id': '1234567890abcdef1234567890abcdef', - 'rank': 9 - } - ]} - ] - }) + "id": "1234567890abcdef1234567890abcdef", + "recommendations": [ + { + "id": "1234567890abcdef1234567890abcdef", + "channel_id": "1234567890abcdef1234567890abcdef", + "rank": 9, + } + ], + }, + ] + } + ) PublicContentNode.objects.create( - id='1234567890abcdef1234567890abcdef', - title='Public Content Node 1', + id="1234567890abcdef1234567890abcdef", + title="Public Content Node 1", content_id=uuid.uuid4().hex, - channel_id='ddec09d74e834241a580c480ee37879c', + channel_id="ddec09d74e834241a580c480ee37879c", ) PublicContentNode.objects.create( - id='abcdef1234567890abcdef1234567890', - title='Public Content Node 2', + id="abcdef1234567890abcdef1234567890", + title="Public Content Node 2", content_id=uuid.uuid4().hex, - channel_id='84fcaec1e0514b62899d7f436384c401', + channel_id="84fcaec1e0514b62899d7f436384c401", ) - def assert_backend_call(self, mock_response_exists, response_exists_value, connect_value, - make_request_value, method, *args): + def assert_backend_call( + self, + mock_response_exists, + response_exists_value, + connect_value, + make_request_value, + method, + *args + ): mock_response_exists.return_value = response_exists_value 
self.adapter.backend.connect.return_value = connect_value self.adapter.backend.make_request.return_value = make_request_value @@ -138,26 +150,50 @@ def test_adapter_initialization(self): self.assertIsNotNone(self.adapter) self.assertIsInstance(self.adapter, RecommendationsAdapter) - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) def test_generate_embeddings_connect_failure(self, mock_response_exists): mock_response = MagicMock(spec=EmbeddingsResponse) - self.assert_backend_call(mock_response_exists, None, False, mock_response, - self.adapter.generate_embeddings, self.request) + self.assert_backend_call( + mock_response_exists, + None, + False, + mock_response, + self.adapter.generate_embeddings, + self.request, + ) - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) def test_generate_embeddings(self, mock_response_exists): mock_response = MagicMock(spec=EmbeddingsResponse) mock_response.error = None - response = self.assert_backend_call(mock_response_exists, None, True, mock_response, - self.adapter.generate_embeddings, self.request) + response = self.assert_backend_call( + mock_response_exists, + None, + True, + mock_response, + self.adapter.generate_embeddings, + self.request, + ) self.assertIsInstance(response, EmbeddingsResponse) - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) def test_generate_embeddings_failure(self, mock_response_exists): mock_response = MagicMock(spec=EmbeddingsResponse) mock_response.error = {} - response = self.assert_backend_call(mock_response_exists, None, True, mock_response, - self.adapter.generate_embeddings, self.request) + response = self.assert_backend_call( + mock_response_exists, + None, + True, + mock_response, + self.adapter.generate_embeddings, + self.request, + ) self.assertIsInstance(response, EmbeddingsResponse) self.assertIsNotNone(response.error) @@ -172,11 +208,17 @@ def test_response_exists(self): def test_response_does_not_exist(self): new_request = EmbedTopicsRequest( - method='POST', - url='http://test.com', - path='/test/path', - params={'override_threshold': True}, - json=[{'id': 'topic_id', 'title': 'topic_title', 'description': 'topic_description'}] + method="POST", + url="http://test.com", + path="/test/path", + params={"override_threshold": True}, + json=[ + { + "id": "topic_id", + "title": "topic_title", + "description": "topic_description", + } + ], ) response = self.adapter.response_exists(new_request) self.assertIsNone(response) @@ -195,96 +237,135 @@ def cache_request_test_helper(self, request_json, response_data, expected_count) def test_cache_embeddings_request_success(self): request_json = { - 'topics': [ - {'id': 'topic_id', 'title': 'topic_title', 'description': 'topic_description'} + "topics": [ + { + "id": "topic_id", + "title": "topic_title", + "description": "topic_description", + } ], - 'metadata': {} + "metadata": {}, } self.cache_request_test_helper(request_json, self.api_response, 2) def test_cache_embeddings_request_empty_data(self): request_json = { - 'topics': [ - {'id': 'topic_id', 'title': 'topic_title', 'description': 'topic_description'} + "topics": [ + { + "id": "topic_id", + "title": "topic_title", + 
"description": "topic_description", + } ], - 'metadata': {} + "metadata": {}, } self.cache_request_test_helper(request_json, {}, 0) def test_cache_embeddings_request_ignore_duplicates(self): request_json = { - 'topics': [ - {'id': 'topic_id', 'title': 'topic_title', 'description': 'topic_description'} + "topics": [ + { + "id": "topic_id", + "title": "topic_title", + "description": "topic_description", + } ], - 'metadata': {} + "metadata": {}, } - duplicate_data = BackendResponse(data={ - 'topics': [ - {'id': '1234567890abcdef1234567890abcdef', 'recommendations': [ + duplicate_data = BackendResponse( + data={ + "topics": [ { - 'id': '1234567890abcdef1234567890abcdef', - 'channel_id': '1234567890abcdef1234567890abcdef', - 'rank': 1 - } - ]}, - {'id': '1234567890abcdef1234567890abcdef', 'recommendations': [ + "id": "1234567890abcdef1234567890abcdef", + "recommendations": [ + { + "id": "1234567890abcdef1234567890abcdef", + "channel_id": "1234567890abcdef1234567890abcdef", + "rank": 1, + } + ], + }, { - 'id': '1234567890abcdef1234567890abcdef', - 'channel_id': '1234567890abcdef1234567890abcdef', - 'rank': 2 - } - ]} - ] - }) + "id": "1234567890abcdef1234567890abcdef", + "recommendations": [ + { + "id": "1234567890abcdef1234567890abcdef", + "channel_id": "1234567890abcdef1234567890abcdef", + "rank": 2, + } + ], + }, + ] + } + ) self.cache_request_test_helper(request_json, duplicate_data, 1) def test_cache_embeddings_request_invalid_data(self): - invalid_data = BackendResponse(data={ - 'response': [ - {'node_id': '1234567890abcdef1234567890abcdee', 'rank': 0.6} - ] - }) - self.cache_request_test_helper([{'topic': 'new_test_topic_4'}], invalid_data, 0) - - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.cache_embeddings_request') - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.generate_embeddings') - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') - @patch('contentcuration.utils.recommendations.EmbedTopicsRequest') - def test_get_recommendations_success(self, mock_embed_topics_request, mock_response_exists, - mock_generate_embeddings, mock_cache_embeddings_request): - channel = testdata.channel('Public Channel') + invalid_data = BackendResponse( + data={ + "response": [ + {"node_id": "1234567890abcdef1234567890abcdee", "rank": 0.6} + ] + } + ) + self.cache_request_test_helper([{"topic": "new_test_topic_4"}], invalid_data, 0) + + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.cache_embeddings_request" + ) + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.generate_embeddings" + ) + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) + @patch("contentcuration.utils.recommendations.EmbedTopicsRequest") + def test_get_recommendations_success( + self, + mock_embed_topics_request, + mock_response_exists, + mock_generate_embeddings, + mock_cache_embeddings_request, + ): + channel = testdata.channel("Public Channel") channel.public = True channel.save() public_node_1 = PublicContentNode.objects.create( - id='00000000000000000000000000000003', - title='Video 1', + id="00000000000000000000000000000003", + title="Video 1", content_id=uuid.uuid4().hex, channel_id=channel.id, ) public_node_2 = PublicContentNode.objects.create( - id='00000000000000000000000000000005', - title='Exercise 1', + id="00000000000000000000000000000005", + title="Exercise 1", content_id=uuid.uuid4().hex, channel_id=channel.id, ) response_data = { - 'topics': [ - 
{'id': '00000000000000000000000000000003', 'recommendations': [ - { - 'id': '00000000000000000000000000000003', - 'channel_id': '00000000000000000000000000000003', - 'rank': 10 - } - ]}, - {'id': '00000000000000000000000000000005', 'recommendations': [ - { - 'id': '00000000000000000000000000000005', - 'channel_id': '00000000000000000000000000000005', - 'rank': 11 - } - ]} + "topics": [ + { + "id": "00000000000000000000000000000003", + "recommendations": [ + { + "id": "00000000000000000000000000000003", + "channel_id": "00000000000000000000000000000003", + "rank": 10, + } + ], + }, + { + "id": "00000000000000000000000000000005", + "recommendations": [ + { + "id": "00000000000000000000000000000005", + "channel_id": "00000000000000000000000000000005", + "rank": 11, + } + ], + }, ] } @@ -292,7 +373,9 @@ def test_get_recommendations_success(self, mock_embed_topics_request, mock_respo mock_response = MagicMock(spec=EmbeddingsResponse) mock_response.data = response_data mock_response.error = None - mock_response.get = lambda key, default=None: getattr(mock_response, key, default) + mock_response.get = lambda key, default=None: getattr( + mock_response, key, default + ) mock_generate_embeddings.return_value = mock_response response = self.adapter.get_recommendations(self.request_data) @@ -306,96 +389,132 @@ def test_get_recommendations_success(self, mock_embed_topics_request, mock_respo self.assertListEqual(expected_node_ids, actual_node_ids) self.assertEqual(len(results), 2) - @patch('contentcuration.utils.recommendations.RecommendationsAdapter._flatten_response') - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') - @patch('contentcuration.utils.recommendations.EmbedTopicsRequest') - def test_get_recommendations_failure(self, mock_embed_topics_request, mock_response_exists, - mock_flatten_response): + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter._flatten_response" + ) + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) + @patch("contentcuration.utils.recommendations.EmbedTopicsRequest") + def test_get_recommendations_failure( + self, mock_embed_topics_request, mock_response_exists, mock_flatten_response + ): mock_request_instance = MagicMock(spec=EmbedTopicsRequest) mock_embed_topics_request.return_value = mock_request_instance - self.assert_backend_call(mock_response_exists, None, False, None, - self.adapter.get_recommendations, self.request_data) + self.assert_backend_call( + mock_response_exists, + None, + False, + None, + self.adapter.get_recommendations, + self.request_data, + ) - @patch('contentcuration.utils.recommendations.RecommendationsAdapter._flatten_response') - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') - @patch('contentcuration.utils.recommendations.EmbedContentRequest') - def test_embed_content_success(self, mock_embed_topics_request, mock_response_exists, - mock_flatten_response): + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter._flatten_response" + ) + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) + @patch("contentcuration.utils.recommendations.EmbedContentRequest") + def test_embed_content_success( + self, mock_embed_topics_request, mock_response_exists, mock_flatten_response + ): mock_response = MagicMock(spec=EmbeddingsResponse) mock_response.error = None - response = self.assert_backend_call(mock_response_exists, None, True, mock_response, - 
self.adapter.embed_content, self.channel_id, - self.resources) + response = self.assert_backend_call( + mock_response_exists, + None, + True, + mock_response, + self.adapter.embed_content, + self.channel_id, + self.resources, + ) self.assertIsInstance(response, bool) self.assertTrue(response) - @patch('contentcuration.utils.recommendations.RecommendationsAdapter.response_exists') - @patch('contentcuration.utils.recommendations.EmbedContentRequest') - def test_embed_content_failure(self, mock_embed_topics_request, mock_response_exists): - response = self.assert_backend_call(mock_response_exists, None, False, - None, self.adapter.embed_content, - self.channel_id, - self.resources) + @patch( + "contentcuration.utils.recommendations.RecommendationsAdapter.response_exists" + ) + @patch("contentcuration.utils.recommendations.EmbedContentRequest") + def test_embed_content_failure( + self, mock_embed_topics_request, mock_response_exists + ): + response = self.assert_backend_call( + mock_response_exists, + None, + False, + None, + self.adapter.embed_content, + self.channel_id, + self.resources, + ) self.assertIsNone(response) - def extract_content_test_helper(self, mock_node, file_return_value, expected_result): - with patch('contentcuration.utils.recommendations.File.objects.filter', - return_value=file_return_value): + def extract_content_test_helper( + self, mock_node, file_return_value, expected_result + ): + with patch( + "contentcuration.utils.recommendations.File.objects.filter", + return_value=file_return_value, + ): result = self.adapter.extract_content(mock_node) self.assertEqual(result, expected_result) def test_extract_content(self): mock_node = MagicMock(spec=ContentNode) - mock_node.node_id = '1234567890abcdef1234567890abcdef' - mock_node.title = 'Sample Title' - mock_node.description = 'Sample Description' - mock_node.language.lang_code = 'en' - mock_node.kind.kind = 'video' + mock_node.node_id = "1234567890abcdef1234567890abcdef" + mock_node.title = "Sample Title" + mock_node.description = "Sample Description" + mock_node.language.lang_code = "en" + mock_node.kind.kind = "video" mock_file_instance = MagicMock() - mock_file_instance.file_on_disk = 'path/to/file.mp4' - mock_file_instance.preset_id = 'video_high_res' - mock_file_instance.language.lang_code = 'en' + mock_file_instance.file_on_disk = "path/to/file.mp4" + mock_file_instance.preset_id = "video_high_res" + mock_file_instance.language.lang_code = "en" expected_result = { - "id": '1234567890abcdef1234567890abcdef', - "title": 'Sample Title', - "description": 'Sample Description', + "id": "1234567890abcdef1234567890abcdef", + "title": "Sample Title", + "description": "Sample Description", "text": "", - "language": 'en', + "language": "en", "files": [ { - 'url': 'path/to/file.mp4', - 'preset': 'video_high_res', - 'language': 'en', + "url": "path/to/file.mp4", + "preset": "video_high_res", + "language": "en", } ], } - self.extract_content_test_helper(mock_node, [mock_file_instance], expected_result) + self.extract_content_test_helper( + mock_node, [mock_file_instance], expected_result + ) def test_extract_content_no_files(self): mock_node = MagicMock(spec=ContentNode) - mock_node.node_id = '1234567890abcdef1234567890abcdef' - mock_node.title = 'Sample Title' - mock_node.description = 'Sample Description' - mock_node.language.lang_code = 'en' - mock_node.kind.kind = 'video' + mock_node.node_id = "1234567890abcdef1234567890abcdef" + mock_node.title = "Sample Title" + mock_node.description = "Sample Description" + 
mock_node.language.lang_code = "en" + mock_node.kind.kind = "video" expected_result = { - "id": '1234567890abcdef1234567890abcdef', - "title": 'Sample Title', - "description": 'Sample Description', + "id": "1234567890abcdef1234567890abcdef", + "title": "Sample Title", + "description": "Sample Description", "text": "", - "language": 'en', + "language": "en", "files": [], } self.extract_content_test_helper(mock_node, [], expected_result) class RecommendationsBackendFactoryTestCases(TestCase): - def setUp(self): self.factory = RecommendationsBackendFactory() @@ -424,7 +543,7 @@ def test_ensure_url_has_scheme_with_none(self): result = self.factory._ensure_url_has_scheme(url) self.assertEqual(result, url) - @patch('contentcuration.utils.recommendations.settings') + @patch("contentcuration.utils.recommendations.settings") def test_create_backend_with_url_no_scheme(self, mock_settings): mock_settings.CURRICULUM_AUTOMATION_API_URL = "api.example.com" backend = self.factory.create_backend() @@ -433,7 +552,7 @@ def test_create_backend_with_url_no_scheme(self, mock_settings): self.assertEqual(backend.base_url, "http://api.example.com") self.assertEqual(backend.connect_endpoint, "/connect") - @patch('contentcuration.utils.recommendations.settings') + @patch("contentcuration.utils.recommendations.settings") def test_create_backend_with_url_with_scheme(self, mock_settings): mock_settings.CURRICULUM_AUTOMATION_API_URL = "https://api.example.com" backend = self.factory.create_backend() @@ -442,7 +561,7 @@ def test_create_backend_with_url_with_scheme(self, mock_settings): self.assertEqual(backend.base_url, "https://api.example.com") self.assertEqual(backend.connect_endpoint, "/connect") - @patch('contentcuration.utils.recommendations.settings') + @patch("contentcuration.utils.recommendations.settings") def test_create_backend_with_empty_url(self, mock_settings): mock_settings.CURRICULUM_AUTOMATION_API_URL = "" backend = self.factory.create_backend() @@ -451,7 +570,7 @@ def test_create_backend_with_empty_url(self, mock_settings): self.assertEqual(backend.base_url, "") self.assertEqual(backend.connect_endpoint, "/connect") - @patch('contentcuration.utils.recommendations.settings') + @patch("contentcuration.utils.recommendations.settings") def test_create_backend_with_no_url(self, mock_settings): mock_settings.CURRICULUM_AUTOMATION_API_URL = None backend = self.factory.create_backend() diff --git a/contentcuration/contentcuration/tests/views/test_nodes.py b/contentcuration/contentcuration/tests/views/test_nodes.py index a981b84ca7..7099fc1a76 100644 --- a/contentcuration/contentcuration/tests/views/test_nodes.py +++ b/contentcuration/contentcuration/tests/views/test_nodes.py @@ -20,22 +20,30 @@ def tearDown(self): cache.clear() def test_get_node_diff__missing_contentnode(self): - response = self.get(reverse("get_node_diff", kwargs=dict(updated_id="abc123", original_id="def456"))) + response = self.get( + reverse( + "get_node_diff", kwargs=dict(updated_id="abc123", original_id="def456") + ) + ) self.assertEqual(response.status_code, 404) def test_get_node_diff__no_task_processing(self): pk = self.channel.main_tree.pk - response = self.get(reverse("get_node_diff", kwargs=dict(updated_id=pk, original_id=pk))) + response = self.get( + reverse("get_node_diff", kwargs=dict(updated_id=pk, original_id=pk)) + ) self.assertEqual(response.status_code, 404) - @patch.object(generatenodediff_task, 'find_incomplete_ids') + @patch.object(generatenodediff_task, "find_incomplete_ids") def 
test_get_node_diff__task_processing(self, mock_find_incomplete_ids): qs = Mock(spec="django.db.models.query.QuerySet") mock_find_incomplete_ids.return_value = qs() mock_find_incomplete_ids.return_value.exists.return_value = True pk = self.channel.main_tree.pk - response = self.get(reverse("get_node_diff", kwargs=dict(updated_id=pk, original_id=pk))) + response = self.get( + reverse("get_node_diff", kwargs=dict(updated_id=pk, original_id=pk)) + ) self.assertEqual(response.status_code, 302) @@ -45,7 +53,7 @@ def setUp(self): self.default_details = { "resource_count": 5, "resource_size": 100, - "kind_count": {"document": 3, "video": 2} + "kind_count": {"document": 3, "video": 2}, } # see tree.json for where this comes from self.node = ContentNode.objects.get(node_id="00000000000000000000000000000001") @@ -59,7 +67,11 @@ def tearDown(self): def _set_cache(self, node, last_update=None): data = self.default_details.copy() if last_update is not None: - data.update(last_update=pytz.utc.localize(last_update).strftime(settings.DATE_TIME_FORMAT)) + data.update( + last_update=pytz.utc.localize(last_update).strftime( + settings.DATE_TIME_FORMAT + ) + ) cache_key = "details_{}".format(node.node_id) cache.set(cache_key, json.dumps(data)) @@ -67,9 +79,11 @@ def _set_cache(self, node, last_update=None): @contextmanager def _check_details(self, node=None): endpoint = "get_channel_details" if node is None else "get_node_details" - param = {"channel_id": self.channel.id} \ - if endpoint == "get_channel_details" \ + param = ( + {"channel_id": self.channel.id} + if endpoint == "get_channel_details" else {"node_id": node.id} + ) url = reverse(endpoint, kwargs=param) response = self.get(url) print(response.content) @@ -77,16 +91,16 @@ def _check_details(self, node=None): yield details def assertDetailsEqual(self, details, expected): - self.assertEqual(details['resource_count'], expected['resource_count']) - self.assertEqual(details['resource_size'], expected['resource_size']) - self.assertEqual(details['kind_count'], expected['kind_count']) + self.assertEqual(details["resource_count"], expected["resource_count"]) + self.assertEqual(details["resource_size"], expected["resource_size"]) + self.assertEqual(details["kind_count"], expected["kind_count"]) @patch("contentcuration.models.ContentNode.get_details") def test_get_channel_details__uncached(self, mock_get_details): mock_get_details.return_value = { "resource_count": 7, "resource_size": 200, - "kind_count": {"document": 33, "video": 22} + "kind_count": {"document": 33, "video": 22}, } with self._check_details() as details: self.assertDetailsEqual(details, mock_get_details.return_value) @@ -97,19 +111,25 @@ def test_get_channel_details__uncached(self, mock_get_details): def test_get_channel_details__cached(self, task_mock): # force the cache to update by adding a very old cache entry. Since Celery tasks run sync in the test suite, # get_channel_details will return an updated cache value rather than generate it async. 
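The comment above describes the read-through caching contract these tests exercise: a cache hit is returned immediately, and only a stale last_update timestamp schedules a background recompute. A minimal sketch of that contract, assuming a dict-backed cache and an injected enqueue callback (get_details_cached, compute_details, and enqueue_refresh are illustrative names, not Studio's actual API):

    import datetime
    import json

    STALE_AFTER = datetime.timedelta(hours=1)

    def get_details_cached(node_id, cache, compute_details, enqueue_refresh):
        """Serve cached details immediately; refresh stale entries asynchronously."""
        key = "details_{}".format(node_id)
        raw = cache.get(key)
        if raw is None:
            # Cache miss: compute synchronously and stamp the entry.
            data = compute_details(node_id)
            data["last_update"] = datetime.datetime.utcnow().isoformat()
            cache[key] = json.dumps(data)
            return data
        data = json.loads(raw)
        last_update = datetime.datetime.fromisoformat(data["last_update"])
        if datetime.datetime.utcnow() - last_update > STALE_AFTER:
            # Stale hit: still return the cached value, but schedule a refresh,
            # which is what the enqueue assertion in the test checks for.
            enqueue_refresh(node_id)
        return data

Seeding the cache with a 1990 timestamp, as _set_cache does below, forces the stale branch and lets the test assert that the refresh task was enqueued exactly once.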
- self._set_cache(self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1)) + self._set_cache( + self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1) + ) with self._check_details() as details: # check cache was returned self.assertDetailsEqual(details, self.default_details) # Check that the outdated cache prompts an asynchronous cache update - task_mock.enqueue.assert_called_once_with(self.user, node_id=self.channel.main_tree.id) + task_mock.enqueue.assert_called_once_with( + self.user, node_id=self.channel.main_tree.id + ) @patch("contentcuration.views.nodes.getnodedetails_task") def test_get_channel_details__cached__not_updated__no_enqueue(self, task_mock): # nothing changed, self.channel.main_tree.get_descendants(include_self=False).update(changed=False) - self._set_cache(self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1)) + self._set_cache( + self.channel.main_tree, last_update=datetime.datetime(1990, 1, 1) + ) with self._check_details() as details: # check cache was returned @@ -119,7 +139,9 @@ def test_get_channel_details__cached__not_updated__no_enqueue(self, task_mock): @patch("contentcuration.views.nodes.getnodedetails_task") def test_get_channel_details__cached__no_enqueue(self, task_mock): # test last update handling - self._set_cache(self.channel.main_tree, last_update=datetime.datetime(2099, 1, 1)) + self._set_cache( + self.channel.main_tree, last_update=datetime.datetime(2099, 1, 1) + ) with self._check_details() as details: # check cache was returned @@ -131,7 +153,7 @@ def test_get_node_details__uncached(self, mock_get_details): mock_get_details.return_value = { "resource_count": 7, "resource_size": 200, - "kind_count": {"document": 33, "video": 22} + "kind_count": {"document": 33, "video": 22}, } with self._check_details(node=self.node) as details: self.assertDetailsEqual(details, mock_get_details.return_value) diff --git a/contentcuration/contentcuration/tests/views/test_settings.py b/contentcuration/contentcuration/tests/views/test_settings.py index 7cf3145e95..ed23fb0d70 100644 --- a/contentcuration/contentcuration/tests/views/test_settings.py +++ b/contentcuration/contentcuration/tests/views/test_settings.py @@ -1,15 +1,14 @@ -from mock import mock - -from django.template.loader import render_to_string from django.conf import settings as ccsettings +from django.template.loader import render_to_string +from mock import mock +from contentcuration.forms import StorageRequestForm from contentcuration.tests import testdata from contentcuration.tests.base import StudioAPITestCase from contentcuration.views.settings import StorageSettingsView -from contentcuration.forms import StorageRequestForm -class StorageSettingsViewTestCase(StudioAPITestCase): +class StorageSettingsViewTestCase(StudioAPITestCase): def setUp(self): super(StorageSettingsViewTestCase, self).setUp() self.view = StorageSettingsView() @@ -17,7 +16,7 @@ def setUp(self): self.view.request.user = testdata.user(email="tester@tester.com") def test_storage_request(self): - + with mock.patch("contentcuration.views.settings.send_mail") as send_mail: data = dict( @@ -35,9 +34,9 @@ def test_storage_request(self): uploading_for="uploading_for", organization_type="organization_type", time_constraint="time_constraint", - message="message" + message="message", ) - self.form = StorageRequestForm(data=data) + self.form = StorageRequestForm(data=data) self.assertTrue(self.form.is_valid()) self.view.form_valid(self.form) @@ -47,7 +46,7 @@ def test_storage_request(self): { "data": 
self.form.cleaned_data, "user": self.view.request.user, - "channels": ["channel1", "channel2"] + "channels": ["channel1", "channel2"], }, ) diff --git a/contentcuration/contentcuration/tests/views/test_users.py b/contentcuration/contentcuration/tests/views/test_users.py index 5247bf46b7..a17da93f8a 100644 --- a/contentcuration/contentcuration/tests/views/test_users.py +++ b/contentcuration/contentcuration/tests/views/test_users.py @@ -21,16 +21,18 @@ def setUp(self): self.request = mock.Mock() self.request.method = "POST" self.user = testdata.user(email="tester@tester.com") - self.request.body = json.dumps(dict( - username="tester@tester.com", - password="password", - )) + self.request.body = json.dumps( + dict( + username="tester@tester.com", + password="password", + ) + ) def test_login__not_post(self): self.request.method = "GET" redirect = login(self.request) self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("accounts", redirect['Location']) + self.assertIn("accounts", redirect["Location"]) def test_login__not_found(self): self.user.email = "different@tester.com" @@ -52,22 +54,24 @@ def test_login__success(self, djangologin): redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_login__case_sensitivity(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: self.user.email = "Tester@tester.com" self.user.save() - self.request.body = json.dumps(dict( - username="tester@Tester.com", - password="password", - )) + self.request.body = json.dumps( + dict( + username="tester@Tester.com", + password="password", + ) + ) redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_login__case_sensitivity__multiple(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: @@ -79,27 +83,31 @@ def test_login__case_sensitivity__multiple(self): user2.set_password("tester") user2.save() - self.request.body = json.dumps(dict( - username="tester@tester.com", - password="tester", - )) + self.request.body = json.dumps( + dict( + username="tester@tester.com", + password="tester", + ) + ) redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_login__whitespace(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: - self.request.body = json.dumps(dict( - username="tester@Tester.com ", - password="password", - )) + self.request.body = json.dumps( + dict( + username="tester@Tester.com ", + password="password", + ) + ) redirect = login(self.request) djangologin.assert_called() self.assertIsInstance(redirect, HttpResponseRedirectBase) - self.assertIn("channels", redirect['Location']) + self.assertIn("channels", redirect["Location"]) def test_after_delete__no_login(self): with mock.patch("contentcuration.views.users.djangologin") as djangologin: @@ -161,9 +169,7 @@ def setUp(self): self.user = testdata.user(email="tester@tester.com") self.user.is_active = False self.user.save() - self.kwargs = dict( - activation_key="activation_key" - ) + self.kwargs = dict(activation_key="activation_key") def test_activate(self): 
self.view.validate_key.return_value = self.user.email diff --git a/contentcuration/contentcuration/tests/views/test_views_base.py b/contentcuration/contentcuration/tests/views/test_views_base.py index 8bf4b80726..41f4e56e70 100644 --- a/contentcuration/contentcuration/tests/views/test_views_base.py +++ b/contentcuration/contentcuration/tests/views/test_views_base.py @@ -36,8 +36,12 @@ def test_200_get(self): task_name="export-channel", status="QUEUED", ) - CustomTaskMetadata(task_id=task_id, user=self.user, channel_id=self.channel.id).save() - CustomTaskMetadata(task_id=task_id_2, user=self.user, channel_id=channel_2.id).save() + CustomTaskMetadata( + task_id=task_id, user=self.user, channel_id=self.channel.id + ).save() + CustomTaskMetadata( + task_id=task_id_2, user=self.user, channel_id=channel_2.id + ).save() response = self.get( reverse_lazy("publishing_status"), ) @@ -50,6 +54,10 @@ def test_200_get(self): for i, item in enumerate(response.data): self.assertEqual(expected_channel_ids[i], item["channel_id"]) - expected_task_id = task.task_id if item["channel_id"] == self.channel.id else task_2.task_id + expected_task_id = ( + task.task_id + if item["channel_id"] == self.channel.id + else task_2.task_id + ) self.assertEqual(expected_task_id, item["task_id"]) self.assertIn("performed", item) diff --git a/contentcuration/contentcuration/tests/views/test_views_internal.py b/contentcuration/contentcuration/tests/views/test_views_internal.py index 3a8f50a6d2..e43d4fdd75 100644 --- a/contentcuration/contentcuration/tests/views/test_views_internal.py +++ b/contentcuration/contentcuration/tests/views/test_views_internal.py @@ -10,7 +10,9 @@ from django.urls import reverse_lazy from le_utils.constants import content_kinds from le_utils.constants import format_presets -from le_utils.constants.labels.accessibility_categories import ACCESSIBILITYCATEGORIESLIST +from le_utils.constants.labels.accessibility_categories import ( + ACCESSIBILITYCATEGORIESLIST, +) from le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST from le_utils.constants.labels.levels import LEVELSLIST from le_utils.constants.labels.needs import NEEDSLIST @@ -196,9 +198,7 @@ def test_associates_file_with_created_node(self): def test_metadata_properly_created(self): node = ContentNode.objects.get(title="valid_metadata_labels") for label, values in METADATA.items(): - self.assertEqual(getattr(node, label), { - values[0]: True - }) + self.assertEqual(getattr(node, label), {values[0]: True}) @skipIf(True, "Disable until we mark nodes as incomplete rather than just warn") def test_invalid_nodes_are_not_complete(self): @@ -244,7 +244,7 @@ def test_add_nodes__not_a_topic(self): ) # should succeed self.assertEqual(response.status_code, 200, response.content) - resource_node_id = next(iter(response.json().get('root_ids').values())) + resource_node_id = next(iter(response.json().get("root_ids").values())) invalid_child = self._make_node_data() test_data = { @@ -462,7 +462,9 @@ def test_duplicate_assessment_item_returns_400_status_code(self): """ Check that we return 400 if passed in duplicate assessment items. 
""" - self.sample_data["content_data"][0]["questions"][1]["assessment_id"] = self.sample_data["content_data"][0]["questions"][0]["assessment_id"] + self.sample_data["content_data"][0]["questions"][1][ + "assessment_id" + ] = self.sample_data["content_data"][0]["questions"][0]["assessment_id"] response = self._make_request() # check that we returned 400 with that POST request assert response.status_code == 400, "Got a non-400 request error: {}".format( @@ -743,7 +745,7 @@ def setUp(self): "license": None, "source_domain": "unique domain", "source_id": "unique domain root", - "ricecooker_version": '0.6.46', + "ricecooker_version": "0.6.46", "extra_fields": None, "files": None, } @@ -751,7 +753,9 @@ def setUp(self): def test_401_no_permission(self): client = APIClient() response = client.post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) self.assertEqual(response.status_code, 401) @@ -761,16 +765,22 @@ def test_returns_200_status_code(self): """ # check that we returned 200 with that POST request resp = self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", + ) + self.assertEqual( + resp.status_code, 200, "Got a request error: {}".format(resp.content) ) - self.assertEqual(resp.status_code, 200, "Got a request error: {}".format(resp.content)) def test_creates_channel(self): """ Test that it creates a channel with the given id """ self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: Channel.objects.get(id=self.channel_data["id"]) @@ -786,7 +796,9 @@ def test_updates_already_created_channel(self): deleted_channel.save(actor_id=self.user.id) self.channel_data.update({"name": "Updated name", "id": deleted_channel.id}) self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: c = Channel.objects.get(id=self.channel_data["id"]) @@ -799,7 +811,9 @@ def test_creates_cheftree(self): Test that it creates a channel with the given id """ self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: c = Channel.objects.get(id=self.channel_data["id"]) @@ -813,18 +827,22 @@ def test_associates_file_with_created_channel(self): Check that the file we passed is now associated with the chef_tree we just created. 
""" - dummy_file = create_studio_file(b"aaaaaaaaaaaaaaa", preset=format_presets.HTML5_ZIP, ext="zip") + dummy_file = create_studio_file( + b"aaaaaaaaaaaaaaa", preset=format_presets.HTML5_ZIP, ext="zip" + ) test_file = { - 'size': len(dummy_file["data"]), - 'preset': format_presets.HTML5_ZIP, - 'filename': dummy_file["name"], - 'original_filename': 'test_file', - 'language': "as", - 'source_url': "https://justatest.com/test_file.zip", + "size": len(dummy_file["data"]), + "preset": format_presets.HTML5_ZIP, + "filename": dummy_file["name"], + "original_filename": "test_file", + "language": "as", + "source_url": "https://justatest.com/test_file.zip", } self.channel_data.update({"files": [test_file]}) self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: @@ -838,9 +856,13 @@ def test_associates_extra_fields_with_root_node(self): """ Check that extra_fields information is put on the chef_tree root node """ - self.channel_data.update({"extra_fields": json.dumps({"modality": "CUSTOM_NAVIGATION"})}) + self.channel_data.update( + {"extra_fields": json.dumps({"modality": "CUSTOM_NAVIGATION"})} + ) self.admin_client().post( - reverse_lazy("api_create_channel"), data={"channel_data": self.channel_data}, format="json" + reverse_lazy("api_create_channel"), + data={"channel_data": self.channel_data}, + format="json", ) try: @@ -895,13 +917,19 @@ def _make_node_data(self): def setUp(self): super(ApiAddRemoteNodesToTreeTestCase, self).setUp() self.source_channel = channel() - self.source_video = self.source_channel.main_tree.get_descendants().filter(kind_id=content_kinds.VIDEO).first() + self.source_video = ( + self.source_channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.VIDEO) + .first() + ) # first setup a test channel... self.channel = channel() self.root_node = self.channel.main_tree - temp_file_dict = create_studio_file(thumbnail_bytes, preset=format_presets.VIDEO_THUMBNAIL, ext='jpg') + temp_file_dict = create_studio_file( + thumbnail_bytes, preset=format_presets.VIDEO_THUMBNAIL, ext="jpg" + ) # File used for every node self.fileobj = temp_file_dict["db_file"] @@ -983,9 +1011,7 @@ def test_metadata_properly_created(self): node = ContentNode.objects.get(title="valid_metadata_labels") for label, values in METADATA.items(): - self.assertEqual(getattr(node, label), { - values[0]: True - }) + self.assertEqual(getattr(node, label), {values[0]: True}) def test_metadata_properly_screened_viewer(self): self.root_node.get_descendants().delete() @@ -1012,7 +1038,10 @@ def test_metadata_properly_screened_viewer(self): if key not in METADATA: if hasattr(node, key): # These will be matching even though we don't overwrite them. - if key in ALLOWED_OVERRIDES or key in {"source_channel_id", "source_node_id"}: + if key in ALLOWED_OVERRIDES or key in { + "source_channel_id", + "source_node_id", + }: self.assertEqual(getattr(node, key), value, key) else: self.assertNotEqual(getattr(node, key), value, key) @@ -1028,7 +1057,10 @@ def test_metadata_properly_screened_editor(self): if key not in METADATA: if hasattr(node, key): # These will be matching even though we don't overwrite them. 
- if key in EDIT_ALLOWED_OVERRIDES or key in {"source_channel_id", "source_node_id"}: + if key in EDIT_ALLOWED_OVERRIDES or key in { + "source_channel_id", + "source_node_id", + }: self.assertEqual(getattr(node, key), value, key) else: self.assertNotEqual(getattr(node, key), value, key) diff --git a/contentcuration/contentcuration/tests/viewsets/base.py b/contentcuration/contentcuration/tests/viewsets/base.py index 8d34afeb24..617d23bb26 100644 --- a/contentcuration/contentcuration/tests/viewsets/base.py +++ b/contentcuration/contentcuration/tests/viewsets/base.py @@ -8,14 +8,30 @@ from contentcuration.viewsets.sync.constants import CHANNEL from contentcuration.viewsets.sync.constants import SYNCED from contentcuration.viewsets.sync.utils import _generate_event as base_generate_event -from contentcuration.viewsets.sync.utils import generate_copy_event as base_generate_copy_event -from contentcuration.viewsets.sync.utils import generate_create_event as base_generate_create_event -from contentcuration.viewsets.sync.utils import generate_delete_event as base_generate_delete_event -from contentcuration.viewsets.sync.utils import generate_deploy_event as base_generate_deploy_event -from contentcuration.viewsets.sync.utils import generate_publish_event as base_generate_publish_event -from contentcuration.viewsets.sync.utils import generate_update_event as base_generate_update_event -from contentcuration.viewsets.sync.utils import generate_update_descendants_event as base_generate_update_descendants_event -from contentcuration.viewsets.sync.utils import generate_publish_next_event as base_generate_publish_next_event +from contentcuration.viewsets.sync.utils import ( + generate_copy_event as base_generate_copy_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_create_event as base_generate_create_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_delete_event as base_generate_delete_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_deploy_event as base_generate_deploy_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_publish_event as base_generate_publish_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_publish_next_event as base_generate_publish_next_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_update_descendants_event as base_generate_update_descendants_event, +) +from contentcuration.viewsets.sync.utils import ( + generate_update_event as base_generate_update_event, +) def generate_copy_event(*args, **kwargs): @@ -42,8 +58,16 @@ def generate_update_event(*args, **kwargs): return event -def generate_sync_channel_event(channel_id, titles_and_descriptions, resource_details, files, assessment_items): - event = base_generate_event(key=channel_id, table=CHANNEL, event_type=SYNCED, channel_id=channel_id, user_id=None) +def generate_sync_channel_event( + channel_id, titles_and_descriptions, resource_details, files, assessment_items +): + event = base_generate_event( + key=channel_id, + table=CHANNEL, + event_type=SYNCED, + channel_id=channel_id, + user_id=None, + ) event["rev"] = random.randint(1, 10000000) event["titles_and_descriptions"] = titles_and_descriptions event["resource_details"] = resource_details @@ -57,16 +81,19 @@ def generate_deploy_channel_event(channel_id, user_id): event["rev"] = random.randint(1, 10000000) return event + def generate_update_descendants_event(*args, **kwargs): event = base_generate_update_descendants_event(*args, **kwargs) 
event["rev"] = random.randint(1, 10000000) return event + def generate_publish_channel_event(channel_id): event = base_generate_publish_event(channel_id) event["rev"] = random.randint(1, 10000000) return event + def generate_publish_next_event(channel_id): event = base_generate_publish_next_event(channel_id) event["rev"] = random.randint(1, 10000000) @@ -109,4 +136,6 @@ def sync_changes(self, changes): ) def get_allowed_changes(self, response): - return Change.objects.filter(server_rev__in=[c['server_rev'] for c in response.json()["allowed"]]) + return Change.objects.filter( + server_rev__in=[c["server_rev"] for c in response.json()["allowed"]] + ) diff --git a/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py b/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py index ebc04ac7c2..98ec45517e 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py +++ b/contentcuration/contentcuration/tests/viewsets/test_assessmentitem.py @@ -16,11 +16,9 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def assessmentitem_metadata(self): return { - "assessment_id": uuid.uuid4().hex, "contentnode": self.channel.main_tree.get_descendants() .filter(kind_id=content_kinds.EXERCISE) @@ -139,7 +137,7 @@ def test_create_assessmentitem_with_file_answers(self): exercises.IMG_PLACEHOLDER, image_file.checksum, image_file.file_format_id ) - answers = [{'answer': answer, 'correct': False, 'order': 1}] + answers = [{"answer": answer, "correct": False, "order": 1}] assessmentitem["answers"] = json.dumps(answers) @@ -300,13 +298,14 @@ def test_attempt_update_missing_assessmentitem(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( [ - generate_update_event([ - self.channel.main_tree.get_descendants() - .filter(kind_id=content_kinds.EXERCISE) - .first() - .id, - uuid.uuid4().hex - ], + generate_update_event( + [ + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + .id, + uuid.uuid4().hex, + ], ASSESSMENTITEM, {"question": "but why is it missing in the first place?"}, channel_id=self.channel.id, @@ -540,7 +539,12 @@ def test_delete_assessmentitems(self): def test_valid_hints_assessmentitem(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata - assessmentitem["hints"] = json.dumps([{'hint': 'asdasdwdqasd', 'order': 1}, {'hint': 'testing the hint', 'order': 2}]) + assessmentitem["hints"] = json.dumps( + [ + {"hint": "asdasdwdqasd", "order": 1}, + {"hint": "testing the hint", "order": 2}, + ] + ) response = self.sync_changes( [ generate_create_event( @@ -576,10 +580,15 @@ def test_invalid_hints_assessmentitem(self): ) self.assertEqual(response.json()["errors"][0]["table"], "assessmentitem") - self.assertEqual(response.json()["errors"][0]["errors"]["hints"][0], "JSON Data Invalid for hints") + self.assertEqual( + response.json()["errors"][0]["errors"]["hints"][0], + "JSON Data Invalid for hints", + ) self.assertEqual(len(response.json()["errors"]), 1) - with self.assertRaises(models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created"): + with self.assertRaises( + models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created" + ): models.AssessmentItem.objects.get( assessment_id=assessmentitem["assessment_id"] ) @@ -587,10 +596,13 @@ def test_invalid_hints_assessmentitem(self): def test_valid_answers_assessmentitem(self): self.client.force_authenticate(user=self.user) assessmentitem = 
self.assessmentitem_metadata - assessmentitem["answers"] = json.dumps([{'answer': 'test answer 1 :)', 'correct': False, 'order': 1}, - {'answer': 'test answer 2 :)', 'correct': False, 'order': 2}, - {'answer': 'test answer 3 :)', 'correct': True, 'order': 3} - ]) + assessmentitem["answers"] = json.dumps( + [ + {"answer": "test answer 1 :)", "correct": False, "order": 1}, + {"answer": "test answer 2 :)", "correct": False, "order": 2}, + {"answer": "test answer 3 :)", "correct": True, "order": 3}, + ] + ) response = self.sync_changes( [ generate_create_event( @@ -626,10 +638,15 @@ def test_invalid_answers_assessmentitem(self): ) self.assertEqual(response.json()["errors"][0]["table"], "assessmentitem") - self.assertEqual(response.json()["errors"][0]["errors"]["answers"][0], "JSON Data Invalid for answers") + self.assertEqual( + response.json()["errors"][0]["errors"]["answers"][0], + "JSON Data Invalid for answers", + ) self.assertEqual(len(response.json()["errors"]), 1) - with self.assertRaises(models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created"): + with self.assertRaises( + models.AssessmentItem.DoesNotExist, msg="AssessmentItem was created" + ): models.AssessmentItem.objects.get( assessment_id=assessmentitem["assessment_id"] ) @@ -666,7 +683,9 @@ def test_create_assessmentitem(self): self.client.force_authenticate(user=self.user) assessmentitem = self.assessmentitem_metadata response = self.client.post( - reverse("assessmentitem-list"), assessmentitem, format="json", + reverse("assessmentitem-list"), + assessmentitem, + format="json", ) self.assertEqual(response.status_code, 405, response.content) @@ -707,7 +726,8 @@ def setUp(self): def _get_assessmentitem_metadata(self, assessment_id=None, contentnode_id=None): return { "assessment_id": assessment_id or uuid.uuid4().hex, - "contentnode_id": contentnode_id or self.channel.main_tree.get_descendants() + "contentnode_id": contentnode_id + or self.channel.main_tree.get_descendants() .filter(kind_id=content_kinds.EXERCISE) .first() .id, @@ -750,69 +770,133 @@ def _delete_assessmentitem(self, assessmentitem): def test_content_id__same_on_copy(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Assert after copying content_id is same. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__changes_on_new_assessmentitem(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Create a new assessmentitem. 
- self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id)) + self._create_assessmentitem( + self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ) + ) # Assert after creating a new assessmentitem on copied node, it's content_id should change. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__changes_on_deleting_assessmentitem(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Delete an already present assessmentitem from copied contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ) + ) # Assert after deleting assessmentitem on copied node, it's content_id should change. assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__changes_on_updating_assessmentitem(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Update an already present assessmentitem from copied contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id), - {"question": "New Question!"}) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ), + {"question": "New Question!"}, + ) # Assert after updating assessmentitem on copied node, it's content_id should change. 
assessmentitem_node.refresh_from_db() assessmentitem_node_copy.refresh_from_db() - self.assertNotEqual(assessmentitem_node.content_id, assessmentitem_node_copy.content_id) + self.assertNotEqual( + assessmentitem_node.content_id, assessmentitem_node_copy.content_id + ) def test_content_id__doesnot_changes_of_original_node(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) assessmentitem_node.copy_to(target=self.channel.main_tree) content_id_before_updates = assessmentitem_node.content_id # Create, update and delete assessmentitems from original contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node.id), - {"question": "New Question!"}) - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node.id)) - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node.id + ), + {"question": "New Question!"}, + ) + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node.id + ) + ) + self._create_assessmentitem( + self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node.id) + ) # Assert content_id before and after updates remain same. assessmentitem_node.refresh_from_db() @@ -821,25 +905,59 @@ def test_content_id__doesnot_changes_of_original_node(self): def test_content_id__doesnot_changes_if_already_unique(self): # Make a copy of an existing assessmentitem contentnode. - assessmentitem_node = self.channel.main_tree.get_descendants().filter(kind_id=content_kinds.EXERCISE).first() - assessmentitem_node_copy = assessmentitem_node.copy_to(target=self.channel.main_tree) + assessmentitem_node = ( + self.channel.main_tree.get_descendants() + .filter(kind_id=content_kinds.EXERCISE) + .first() + ) + assessmentitem_node_copy = assessmentitem_node.copy_to( + target=self.channel.main_tree + ) # Create, update and delete assessmentitems of copied contentnode. 
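The content_id tests in this class pin down one invariant: a copy shares its source's content_id until its assessment items first diverge (create, update, or delete), at which point it receives a fresh content_id, and later edits leave an already-unique id untouched. A sketch of that rule, under the assumption that divergence is detected by counting nodes that still share the id (mark_assessment_changed is an illustrative helper, not Studio's implementation):

    import uuid

    def mark_assessment_changed(node, nodes_sharing_content_id):
        """Give an edited copy its own content_id, but only if it still shares one."""
        # If another node (e.g. the copy's source) shares this content_id,
        # the edited node has diverged and needs a new identity.
        if len(nodes_sharing_content_id) > 1:
            node["content_id"] = uuid.uuid4().hex
        return node

    original = {"content_id": "c" * 32}
    copy = {"content_id": original["content_id"]}
    mark_assessment_changed(copy, [original, copy])
    assert copy["content_id"] != original["content_id"]  # diverges on first edit

    unique_id = copy["content_id"]
    mark_assessment_changed(copy, [copy])
    assert copy["content_id"] == unique_id  # already unique: unchanged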
- assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id), - {"question": "New Question!"}) - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id)) - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ), + {"question": "New Question!"}, + ) + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ) + ) + self._create_assessmentitem( + self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ) + ) assessmentitem_node_copy.refresh_from_db() content_id_after_first_update = assessmentitem_node_copy.content_id # Once again, let us create, update and delete assessmentitems of copied contentnode. - assessmentitem_from_db = models.AssessmentItem.objects.filter(contentnode=assessmentitem_node_copy.id).first() - self._update_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id), - {"question": "New Question!"}) - self._delete_assessmentitem(self._get_assessmentitem_metadata(assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id)) - self._create_assessmentitem(self._get_assessmentitem_metadata(contentnode_id=assessmentitem_node_copy.id)) + assessmentitem_from_db = models.AssessmentItem.objects.filter( + contentnode=assessmentitem_node_copy.id + ).first() + self._update_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ), + {"question": "New Question!"}, + ) + self._delete_assessmentitem( + self._get_assessmentitem_metadata( + assessmentitem_from_db.assessment_id, assessmentitem_node_copy.id + ) + ) + self._create_assessmentitem( + self._get_assessmentitem_metadata( + contentnode_id=assessmentitem_node_copy.id + ) + ) assessmentitem_node_copy.refresh_from_db() content_id_after_second_update = assessmentitem_node_copy.content_id diff --git a/contentcuration/contentcuration/tests/viewsets/test_bookmark.py b/contentcuration/contentcuration/tests/viewsets/test_bookmark.py index f2ccdc550c..815c14de56 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_bookmark.py +++ b/contentcuration/contentcuration/tests/viewsets/test_bookmark.py @@ -10,7 +10,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def bookmark_metadata(self): return { @@ -118,9 +117,7 @@ def test_delete_bookmarks(self): ) data2 = self.bookmark_db_metadata data2["channel_id"] = self.channel2.id - bookmark2 = models.Channel.bookmarked_by.through.objects.create( - **data2 - ) + bookmark2 = models.Channel.bookmarked_by.through.objects.create(**data2) self.client.force_authenticate(user=self.user) response = self.sync_changes( @@ -175,7 +172,9 @@ def test_create_bookmark(self): self.client.force_authenticate(user=self.user) bookmark = self.bookmark_metadata response = self.client.post( - reverse("bookmark-list"), bookmark, format="json", + reverse("bookmark-list"), + bookmark, + format="json", ) 
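Plain REST creates like the POST in the surrounding test are rejected with a 405: writes in these suites go through sync_changes events instead, and the helpers in viewsets/base.py wrap each generate_*_event to stamp a random client rev. A minimal sketch of that wrapper pattern, with base_generate_create_event standing in for the real util in contentcuration.viewsets.sync.utils:

    import random

    def base_generate_create_event(key, table, obj, channel_id=None):
        # Stand-in for the real event generator; field names are illustrative.
        return {
            "type": "CREATED",
            "key": key,
            "table": table,
            "obj": obj,
            "channel_id": channel_id,
        }

    def generate_create_event(*args, **kwargs):
        """Wrap the base generator and stamp a random client rev, as the test helpers do."""
        event = base_generate_create_event(*args, **kwargs)
        event["rev"] = random.randint(1, 10000000)
        return event

    event = generate_create_event("abc123", "bookmark", {}, channel_id=None)
    assert "rev" in event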
self.assertEqual(response.status_code, 405, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_channel.py b/contentcuration/contentcuration/tests/viewsets/test_channel.py index afedc2a4db..8309f47c8c 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_channel.py +++ b/contentcuration/contentcuration/tests/viewsets/test_channel.py @@ -6,6 +6,7 @@ from django.urls import reverse from kolibri_public.models import ContentNode as PublicContentNode from le_utils.constants import content_kinds +from mock import patch from contentcuration import models from contentcuration import models as cc @@ -23,14 +24,13 @@ from contentcuration.tests.viewsets.base import SyncTestMixin from contentcuration.viewsets.channel import _unpublished_changes_query from contentcuration.viewsets.sync.constants import CHANNEL -from mock import patch class SyncTestCase(SyncTestMixin, StudioAPITestCase): @classmethod def setUpClass(cls): super(SyncTestCase, cls).setUpClass() - cls.patch_copy_db = patch('contentcuration.utils.publish.save_export_database') + cls.patch_copy_db = patch("contentcuration.utils.publish.save_export_database") cls.mock_save_export = cls.patch_copy_db.start() @classmethod @@ -51,7 +51,11 @@ def test_create_channel(self): self.client.force_authenticate(user=user) channel = self.channel_metadata response = self.sync_changes( - [generate_create_event(channel["id"], CHANNEL, channel, channel_id=channel["id"])] + [ + generate_create_event( + channel["id"], CHANNEL, channel, channel_id=channel["id"] + ) + ] ) self.assertEqual(response.status_code, 200, response.content) try: @@ -66,8 +70,12 @@ def test_create_channels(self): channel2 = self.channel_metadata response = self.sync_changes( [ - generate_create_event(channel1["id"], CHANNEL, channel1, channel_id=channel1["id"]), - generate_create_event(channel2["id"], CHANNEL, channel2, channel_id=channel2["id"]), + generate_create_event( + channel1["id"], CHANNEL, channel1, channel_id=channel1["id"] + ), + generate_create_event( + channel2["id"], CHANNEL, channel2, channel_id=channel2["id"] + ), ] ) self.assertEqual(response.status_code, 200, response.content) @@ -83,63 +91,95 @@ def test_create_channels(self): def test_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id)] + [ + generate_update_event( + channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id + ) + ] ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(models.Channel.objects.get(id=channel.id).name, new_name) def test_update_channel_thumbnail_encoding(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) new_encoding = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfQA" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, { - "thumbnail_encoding.base64": new_encoding, - "thumbnail_encoding.orientation": 1, - "thumbnail_encoding.scale": 0.73602189113443, - "thumbnail_encoding.startX": 
-96.66631072431669, - "thumbnail_encoding.startY": -335.58116356397636, - }, channel_id=channel.id)] + [ + generate_update_event( + channel.id, + CHANNEL, + { + "thumbnail_encoding.base64": new_encoding, + "thumbnail_encoding.orientation": 1, + "thumbnail_encoding.scale": 0.73602189113443, + "thumbnail_encoding.startX": -96.66631072431669, + "thumbnail_encoding.startY": -335.58116356397636, + }, + channel_id=channel.id, + ) + ] ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(models.Channel.objects.get(id=channel.id).thumbnail_encoding["base64"], new_encoding) + self.assertEqual( + models.Channel.objects.get(id=channel.id).thumbnail_encoding["base64"], + new_encoding, + ) def test_cannot_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id)], + [ + generate_update_event( + channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id + ) + ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) self.assertNotEqual(models.Channel.objects.get(id=channel.id).name, new_name) def test_viewer_cannot_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.viewers.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( - [generate_update_event(channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id)], + [ + generate_update_event( + channel.id, CHANNEL, {"name": new_name}, channel_id=channel.id + ) + ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) self.assertNotEqual(models.Channel.objects.get(id=channel.id).name, new_name) def test_update_channel_defaults(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) author = "This is not the old author" @@ -147,7 +187,10 @@ def test_update_channel_defaults(self): response = self.sync_changes( [ generate_update_event( - channel.id, CHANNEL, {"content_defaults.author": author}, channel_id=channel.id + channel.id, + CHANNEL, + {"content_defaults.author": author}, + channel_id=channel.id, ) ] ) @@ -162,7 +205,10 @@ def test_update_channel_defaults(self): response = self.sync_changes( [ generate_update_event( - channel.id, CHANNEL, {"content_defaults.aggregator": aggregator}, channel_id=channel.id + channel.id, + CHANNEL, + {"content_defaults.aggregator": aggregator}, + channel_id=channel.id, ) ] ) @@ -177,17 +223,25 @@ def test_update_channel_defaults(self): def test_update_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) 
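The update events above address nested JSON fields with dotted keys ("thumbnail_encoding.base64", "content_defaults.author"). A minimal sketch of applying such a change map to a plain dict; apply_dotted_updates is an illustrative name, not the sync layer's actual helper:

    def apply_dotted_updates(record, changes):
        """Apply {"a.b": value} style updates to nested dicts in place."""
        for dotted_key, value in changes.items():
            target = record
            *parents, leaf = dotted_key.split(".")
            for part in parents:
                # Create intermediate dicts as needed, then descend.
                target = target.setdefault(part, {})
            target[leaf] = value
        return record

    channel = {"name": "old", "thumbnail_encoding": {}}
    apply_dotted_updates(
        channel,
        {"name": "new", "thumbnail_encoding.base64": "data:image/png;base64,iVBOR..."},
    )
    assert channel["thumbnail_encoding"]["base64"].startswith("data:image/png")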
channel2.editors.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( [ - generate_update_event(channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id), - generate_update_event(channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id), + generate_update_event( + channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id + ), + generate_update_event( + channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id + ), ] ) self.assertEqual(response.status_code, 200, response.content) @@ -196,16 +250,24 @@ def test_update_channels(self): def test_cannot_update_some_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( [ - generate_update_event(channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id), - generate_update_event(channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id), + generate_update_event( + channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id + ), + generate_update_event( + channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id + ), ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) @@ -214,17 +276,25 @@ def test_cannot_update_some_channels(self): def test_viewer_cannot_update_some_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel2.viewers.add(user) new_name = "This is not the old name" self.client.force_authenticate(user=user) response = self.sync_changes( [ - generate_update_event(channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id), - generate_update_event(channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id), + generate_update_event( + channel1.id, CHANNEL, {"name": new_name}, channel_id=channel1.id + ), + generate_update_event( + channel2.id, CHANNEL, {"name": new_name}, channel_id=channel2.id + ), ], ) self.assertEqual(len(response.json()["disallowed"]), 1, response.content) @@ -233,11 +303,15 @@ def test_viewer_cannot_update_some_channels(self): def test_delete_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) self.client.force_authenticate(user=user) - response = self.sync_changes([generate_delete_event(channel.id, CHANNEL, channel_id=channel.id)]) + response = self.sync_changes( + [generate_delete_event(channel.id, CHANNEL, channel_id=channel.id)] + ) self.assertEqual(response.status_code, 200, response.content) channel = models.Channel.objects.get(id=channel.id) self.assertTrue(channel.deleted) @@ -245,7 +319,9 @@ def test_delete_channel(self): def 
test_cannot_delete_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) self.client.force_authenticate(user=user) response = self.sync_changes( @@ -261,10 +337,14 @@ def test_cannot_delete_channel(self): def test_delete_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel2.editors.add(user) self.client.force_authenticate(user=user) @@ -280,9 +360,13 @@ def test_delete_channels(self): def test_cannot_delete_some_channels(self): user = testdata.user() - channel1 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel1 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel1.editors.add(user) - channel2 = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel2 = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) self.client.force_authenticate(user=user) response = self.sync_changes( @@ -311,11 +395,7 @@ def test_sync_channel_called_correctly(self, sync_channel_mock): args = [channel.id, False, False, False, False] args[i] = True - response = self.sync_changes( - [ - generate_sync_channel_event(*args) - ] - ) + response = self.sync_changes([generate_sync_channel_event(*args)]) self.assertEqual(response.status_code, 200) sync_channel_mock.assert_called_once() @@ -325,9 +405,7 @@ def test_deploy_channel_event(self): channel = testdata.channel() user = testdata.user() channel.editors.add(user) - self.client.force_authenticate( - user - ) # This will skip all authentication checks + self.client.force_authenticate(user) # This will skip all authentication checks channel.main_tree.refresh_from_db() channel.staging_tree = cc.ContentNode( @@ -347,10 +425,8 @@ def test_deploy_channel_event(self): self.contentnode = cc.ContentNode.objects.create(kind_id="video") response = self.sync_changes( - [ - generate_deploy_channel_event(channel.id, user.id) - ] - ) + [generate_deploy_channel_event(channel.id, user.id)] + ) self.assertEqual(response.status_code, 200) modified_channel = models.Channel.objects.get(id=channel.id) @@ -362,9 +438,7 @@ def test_deploy_with_staging_tree_None(self): channel = testdata.channel() user = testdata.user() channel.editors.add(user) - self.client.force_authenticate( - user - ) # This will skip all authentication checks + self.client.force_authenticate(user) # This will skip all authentication checks channel.main_tree.refresh_from_db() channel.staging_tree = None @@ -380,10 +454,8 @@ def test_deploy_with_staging_tree_None(self): self.contentnode = cc.ContentNode.objects.create(kind_id="video") response = self.sync_changes( - [ - generate_deploy_channel_event(channel.id, user.id) - ] - ) + [generate_deploy_channel_event(channel.id, user.id)] + ) # Should raise validation error as staging tree was set to NONE self.assertEqual(len(response.json()["errors"]), 1, response.content) modified_channel = models.Channel.objects.get(id=channel.id) @@ -392,14 +464,12 @@ def test_deploy_with_staging_tree_None(self): def 
test_publish_does_not_make_publishable(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) - self.sync_changes( - [ - generate_publish_channel_event(channel.id) - ] - ) + self.sync_changes([generate_publish_channel_event(channel.id)]) self.assertEqual(_unpublished_changes_query(channel).count(), 0) @@ -407,13 +477,10 @@ def test_publish_next(self): channel = testdata.channel() user = testdata.user() channel.editors.add(user) - self.client.force_authenticate( - user - ) # This will skip all authentication checks + self.client.force_authenticate(user) # This will skip all authentication checks channel.staging_tree = testdata.tree() - node = testdata.node({ - 'kind_id': 'video', 'title': 'title', 'children': []}) + node = testdata.node({"kind_id": "video", "title": "title", "children": []}) node.complete = True node.parent = channel.staging_tree node.save() @@ -421,11 +488,7 @@ def test_publish_next(self): channel.save() self.assertEqual(channel.staging_tree.published, False) - response = self.sync_changes( - [ - generate_publish_next_event(channel.id) - ] - ) + response = self.sync_changes([generate_publish_next_event(channel.id)]) self.assertEqual(response.status_code, 200) modified_channel = models.Channel.objects.get(id=channel.id) @@ -435,9 +498,7 @@ def test_publish_next_with_incomplete_staging_tree(self): channel = testdata.channel() user = testdata.user() channel.editors.add(user) - self.client.force_authenticate( - user - ) # This will skip all authentication checks + self.client.force_authenticate(user) # This will skip all authentication checks channel.staging_tree = cc.ContentNode( kind_id=content_kinds.TOPIC, title="test", node_id="aaa" @@ -446,15 +507,13 @@ def test_publish_next_with_incomplete_staging_tree(self): channel.save() self.assertEqual(channel.staging_tree.published, False) - response = self.sync_changes( - [ - generate_publish_next_event(channel.id) - ] - ) + response = self.sync_changes([generate_publish_next_event(channel.id)]) self.assertEqual(response.status_code, 200) self.assertTrue( - "Channel is not ready to be published" in response.json()["errors"][0]["errors"][0]) + "Channel is not ready to be published" + in response.json()["errors"][0]["errors"][0] + ) modified_channel = models.Channel.objects.get(id=channel.id) self.assertEqual(modified_channel.staging_tree.published, False) @@ -470,12 +529,15 @@ def channel_metadata(self): def test_fetch_channel_for_admin(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) user.is_admin = True user.save() self.client.force_authenticate(user=user) response = self.client.get( - reverse("channel-detail", kwargs={"pk": channel.id}), format="json", + reverse("channel-detail", kwargs={"pk": channel.id}), + format="json", ) self.assertEqual(response.status_code, 200, response.content) @@ -487,7 +549,8 @@ def test_fetch_admin_channels_invalid_filter(self): user.save() self.client.force_authenticate(user=user) response = self.client.get( - reverse("admin-channels-list") + "?public=true&page_size=25&edit=true", format="json", + reverse("admin-channels-list") + "?public=true&page_size=25&edit=true", + format="json", ) self.assertEqual(response.status_code, 200, response.content) @@ -495,7 +558,11 @@ def 
test_create_channel(self): user = testdata.user() self.client.force_authenticate(user=user) channel = self.channel_metadata - response = self.client.post(reverse("channel-list"), channel, format="json",) + response = self.client.post( + reverse("channel-list"), + channel, + format="json", + ) self.assertEqual(response.status_code, 201, response.content) try: models.Channel.objects.get(id=channel["id"]) @@ -504,7 +571,9 @@ def test_create_channel(self): def test_update_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) new_name = "This is not the old name" @@ -518,7 +587,9 @@ def test_update_channel(self): def test_delete_channel(self): user = testdata.user() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) self.client.force_authenticate(user=user) @@ -535,7 +606,9 @@ def test_admin_restore_channel(self): user.is_admin = True user.is_staff = True user.save() - channel = models.Channel.objects.create(actor_id=user.id, **self.channel_metadata) + channel = models.Channel.objects.create( + actor_id=user.id, **self.channel_metadata + ) channel.editors.add(user) channel.deleted = True channel.save(actor_id=user.id) @@ -549,16 +622,31 @@ def test_admin_restore_channel(self): self.assertEqual(response.status_code, 200, response.content) channel = models.Channel.objects.get(id=channel.id) self.assertFalse(channel.deleted) - self.assertEqual(1, channel.history.filter(actor=user, action=channel_history.RECOVERY).count()) + self.assertEqual( + 1, + channel.history.filter(actor=user, action=channel_history.RECOVERY).count(), + ) class UnpublishedChangesQueryTestCase(StudioAPITestCase): def test_unpublished_changes_query_with_channel_object(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 1) @@ -567,24 +655,42 @@ def test_unpublished_changes_query_with_channel_object(self): def test_unpublished_changes_query_with_channel_object_none_since_publish(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + 
models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) - def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish(self): + def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish( + self, + ): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) models.Change.create_change( generate_update_event( - channel.id, - CHANNEL, - {"name": "new name 2"}, - channel_id=channel.id + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id ), created_by_id=user.id, unpublishable=True, @@ -593,22 +699,30 @@ def test_unpublished_changes_query_with_channel_object_no_publishable_since_publ queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) - def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_through_error(self): + def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_through_error( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree = None channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) - def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_because_incomplete(self): + def test_unpublished_changes_query_with_channel_object_no_publishable_since_publish_if_publish_fails_because_incomplete( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree.complete = False channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) queryset = _unpublished_changes_query(channel) self.assertEqual(queryset.count(), 0) @@ -616,108 +730,153 @@ def test_unpublished_changes_query_with_channel_object_no_publishable_since_publ def test_unpublished_changes_query_with_outerref(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), 
created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertTrue(channels[0].unpublished_changes) def test_unpublished_changes_query_with_outerref_none_since_publish(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) def test_unpublished_changes_query_with_outerref_no_publishable_since_publish(self): channel = testdata.channel() user = testdata.user() - models.Change.create_change(generate_update_event(channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id), created_by_id=user.id) - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) models.Change.create_change( generate_update_event( - channel.id, - CHANNEL, - {"name": "new name 2"}, - channel_id=channel.id + channel.id, CHANNEL, {"name": "new name"}, channel_id=channel.id + ), + created_by_id=user.id, + ) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) + models.Change.create_change( + generate_update_event( + channel.id, CHANNEL, {"name": "new name 2"}, channel_id=channel.id ), created_by_id=user.id, - unpublishable=True + unpublishable=True, ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) - def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_through_error(self): + def 
test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_through_error( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree = None channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) - def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_because_incomplete(self): + def test_unpublished_changes_query_no_publishable_since_publish_if_publish_fails_because_incomplete( + self, + ): channel = testdata.channel() user = testdata.user() channel.main_tree.complete = False channel.save() - models.Change.create_change(generate_publish_channel_event(channel.id), created_by_id=user.id) + models.Change.create_change( + generate_publish_channel_event(channel.id), created_by_id=user.id + ) outer_ref = OuterRef("id") unpublished_changes = _unpublished_changes_query(outer_ref) - channels = models.Channel.objects.filter(pk=channel.pk).annotate(unpublished_changes=Exists(unpublished_changes)) + channels = models.Channel.objects.filter(pk=channel.pk).annotate( + unpublished_changes=Exists(unpublished_changes) + ) self.assertFalse(channels[0].unpublished_changes) class ChannelLanguageTestCase(StudioAPITestCase): - def setUp(self): super(ChannelLanguageTestCase, self).setUp() self.channel = testdata.channel() - self.channel.language_id = 'en' + self.channel.language_id = "en" self.channel.save() self.channel_id = self.channel.id - self.node_id = '00000000000000000000000000000003' + self.node_id = "00000000000000000000000000000003" self.public_node = PublicContentNode.objects.create( id=uuid.UUID(self.node_id), - title='Video 1', + title="Video 1", content_id=uuid.uuid4(), channel_id=uuid.UUID(self.channel.id), - lang_id='en', + lang_id="en", ) def test_channel_language_exists_valid_channel(self): - ContentNode.objects.filter(node_id=self.public_node.id).update(language_id='en') + ContentNode.objects.filter(node_id=self.public_node.id).update(language_id="en") response = self._perform_action("channel-language-exists", self.channel.id) self.assertEqual(response.status_code, 200, response.content) self.assertTrue(response.json()["exists"]) def test_channel_language_doesnt_exists_valid_channel(self): - PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id='es') + PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id="es") response = self._perform_action("channel-language-exists", self.channel.id) self.assertEqual(response.status_code, 200, response.content) self.assertFalse(response.json()["exists"]) def test_channel_language_exists_invalid_channel(self): - response = self._perform_action("channel-language-exists", 'unknown_channel_id') + response = self._perform_action("channel-language-exists", "unknown_channel_id") self.assertEqual(response.status_code, 404, response.content) def test_channel_language_exists_invalid_request(self): @@ -726,11 +885,15 @@ def test_channel_language_exists_invalid_request(self): self.assertEqual(response.status_code, 404, response.content) def 
test_get_languages_in_channel_success_languages(self): - new_language = 'swa' + new_language = "swa" self.channel.language_id = new_language self.channel.save() - PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id=new_language) - ContentNode.objects.filter(node_id=self.public_node.id).update(language_id=new_language) + PublicContentNode.objects.filter(id=self.public_node.id).update( + lang_id=new_language + ) + ContentNode.objects.filter(node_id=self.public_node.id).update( + language_id=new_language + ) response = self._perform_action("channel-languages", self.channel.id) languages = response.json()["languages"] @@ -739,12 +902,16 @@ def test_get_languages_in_channel_success_languages(self): self.assertListEqual(languages, [new_language]) def test_get_languages_in_channel_success_channel_language_excluded(self): - new_language = 'fr' - channel_lang = 'en' + new_language = "fr" + channel_lang = "en" self.channel.language_id = channel_lang self.channel.save() - PublicContentNode.objects.filter(id=self.public_node.id).update(lang_id=new_language) - ContentNode.objects.filter(node_id=self.public_node.id).update(language_id=new_language) + PublicContentNode.objects.filter(id=self.public_node.id).update( + lang_id=new_language + ) + ContentNode.objects.filter(node_id=self.public_node.id).update( + language_id=new_language + ) response = self._perform_action("channel-languages", self.channel.id) languages = response.json()["languages"] @@ -769,5 +936,7 @@ def test_get_languages_in_channel_invalid_request(self): def _perform_action(self, url_path, channel_id): user = testdata.user() self.client.force_authenticate(user=user) - response = self.client.get(reverse(url_path, kwargs={"pk": channel_id}), format="json") + response = self.client.get( + reverse(url_path, kwargs={"pk": channel_id}), format="json" + ) return response diff --git a/contentcuration/contentcuration/tests/viewsets/test_channelset.py b/contentcuration/contentcuration/tests/viewsets/test_channelset.py index 2fb108a3a2..a0f72f7a2d 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_channelset.py +++ b/contentcuration/contentcuration/tests/viewsets/test_channelset.py @@ -13,7 +13,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def channelset_metadata(self): return { @@ -39,7 +38,11 @@ def test_create_channelset(self): self.client.force_authenticate(user=self.user) channelset = self.channelset_metadata response = self.sync_changes( - [generate_create_event(channelset["id"], CHANNELSET, channelset, user_id=self.user.id)], + [ + generate_create_event( + channelset["id"], CHANNELSET, channelset, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -53,8 +56,12 @@ def test_create_channelsets(self): channelset2 = self.channelset_metadata response = self.sync_changes( [ - generate_create_event(channelset1["id"], CHANNELSET, channelset1, user_id=self.user.id), - generate_create_event(channelset2["id"], CHANNELSET, channelset2, user_id=self.user.id), + generate_create_event( + channelset1["id"], CHANNELSET, channelset1, user_id=self.user.id + ), + generate_create_event( + channelset2["id"], CHANNELSET, channelset2, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -75,7 +82,11 @@ def test_update_channelset(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( - [generate_update_event(channelset.id, CHANNELSET, {"channels": {}}, user_id=self.user.id)], + [ + 
generate_update_event( + channelset.id, CHANNELSET, {"channels": {}}, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertFalse( @@ -94,8 +105,12 @@ def test_update_channelsets(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( [ - generate_update_event(channelset1.id, CHANNELSET, {"channels": {}}, user_id=self.user.id), - generate_update_event(channelset2.id, CHANNELSET, {"channels": {}}, user_id=self.user.id), + generate_update_event( + channelset1.id, CHANNELSET, {"channels": {}}, user_id=self.user.id + ), + generate_update_event( + channelset2.id, CHANNELSET, {"channels": {}}, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -116,7 +131,11 @@ def test_update_channelset_empty(self): channelset.editors.add(self.user) self.client.force_authenticate(user=self.user) response = self.sync_changes( - [generate_update_event(channelset.id, CHANNELSET, {}, user_id=self.user.id)], + [ + generate_update_event( + channelset.id, CHANNELSET, {}, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) @@ -128,7 +147,10 @@ def test_update_channelset_unwriteable_fields(self): response = self.sync_changes( [ generate_update_event( - channelset.id, CHANNELSET, {"not_a_field": "not_a_value"}, user_id=self.user.id + channelset.id, + CHANNELSET, + {"not_a_field": "not_a_value"}, + user_id=self.user.id, ) ], ) @@ -150,7 +172,7 @@ def test_update_channelset_channels(self): channelset.id, CHANNELSET, {"channels.{}".format(channel1.id): True}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -170,7 +192,7 @@ def test_update_channelset_channels(self): channelset.id, CHANNELSET, {"channels.{}".format(channel2.id): True}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -192,7 +214,7 @@ def test_update_channelset_channels(self): channelset.id, CHANNELSET, {"channels.{}".format(channel2.id): None}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -223,7 +245,7 @@ def test_update_channelset_channels_no_permission(self): channelset.id, CHANNELSET, {"channels.{}".format(channel1.id): True}, - user_id=self.user.id + user_id=self.user.id, ) ], ) @@ -304,7 +326,9 @@ def test_create_channelset(self): self.client.force_authenticate(user=self.user) channelset = self.channelset_metadata response = self.client.post( - reverse("channelset-list"), channelset, format="json", + reverse("channelset-list"), + channelset, + format="json", ) self.assertEqual(response.status_code, 201, response.content) try: @@ -318,7 +342,9 @@ def test_create_channelset_no_channel_permission(self): channelset = self.channelset_metadata channelset["channels"] = {new_channel.id: True} response = self.client.post( - reverse("channelset-list"), channelset, format="json", + reverse("channelset-list"), + channelset, + format="json", ) self.assertEqual(response.status_code, 400, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_clipboard.py b/contentcuration/contentcuration/tests/viewsets/test_clipboard.py index 50f65f7624..9f88dfd58d 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_clipboard.py +++ b/contentcuration/contentcuration/tests/viewsets/test_clipboard.py @@ -56,7 +56,11 @@ def test_create_clipboard(self): self.client.force_authenticate(user=self.user) clipboard = self.clipboard_metadata response = self.sync_changes( - [generate_create_event(clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id)], + [ + 
generate_create_event( + clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -69,7 +73,11 @@ def test_create_clipboard_with_null_extra_fields(self): clipboard = self.clipboard_metadata clipboard["extra_fields"] = None response = self.sync_changes( - [generate_create_event(clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id)], + [ + generate_create_event( + clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -84,7 +92,11 @@ def test_create_clipboard_with_parent(self): clipboard = self.clipboard_metadata clipboard["parent"] = channel.main_tree_id response = self.sync_changes( - [generate_create_event(clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id)], + [ + generate_create_event( + clipboard["id"], CLIPBOARD, clipboard, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -100,8 +112,12 @@ def test_create_clipboards(self): clipboard2 = self.clipboard_metadata response = self.sync_changes( [ - generate_create_event(clipboard1["id"], CLIPBOARD, clipboard1, user_id=self.user.id), - generate_create_event(clipboard2["id"], CLIPBOARD, clipboard2, user_id=self.user.id), + generate_create_event( + clipboard1["id"], CLIPBOARD, clipboard1, user_id=self.user.id + ), + generate_create_event( + clipboard2["id"], CLIPBOARD, clipboard2, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -239,7 +255,9 @@ def test_create_clipboard(self): self.client.force_authenticate(user=self.user) clipboard = self.clipboard_metadata response = self.client.post( - reverse("clipboard-list"), clipboard, format="json", + reverse("clipboard-list"), + clipboard, + format="json", ) self.assertEqual(response.status_code, 405, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_contentnode.py b/contentcuration/contentcuration/tests/viewsets/test_contentnode.py index 28519c2762..d27b4304d9 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_contentnode.py +++ b/contentcuration/contentcuration/tests/viewsets/test_contentnode.py @@ -15,7 +15,9 @@ from le_utils.constants import content_kinds from le_utils.constants import exercises from le_utils.constants import roles -from le_utils.constants.labels.accessibility_categories import ACCESSIBILITYCATEGORIESLIST +from le_utils.constants.labels.accessibility_categories import ( + ACCESSIBILITYCATEGORIESLIST, +) from le_utils.constants.labels.subjects import SUBJECTSLIST from contentcuration import models @@ -263,17 +265,27 @@ def assertQuerysetPKs(self, expected_qs, actual_qs): self.assertEqual(expected_pk, actual_pk) def test_filter_ancestors_of(self): - target = models.ContentNode.objects.get(node_id="00000000000000000000000000000003") - queryset = self.filter.filter_ancestors_of(models.ContentNode.objects.all(), None, target.pk) + target = models.ContentNode.objects.get( + node_id="00000000000000000000000000000003" + ) + queryset = self.filter.filter_ancestors_of( + models.ContentNode.objects.all(), None, target.pk + ) self.assertQuerysetPKs(target.get_ancestors(include_self=True), queryset) def test_filter_ancestors_of__root_node(self): - queryset = self.filter.filter_ancestors_of(models.ContentNode.objects.all(), None, self.root.pk) - self.assertQuerysetPKs(models.ContentNode.objects.filter(pk=self.root.pk), queryset) + queryset = 
self.filter.filter_ancestors_of( + models.ContentNode.objects.all(), None, self.root.pk + ) + self.assertQuerysetPKs( + models.ContentNode.objects.filter(pk=self.root.pk), queryset + ) def test_filter_ancestors_of__missing_target(self): - queryset = self.filter.filter_ancestors_of(models.ContentNode.objects.all(), None, "nonexistant ID") + queryset = self.filter.filter_ancestors_of( + models.ContentNode.objects.all(), None, "nonexistant ID" + ) self.assertQuerysetPKs(models.ContentNode.objects.none(), queryset) @@ -290,7 +302,8 @@ def test_get_contentnode__editor(self): self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["id"], contentnode.id) @@ -305,7 +318,8 @@ def test_get_contentnode__viewer(self): self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["id"], contentnode.id) @@ -319,7 +333,8 @@ def test_get_contentnode__no_permssion(self): self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 404, response.content) @@ -329,7 +344,8 @@ def test_get_contentnode__unauthenticated(self): with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 403, response.content) @@ -341,7 +357,8 @@ def test_public_get_contentnode__unauthenticated(self): with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 403, response.content) @@ -360,19 +377,38 @@ def test_consolidate_extra_fields(self): "m": 3, "n": 6, "mastery_model": exercises.M_OF_N, - } + }, ) self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["threshold"]["m"], 3) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["threshold"]["n"], 6) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["threshold"]["mastery_model"], exercises.M_OF_N) - self.assertEqual(response.data["extra_fields"]["options"]["completion_criteria"]["model"], completion_criteria.MASTERY) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"][ + "threshold" + ]["m"], + 3, + ) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"][ + "threshold" + ]["n"], + 6, + ) + self.assertEqual( + response.data["extra_fields"]["options"]["completion_criteria"][ + "threshold" + ]["mastery_model"], + exercises.M_OF_N, + ) + self.assertEqual( + 
response.data["extra_fields"]["options"]["completion_criteria"]["model"], + completion_criteria.MASTERY, + ) def test_consolidate_extra_fields_with_mastrey_model_none(self): @@ -387,24 +423,23 @@ def test_consolidate_extra_fields_with_mastrey_model_none(self): description="India is the hottest country in the world", parent_id=channel.main_tree_id, extra_fields={ - "m": None, "n": None, "mastery_model": None, - } + }, ) self.client.force_authenticate(user=user) with self.settings(TEST_ENV=False): response = self.client.get( - self.viewset_url(pk=contentnode.id), format="json", + self.viewset_url(pk=contentnode.id), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["extra_fields"], {}) class SyncTestCase(SyncTestMixin, StudioAPITestCase): - def setUp(self): super(SyncTestCase, self).setUp() self.channel = testdata.channel() @@ -436,7 +471,14 @@ def test_create_contentnode_no_permissions(self): self.channel.editors.remove(self.user) contentnode = self.contentnode_metadata response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(models.ContentNode.DoesNotExist): @@ -446,7 +488,14 @@ def test_create_contentnode_with_parent(self): self.channel.editors.add(self.user) contentnode = self.contentnode_metadata response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -464,7 +513,14 @@ def test_cannot_create_contentnode(self): contentnode["parent"] = self.channel.main_tree_id response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(len(response.data["disallowed"]), 1) try: @@ -478,8 +534,18 @@ def test_create_contentnodes(self): contentnode2 = self.contentnode_metadata response = self.sync_changes( [ - generate_create_event(contentnode1["id"], CONTENTNODE, contentnode1, channel_id=self.channel.id), - generate_create_event(contentnode2["id"], CONTENTNODE, contentnode2, channel_id=self.channel.id), + generate_create_event( + contentnode1["id"], + CONTENTNODE, + contentnode1, + channel_id=self.channel.id, + ), + generate_create_event( + contentnode2["id"], + CONTENTNODE, + contentnode2, + channel_id=self.channel.id, + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -507,10 +573,16 @@ def test_cannot_create_some_contentnodes(self): response = self.sync_changes( [ generate_create_event( - contentnode1["id"], CONTENTNODE, contentnode1, channel_id=channel1.id + contentnode1["id"], + CONTENTNODE, + contentnode1, + channel_id=channel1.id, ), generate_create_event( - contentnode2["id"], CONTENTNODE, contentnode2, channel_id=channel2.id + contentnode2["id"], + CONTENTNODE, + contentnode2, + channel_id=channel2.id, ), ], ) @@ -533,7 +605,14 @@ def test_update_contentnode(self): new_title = "This is not the old title" response = self.sync_changes( - [generate_update_event(contentnode.id, 
CONTENTNODE, {"title": new_title}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( @@ -545,7 +624,11 @@ def test_cannot_update_contentnode_parent(self): contentnode2 = models.ContentNode.objects.create(**self.contentnode_db_metadata) self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"parent": contentnode2.id})], + [ + generate_update_event( + contentnode.id, CONTENTNODE, {"parent": contentnode2.id} + ) + ], ) self.assertNotEqual( models.ContentNode.objects.get(id=contentnode.id).parent_id, contentnode2.id @@ -559,7 +642,10 @@ def test_cannot_update_no_permissions(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, ) ], ) @@ -577,7 +663,11 @@ def test_update_descendants_contentnode(self): new_language = "es" response = self.sync_changes( - [generate_update_descendants_event(root_node.id, {"language": new_language}, channel_id=self.channel.id)], + [ + generate_update_descendants_event( + root_node.id, {"language": new_language}, channel_id=self.channel.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) @@ -589,11 +679,19 @@ def test_update_descendants_contentnode(self): def test_cannot_update_descendants_when_updating_non_topic_node(self): root_node = testdata.tree() - video_node = root_node.get_descendants().filter(kind_id=content_kinds.VIDEO).first() + video_node = ( + root_node.get_descendants().filter(kind_id=content_kinds.VIDEO).first() + ) new_language = "pt" response = self.sync_changes( - [generate_update_descendants_event(video_node.id, {"language": new_language}, channel_id=self.channel.id)], + [ + generate_update_descendants_event( + video_node.id, + {"language": new_language}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(len(response.data["errors"]), 1) @@ -610,20 +708,33 @@ def test_update_contentnode_exercise_mastery_model(self): m = 5 n = 10 response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "extra_fields.options.completion_criteria.threshold.m": m, - "extra_fields.options.completion_criteria.threshold.n": n, - "extra_fields.options.completion_criteria.threshold.mastery_model": exercises.M_OF_N, - "extra_fields.options.completion_criteria.model": completion_criteria.MASTERY - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.threshold.m": m, + "extra_fields.options.completion_criteria.threshold.n": n, + "extra_fields.options.completion_criteria.threshold.mastery_model": exercises.M_OF_N, + "extra_fields.options.completion_criteria.model": completion_criteria.MASTERY, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["m"], m + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["m"], + m, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["n"], n + 
models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["n"], + n, ) def test_update_contentnode_exercise_mastery_model_partial(self): @@ -646,14 +757,24 @@ def test_update_contentnode_exercise_mastery_model_partial(self): # Update m and n fields m = 4 response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "extra_fields.options.completion_criteria.threshold.m": m, - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.threshold.m": m, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["m"], m + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["m"], + m, ) def test_update_contentnode_exercise_mastery_model_old(self): @@ -670,23 +791,42 @@ def test_update_contentnode_exercise_mastery_model_old(self): # Update m and n fields m = 4 response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "extra_fields.options.completion_criteria.threshold.m": m, - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.threshold.m": m, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["m"], m + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["m"], + m, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["n"], 10 + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["n"], + 10, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["threshold"]["mastery_model"], exercises.M_OF_N + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["threshold"]["mastery_model"], + exercises.M_OF_N, ) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["completion_criteria"]["model"], completion_criteria.MASTERY + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "completion_criteria" + ]["model"], + completion_criteria.MASTERY, ) def test_update_contentnode_exercise_incomplete_mastery_model_marked_complete(self): @@ -695,16 +835,23 @@ def test_update_contentnode_exercise_incomplete_mastery_model_marked_complete(se contentnode = models.ContentNode.objects.create(**metadata) response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, { - "complete": True, - }, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "complete": True, + }, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertFalse( - models.ContentNode.objects.get(id=contentnode.id).complete - ) - change = models.Change.objects.filter(channel=self.channel, change_type=UPDATED, table=CONTENTNODE).last() + 
self.assertFalse(models.ContentNode.objects.get(id=contentnode.id).complete) + change = models.Change.objects.filter( + channel=self.channel, change_type=UPDATED, table=CONTENTNODE + ).last() self.assertFalse(change.kwargs["mods"]["complete"]) def test_update_contentnode_extra_fields(self): @@ -712,11 +859,19 @@ def test_update_contentnode_extra_fields(self): # Update extra_fields.randomize randomize = True response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.randomize": randomize}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"extra_fields.randomize": randomize}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.ContentNode.objects.get(id=contentnode.id).extra_fields["randomize"], randomize + models.ContentNode.objects.get(id=contentnode.id).extra_fields["randomize"], + randomize, ) def test_update_contentnode_add_to_extra_fields_nested(self): @@ -724,10 +879,22 @@ def test_update_contentnode_add_to_extra_fields_nested(self): contentnode = models.ContentNode.objects.create(**metadata) # Add extra_fields.options.modality response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.options.modality": "QUIZ"}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"extra_fields.options.modality": "QUIZ"}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertEqual(models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["modality"], "QUIZ") + self.assertEqual( + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "modality" + ], + "QUIZ", + ) def test_update_contentnode_remove_from_extra_fields_nested(self): metadata = self.contentnode_db_metadata @@ -739,11 +906,20 @@ def test_update_contentnode_remove_from_extra_fields_nested(self): contentnode = models.ContentNode.objects.create(**metadata) # Remove extra_fields.options.modality response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.options.modality": None}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"extra_fields.options.modality": None}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(KeyError): - models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"]["modality"] + models.ContentNode.objects.get(id=contentnode.id).extra_fields["options"][ + "modality" + ] def test_update_contentnode_update_options_completion_criteria(self): metadata = self.contentnode_db_metadata @@ -765,17 +941,22 @@ def test_update_contentnode_update_options_completion_criteria(self): CONTENTNODE, { "extra_fields.options.completion_criteria.model": completion_criteria.TIME, - "extra_fields.options.completion_criteria.threshold": 10 + "extra_fields.options.completion_criteria.threshold": 10, }, - channel_id=self.channel.id + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) c = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.TIME) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["threshold"], 10) + self.assertEqual( + 
c.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.TIME, + ) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["threshold"], 10 + ) def test_update_contentnode_update_options_completion_criteria_threshold_only(self): metadata = self.contentnode_db_metadata @@ -795,18 +976,21 @@ def test_update_contentnode_update_options_completion_criteria_threshold_only(se generate_update_event( contentnode.id, CONTENTNODE, - { - "extra_fields.options.completion_criteria.threshold": 10 - }, - channel_id=self.channel.id + {"extra_fields.options.completion_criteria.threshold": 10}, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) c = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.TIME) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["threshold"], 10) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.TIME, + ) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["threshold"], 10 + ) def test_update_completion_criteria_model_to_determined_by_resource_edge_case(self): metadata = self.contentnode_db_metadata @@ -816,38 +1000,44 @@ def test_update_completion_criteria_model_to_determined_by_resource_edge_case(se "completion_criteria": { "model": completion_criteria.REFERENCE, "threshold": None, - "learner_managed": False + "learner_managed": False, } } } contentnode = models.ContentNode.objects.create(**metadata) response = self.sync_changes( - [ - generate_update_event( - contentnode.id, - CONTENTNODE, - { - "complete": True, - "extra_fields.options.completion_criteria.threshold": 600, - "extra_fields.options.completion_criteria.model": completion_criteria.APPROX_TIME - }, - channel_id=self.channel.id - ), - generate_update_event( - contentnode.id, - CONTENTNODE, - { - "extra_fields.options.completion_criteria.model": completion_criteria.DETERMINED_BY_RESOURCE - }, - channel_id=self.channel.id - ) - ], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "complete": True, + "extra_fields.options.completion_criteria.threshold": 600, + "extra_fields.options.completion_criteria.model": completion_criteria.APPROX_TIME, + }, + channel_id=self.channel.id, + ), + generate_update_event( + contentnode.id, + CONTENTNODE, + { + "extra_fields.options.completion_criteria.model": completion_criteria.DETERMINED_BY_RESOURCE + }, + channel_id=self.channel.id, + ), + ], ) self.assertEqual(len(response.data["errors"]), 0) updated_contentnode = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(updated_contentnode.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.DETERMINED_BY_RESOURCE) - self.assertNotIn("threshold", updated_contentnode.extra_fields["options"]["completion_criteria"]) + self.assertEqual( + updated_contentnode.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.DETERMINED_BY_RESOURCE, + ) + self.assertNotIn( + "threshold", + updated_contentnode.extra_fields["options"]["completion_criteria"], + ) def test_update_contentnode_update_options_invalid_completion_criteria(self): metadata = self.contentnode_db_metadata @@ -872,15 +1062,20 @@ def test_update_contentnode_update_options_invalid_completion_criteria(self): "complete": True, "extra_fields.options.completion_criteria.model": completion_criteria.TIME, }, - channel_id=self.channel.id + 
channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) c = models.ContentNode.objects.get(id=contentnode.id) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["model"], completion_criteria.REFERENCE) - self.assertEqual(c.extra_fields["options"]["completion_criteria"]["threshold"], None) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["model"], + completion_criteria.REFERENCE, + ) + self.assertEqual( + c.extra_fields["options"]["completion_criteria"]["threshold"], None + ) def test_update_contentnode_add_multiple_metadata_labels(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) @@ -890,44 +1085,90 @@ def test_update_contentnode_add_multiple_metadata_labels(self): generate_update_event( contentnode.id, CONTENTNODE, - {"accessibility_labels.{}".format(ACCESSIBILITYCATEGORIESLIST[0]): True}, - channel_id=self.channel.id + { + "accessibility_labels.{}".format( + ACCESSIBILITYCATEGORIESLIST[0] + ): True + }, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[0]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[0] + ] + ) response = self.sync_changes( [ generate_update_event( contentnode.id, CONTENTNODE, - {"accessibility_labels.{}".format(ACCESSIBILITYCATEGORIESLIST[1]): True}, - channel_id=self.channel.id + { + "accessibility_labels.{}".format( + ACCESSIBILITYCATEGORIESLIST[1] + ): True + }, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[0]]) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[1]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[0] + ] + ) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[1] + ] + ) def test_update_contentnode_add_multiple_nested_metadata_labels(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) # Add metadata label to categories response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"categories.{}".format(nested_subjects[0]): True}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"categories.{}".format(nested_subjects[0]): True}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[0]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[0] + ] + ) response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"categories.{}".format(nested_subjects[1]): True}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"categories.{}".format(nested_subjects[1]): True}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) - self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[0]]) - 
self.assertTrue(models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[1]]) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[0] + ] + ) + self.assertTrue( + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[1] + ] + ) def test_update_contentnode_remove_metadata_label(self): metadata = self.contentnode_db_metadata @@ -940,14 +1181,20 @@ def test_update_contentnode_remove_metadata_label(self): generate_update_event( contentnode.id, CONTENTNODE, - {"accessibility_labels.{}".format(ACCESSIBILITYCATEGORIESLIST[0]): None}, - channel_id=self.channel.id + { + "accessibility_labels.{}".format( + ACCESSIBILITYCATEGORIESLIST[0] + ): None + }, + channel_id=self.channel.id, ) ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(KeyError): - models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ACCESSIBILITYCATEGORIESLIST[0]] + models.ContentNode.objects.get(id=contentnode.id).accessibility_labels[ + ACCESSIBILITYCATEGORIESLIST[0] + ] def test_update_contentnode_remove_nested_metadata_label(self): metadata = self.contentnode_db_metadata @@ -956,11 +1203,20 @@ def test_update_contentnode_remove_nested_metadata_label(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) # Add metadata label to categories response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"categories.{}".format(nested_subjects[0]): None}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"categories.{}".format(nested_subjects[0]): None}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) with self.assertRaises(KeyError): - models.ContentNode.objects.get(id=contentnode.id).categories[nested_subjects[0]] + models.ContentNode.objects.get(id=contentnode.id).categories[ + nested_subjects[0] + ] def test_update_contentnode_tags(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) @@ -969,7 +1225,10 @@ def test_update_contentnode_tags(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(tag): True}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(tag): True}, + channel_id=self.channel.id, ) ], ) @@ -985,7 +1244,10 @@ def test_update_contentnode_tags(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(other_tag): True}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(other_tag): True}, + channel_id=self.channel.id, ) ], ) @@ -1004,7 +1266,10 @@ def test_update_contentnode_tags(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(other_tag): None}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(other_tag): None}, + channel_id=self.channel.id, ) ], ) @@ -1028,7 +1293,10 @@ def test_update_contentnode_tag_greater_than_30_chars(self): response = self.sync_changes( [ generate_update_event( - contentnode.id, CONTENTNODE, {"tags.{}".format(tag): True}, channel_id=self.channel.id + contentnode.id, + CONTENTNODE, + {"tags.{}".format(tag): True}, + channel_id=self.channel.id, ) ], ) @@ -1044,22 +1312,39 @@ def test_update_contentnode_suggested_duration(self): new_suggested_duration = 600 response = self.sync_changes( - 
[generate_update_event(contentnode.id, CONTENTNODE, {"suggested_duration": new_suggested_duration}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"suggested_duration": new_suggested_duration},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertEqual(
-            models.ContentNode.objects.get(id=contentnode.id).suggested_duration, new_suggested_duration
+            models.ContentNode.objects.get(id=contentnode.id).suggested_duration,
+            new_suggested_duration,
         )

     def test_update_contentnode_extra_fields_inherited_metadata(self):
         contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata)
         response = self.sync_changes(
-            [generate_update_event(contentnode.id, CONTENTNODE, {"extra_fields.inherited_metadata.categories": True}, channel_id=self.channel.id)],
+            [
+                generate_update_event(
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"extra_fields.inherited_metadata.categories": True},
+                    channel_id=self.channel.id,
+                )
+            ],
         )
         self.assertEqual(response.status_code, 200, response.content)
         self.assertTrue(
-            models.ContentNode.objects.get(id=contentnode.id).extra_fields["inherited_metadata"]["categories"]
+            models.ContentNode.objects.get(id=contentnode.id).extra_fields[
+                "inherited_metadata"
+            ]["categories"]
         )

     def test_update_contentnode_tags_dont_duplicate(self):
@@ -1071,7 +1356,10 @@ def test_update_contentnode_tags_dont_duplicate(self):
         response = self.sync_changes(
             [
                 generate_update_event(
-                    contentnode.id, CONTENTNODE, {"tags.{}".format(tag): True}, channel_id=self.channel.id
+                    contentnode.id,
+                    CONTENTNODE,
+                    {"tags.{}".format(tag): True},
+                    channel_id=self.channel.id,
                 )
             ],
         )
@@ -1087,7 +1375,14 @@ def test_update_contentnode_tags_list(self):
         tag = "howzat!"
response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"tags": [tag]}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"tags": [tag]}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(len(response.data["errors"]), 1) @@ -1099,10 +1394,16 @@ def test_update_contentnodes(self): response = self.sync_changes( [ generate_update_event( - contentnode1.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id + contentnode1.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, ), generate_update_event( - contentnode2.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id + contentnode2.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, ), ], ) @@ -1127,10 +1428,16 @@ def test_cannot_update_some_contentnodes(self): response = self.sync_changes( [ generate_update_event( - contentnode1.id, CONTENTNODE, {"title": new_title}, channel_id=channel1.id + contentnode1.id, + CONTENTNODE, + {"title": new_title}, + channel_id=channel1.id, ), generate_update_event( - contentnode2.id, CONTENTNODE, {"title": new_title}, channel_id=channel2.id + contentnode2.id, + CONTENTNODE, + {"title": new_title}, + channel_id=channel2.id, ), ], ) @@ -1150,7 +1457,14 @@ def test_update_contentnode_updates_last_modified(self): new_title = "This is not the old title" response = self.sync_changes( - [generate_update_event(contentnode.id, CONTENTNODE, {"title": new_title}, channel_id=self.channel.id)], + [ + generate_update_event( + contentnode.id, + CONTENTNODE, + {"title": new_title}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) updated_node = models.ContentNode.objects.get(id=contentnode.id) @@ -1160,7 +1474,11 @@ def test_delete_contentnode(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) response = self.sync_changes( - [generate_delete_event(contentnode.id, CONTENTNODE, channel_id=self.channel.id)], + [ + generate_delete_event( + contentnode.id, CONTENTNODE, channel_id=self.channel.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -1174,7 +1492,11 @@ def test_cannot_delete_contentnode_no_permissions(self): contentnode = create_and_get_contentnode(self.channel.main_tree_id) response = self.sync_changes( - [generate_delete_event(contentnode.id, CONTENTNODE, channel_id=self.channel.id)], + [ + generate_delete_event( + contentnode.id, CONTENTNODE, channel_id=self.channel.id + ) + ], ) # Return a 200 here rather than a 404. 
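        # The sync endpoint reports permission problems in the response body
        # (response.data["disallowed"]) rather than via the status code, so
        # even a forbidden delete comes back as a 200 with the node intact.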
self.assertEqual(response.status_code, 200, response.content) @@ -1189,8 +1511,12 @@ def test_delete_contentnodes(self): self.sync_changes( [ - generate_delete_event(contentnode1.id, CONTENTNODE, channel_id=self.channel.id), - generate_delete_event(contentnode2.id, CONTENTNODE, channel_id=self.channel.id), + generate_delete_event( + contentnode1.id, CONTENTNODE, channel_id=self.channel.id + ), + generate_delete_event( + contentnode2.id, CONTENTNODE, channel_id=self.channel.id + ), ], ) try: @@ -1216,8 +1542,12 @@ def test_cannot_delete_some_contentnodes(self): response = self.sync_changes( [ - generate_delete_event(contentnode1.id, CONTENTNODE, channel_id=channel1.id), - generate_delete_event(contentnode2.id, CONTENTNODE, channel_id=channel2.id), + generate_delete_event( + contentnode1.id, CONTENTNODE, channel_id=channel1.id + ), + generate_delete_event( + contentnode2.id, CONTENTNODE, channel_id=channel2.id + ), ], ) self.assertEqual(len(response.data["disallowed"]), 1) @@ -1239,7 +1569,11 @@ def test_copy_contentnode(self): response = self.sync_changes( [ generate_copy_event( - new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id + new_node_id, + CONTENTNODE, + contentnode.id, + self.channel.main_tree_id, + channel_id=self.channel.id, ) ], ) @@ -1259,7 +1593,11 @@ def test_copy_contentnode_finalization_does_not_make_publishable(self): response = self.sync_changes( [ generate_copy_event( - new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id + new_node_id, + CONTENTNODE, + contentnode.id, + self.channel.main_tree_id, + channel_id=self.channel.id, ), # Save a published change for the channel, so that the finalization change will be generated # after the publish change, and we can check that it is properly not making the channel appear publishable. 
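                # Because the change list is applied in order, recording the
                # publish change first lets the test check that the copy's
                # finalization change, applied afterwards, does not make the
                # channel look publishable again.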
@@ -1278,7 +1616,11 @@ def test_cannot_copy_contentnode__source_permission(self): response = self.sync_changes( [ generate_copy_event( - new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id + new_node_id, + CONTENTNODE, + contentnode.id, + self.channel.main_tree_id, + channel_id=self.channel.id, ) ], ) @@ -1298,7 +1640,11 @@ def test_cannot_copy_contentnode__target_permission(self): response = self.sync_changes( [ generate_copy_event( - new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id + new_node_id, + CONTENTNODE, + contentnode.id, + self.channel.main_tree_id, + channel_id=self.channel.id, ) ], ) @@ -1317,7 +1663,14 @@ def test_create_contentnode_moveable(self): """ contentnode = self.contentnode_metadata response = self.sync_changes( - [generate_create_event(contentnode["id"], CONTENTNODE, contentnode, channel_id=self.channel.id)], + [ + generate_create_event( + contentnode["id"], + CONTENTNODE, + contentnode, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -1348,7 +1701,11 @@ def test_copy_contentnode_moveable(self): response = self.sync_changes( [ generate_copy_event( - new_node_id, CONTENTNODE, contentnode.id, self.channel.main_tree_id, channel_id=self.channel.id + new_node_id, + CONTENTNODE, + contentnode.id, + self.channel.main_tree_id, + channel_id=self.channel.id, ) ], ) @@ -1392,7 +1749,11 @@ def test_delete_orphanage_root(self): models.ContentNode.objects.create(**self.contentnode_db_metadata) response = self.sync_changes( - [generate_delete_event(settings.ORPHANAGE_ROOT_ID, CONTENTNODE, channel_id=self.channel.id)], + [ + generate_delete_event( + settings.ORPHANAGE_ROOT_ID, CONTENTNODE, channel_id=self.channel.id + ) + ], ) # We return 200 even when a deletion is not found, but it should # still not actually delete it. 
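        # The orphanage root is a shared system node rather than ordinary user
        # content, so the delete is presumably swallowed as a no-op: the same
        # 200 contract as above, but the node itself must survive.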
@@ -1409,10 +1770,16 @@ def test_create_prerequisites(self): response = self.sync_changes( [ generate_create_event( - [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id + [contentnode.id, prereq.id], + CONTENTNODE_PREREQUISITE, + {}, + channel_id=self.channel.id, ), generate_create_event( - [postreq.id, contentnode.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id + [postreq.id, contentnode.id], + CONTENTNODE_PREREQUISITE, + {}, + channel_id=self.channel.id, ), ], ) @@ -1425,7 +1792,10 @@ def test_create_self_referential_prerequisite(self): response = self.sync_changes( [ generate_create_event( - [contentnode.id, contentnode.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id + [contentnode.id, contentnode.id], + CONTENTNODE_PREREQUISITE, + {}, + channel_id=self.channel.id, ), ], ) @@ -1441,7 +1811,10 @@ def test_create_cyclic_prerequisite(self): response = self.sync_changes( [ generate_create_event( - [prereq.id, contentnode.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id + [prereq.id, contentnode.id], + CONTENTNODE_PREREQUISITE, + {}, + channel_id=self.channel.id, ), ], ) @@ -1456,7 +1829,10 @@ def test_create_cross_tree_prerequisite(self): response = self.sync_changes( [ generate_create_event( - [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id + [contentnode.id, prereq.id], + CONTENTNODE_PREREQUISITE, + {}, + channel_id=self.channel.id, ), ], ) @@ -1470,7 +1846,10 @@ def test_create_no_permission_prerequisite(self): response = self.sync_changes( [ generate_create_event( - [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, {}, channel_id=self.channel.id + [contentnode.id, prereq.id], + CONTENTNODE_PREREQUISITE, + {}, + channel_id=self.channel.id, ), ], ) @@ -1490,10 +1869,14 @@ def test_delete_prerequisites(self): response = self.sync_changes( [ generate_delete_event( - [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, channel_id=self.channel.id + [contentnode.id, prereq.id], + CONTENTNODE_PREREQUISITE, + channel_id=self.channel.id, ), generate_delete_event( - [postreq.id, contentnode.id], CONTENTNODE_PREREQUISITE, channel_id=self.channel.id + [postreq.id, contentnode.id], + CONTENTNODE_PREREQUISITE, + channel_id=self.channel.id, ), ], ) @@ -1511,7 +1894,9 @@ def test_delete_no_permission_prerequisite(self): response = self.sync_changes( [ generate_delete_event( - [contentnode.id, prereq.id], CONTENTNODE_PREREQUISITE, channel_id=self.channel.id + [contentnode.id, prereq.id], + CONTENTNODE_PREREQUISITE, + channel_id=self.channel.id, ), ], ) @@ -1520,7 +1905,6 @@ def test_delete_no_permission_prerequisite(self): class CRUDTestCase(StudioAPITestCase): - def setUp(self): super(CRUDTestCase, self).setUp() self.channel = testdata.channel() @@ -1551,14 +1935,17 @@ def contentnode_db_metadata(self): def test_fetch_contentnode(self): contentnode = models.ContentNode.objects.create(**self.contentnode_db_metadata) response = self.client.get( - reverse("contentnode-detail", kwargs={"pk": contentnode.id}), format="json", + reverse("contentnode-detail", kwargs={"pk": contentnode.id}), + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["id"], contentnode.id) def test_fetch_contentnode__by_parent(self): - channel = models.Channel.objects.create(actor_id=self.user.id, name="Test channel") + channel = models.Channel.objects.create( + actor_id=self.user.id, name="Test channel" + ) channel.editors.add(self.user) channel.save() @@ 
-1567,14 +1954,18 @@ def test_fetch_contentnode__by_parent(self): contentnode = models.ContentNode.objects.create(**metadata) response = self.client.get( - reverse("contentnode-list"), format="json", data={"parent": channel.main_tree_id}, + reverse("contentnode-list"), + format="json", + data={"parent": channel.main_tree_id}, ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(len(response.data), 1) self.assertEqual(response.data[0]["id"], contentnode.id) def test_fetch_contentnode__by_node_id_channel_id(self): - channel = models.Channel.objects.create(actor_id=self.user.id, name="Test channel") + channel = models.Channel.objects.create( + actor_id=self.user.id, name="Test channel" + ) channel.editors.add(self.user) channel.save() @@ -1632,7 +2023,9 @@ def test_fetch_requisites(self): def test_create_contentnode(self): contentnode = self.contentnode_metadata response = self.client.post( - reverse("contentnode-list"), contentnode, format="json", + reverse("contentnode-list"), + contentnode, + format="json", ) self.assertEqual(response.status_code, 405, response.content) @@ -1673,7 +2066,7 @@ def test_resource_size(self): total_size = sum(files_map.values()) - self.assertEqual(response.data.get('size', 0), total_size) + self.assertEqual(response.data.get("size", 0), total_size) class AnnotationsTest(StudioAPITestCase): diff --git a/contentcuration/contentcuration/tests/viewsets/test_file.py b/contentcuration/contentcuration/tests/viewsets/test_file.py index 1d5462b6bf..b2ef65280a 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_file.py +++ b/contentcuration/contentcuration/tests/viewsets/test_file.py @@ -17,7 +17,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def file_metadata(self): return { @@ -64,8 +63,12 @@ def test_cannot_create_files(self): file2 = self.file_metadata response = self.sync_changes( [ - generate_create_event(file1["id"], FILE, file1, channel_id=self.channel.id), - generate_create_event(file2["id"], FILE, file2, channel_id=self.channel.id), + generate_create_event( + file1["id"], FILE, file1, channel_id=self.channel.id + ), + generate_create_event( + file2["id"], FILE, file2, channel_id=self.channel.id + ), ], ) self.assertEqual(len(response.data["errors"]), 2) @@ -87,11 +90,16 @@ def test_update_file(self): new_preset = format_presets.VIDEO_HIGH_RES response = self.sync_changes( - [generate_update_event(file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.File.objects.get(id=file.id).preset_id, new_preset, + models.File.objects.get(id=file.id).preset_id, + new_preset, ) def test_update_file_no_channel(self): @@ -100,11 +108,19 @@ def test_update_file_no_channel(self): file = models.File.objects.create(**file_metadata) response = self.sync_changes( - [generate_update_event(file.id, FILE, {"contentnode": contentnode_id}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, + FILE, + {"contentnode": contentnode_id}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.File.objects.get(id=file.id).contentnode_id, contentnode_id, + models.File.objects.get(id=file.id).contentnode_id, + contentnode_id, ) def test_update_file_with_complete_contentnode(self): @@ -118,7 +134,7 @@ def 
test_update_file_with_complete_contentnode(self): parent=self.channel.main_tree, license_id=models.License.objects.first().id, license_description="don't do this!", - copyright_holder="Some person" + copyright_holder="Some person", ) errors = complete_except_no_file.mark_complete() complete_except_no_file.save() @@ -128,15 +144,19 @@ def test_update_file_with_complete_contentnode(self): self.assertEqual(complete_except_no_file.complete, False) self.sync_changes( - [generate_update_event(file.id, FILE, {"contentnode": complete_except_no_file.id}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, + FILE, + {"contentnode": complete_except_no_file.id}, + channel_id=self.channel.id, + ) + ], ) # We should see two Changes, one of them should be for the CONTENTNODE table self.assertEqual(models.Change.objects.count(), 2) - self.assertEqual( - models.Change.objects.filter(table=CONTENTNODE).count(), - 1 - ) + self.assertEqual(models.Change.objects.filter(table=CONTENTNODE).count(), 1) complete_except_no_file.refresh_from_db() @@ -149,11 +169,16 @@ def test_update_file_no_channel_permission(self): self.channel.editors.remove(self.user) response = self.sync_changes( - [generate_update_event(file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id + ) + ], ) self.assertEqual(len(response.data["disallowed"]), 1) self.assertNotEqual( - models.File.objects.get(id=file.id).preset_id, new_preset, + models.File.objects.get(id=file.id).preset_id, + new_preset, ) def test_update_file_no_channel_edit_permission(self): @@ -164,11 +189,16 @@ def test_update_file_no_channel_edit_permission(self): self.channel.viewers.add(self.user) response = self.sync_changes( - [generate_update_event(file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, FILE, {"preset": new_preset}, channel_id=self.channel.id + ) + ], ) self.assertEqual(len(response.data["disallowed"]), 1) self.assertNotEqual( - models.File.objects.get(id=file.id).preset_id, new_preset, + models.File.objects.get(id=file.id).preset_id, + new_preset, ) def test_update_file_no_node_permission(self): @@ -177,10 +207,18 @@ def test_update_file_no_node_permission(self): new_channel_node = new_channel.main_tree.get_descendants().first().id self.sync_changes( - [generate_update_event(file.id, FILE, {"contentnode": new_channel_node}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, + FILE, + {"contentnode": new_channel_node}, + channel_id=self.channel.id, + ) + ], ) self.assertNotEqual( - models.File.objects.get(id=file.id).contentnode, new_channel_node, + models.File.objects.get(id=file.id).contentnode, + new_channel_node, ) def test_update_file_no_assessmentitem_permission(self): @@ -194,10 +232,18 @@ def test_update_file_no_assessmentitem_permission(self): new_channel_assessmentitem = new_channel_exercise.assessment_items.first().id self.sync_changes( - [generate_update_event(file.id, FILE, {"assessment_item": new_channel_assessmentitem}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, + FILE, + {"assessment_item": new_channel_assessmentitem}, + channel_id=self.channel.id, + ) + ], ) self.assertNotEqual( - models.File.objects.get(id=file.id).assessment_item, new_channel_assessmentitem, + models.File.objects.get(id=file.id).assessment_item, + new_channel_assessmentitem, ) def test_update_files(self): @@ -208,29 +254,44 @@ def 
test_update_files(self): response = self.sync_changes( [ - generate_update_event(file1.id, FILE, {"preset": new_preset}, channel_id=self.channel.id), - generate_update_event(file2.id, FILE, {"preset": new_preset}, channel_id=self.channel.id), + generate_update_event( + file1.id, FILE, {"preset": new_preset}, channel_id=self.channel.id + ), + generate_update_event( + file2.id, FILE, {"preset": new_preset}, channel_id=self.channel.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual( - models.File.objects.get(id=file1.id).preset_id, new_preset, + models.File.objects.get(id=file1.id).preset_id, + new_preset, ) self.assertEqual( - models.File.objects.get(id=file2.id).preset_id, new_preset, + models.File.objects.get(id=file2.id).preset_id, + new_preset, ) def test_update_file_empty(self): file = models.File.objects.create(**self.file_db_metadata) - response = self.sync_changes([generate_update_event(file.id, FILE, {}, channel_id=self.channel.id)]) + response = self.sync_changes( + [generate_update_event(file.id, FILE, {}, channel_id=self.channel.id)] + ) self.assertEqual(response.status_code, 200, response.content) def test_update_file_unwriteable_fields(self): file = models.File.objects.create(**self.file_db_metadata) response = self.sync_changes( - [generate_update_event(file.id, FILE, {"not_a_field": "not_a_value"}, channel_id=self.channel.id)], + [ + generate_update_event( + file.id, + FILE, + {"not_a_field": "not_a_value"}, + channel_id=self.channel.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) @@ -239,7 +300,9 @@ def test_delete_file(self): file = models.File.objects.create(**self.file_db_metadata) self.client.force_authenticate(user=self.user) - response = self.sync_changes([generate_delete_event(file.id, FILE, channel_id=self.channel.id)]) + response = self.sync_changes( + [generate_delete_event(file.id, FILE, channel_id=self.channel.id)] + ) self.assertEqual(response.status_code, 200, response.content) try: models.File.objects.get(id=file.id) @@ -305,7 +368,11 @@ def setUp(self): def test_cannot_create_file(self): self.client.force_authenticate(user=self.user) file = self.file_metadata - response = self.client.post(reverse("file-list"), file, format="json",) + response = self.client.post( + reverse("file-list"), + file, + format="json", + ) self.assertEqual(response.status_code, 405, response.content) try: models.File.objects.get(id=file["id"]) @@ -343,23 +410,27 @@ def setUp(self): "name": "le_studio", "file_format": file_formats.MP3, "preset": format_presets.AUDIO, - "duration": 10.123 + "duration": 10.123, } def test_required_keys(self): del self.file["name"] self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 400) def test_duration_invalid(self): - self.file["duration"] = '1.23' + self.file["duration"] = "1.23" self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 400) @@ -370,7 +441,9 @@ def test_duration_missing(self): self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 200) @@ 
-381,7 +454,9 @@ def test_duration_missing_but_required(self): self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 400) @@ -392,7 +467,9 @@ def test_duration_null(self): self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 200) @@ -403,7 +480,9 @@ def test_duration_null_but_required(self): self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 400) @@ -416,10 +495,12 @@ def test_invalid_file_format_upload(self): "name": "le_studio", "file_format": "ppx", "preset": format_presets.AUDIO, - "duration": 10.123 + "duration": 10.123, } response = self.client.post( - reverse("file-upload-url"), file, format="json", + reverse("file-upload-url"), + file, + format="json", ) self.assertEqual(response.status_code, 400) @@ -431,7 +512,7 @@ def test_invalid_preset_upload(self): "name": "le_studio", "file_format": file_formats.MP3, "preset": "invalid_preset", # Deliberately invalid - "duration": 10.123 + "duration": 10.123, } response = self.client.post(reverse("file-upload-url"), file, format="json") self.assertEqual(response.status_code, 400) @@ -440,20 +521,32 @@ def test_insufficient_storage(self): self.file["size"] = 100000000000000 self.client.force_authenticate(user=self.user) - response = self.client.post(reverse("file-upload-url"), self.file, format="json",) + response = self.client.post( + reverse("file-upload-url"), + self.file, + format="json", + ) self.assertEqual(response.status_code, 412) def test_upload_url(self): self.client.force_authenticate(user=self.user) - response = self.client.post(reverse("file-upload-url"), self.file, format="json",) + response = self.client.post( + reverse("file-upload-url"), + self.file, + format="json", + ) self.assertEqual(response.status_code, 200) file = models.File.objects.get(checksum=self.file["checksum"]) self.assertEqual(10, file.duration) def test_upload_url_doesnot_sets_contentnode(self): self.client.force_authenticate(user=self.user) - response = self.client.post(reverse("file-upload-url"), self.file, format="json",) + response = self.client.post( + reverse("file-upload-url"), + self.file, + format="json", + ) file = models.File.objects.get(checksum=self.file["checksum"]) self.assertEqual(response.status_code, 200) self.assertEqual(file.contentnode, None) @@ -463,7 +556,9 @@ def test_duration_zero(self): self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("file-upload-url"), self.file, format="json", + reverse("file-upload-url"), + self.file, + format="json", ) self.assertEqual(response.status_code, 400) @@ -495,23 +590,34 @@ def _upload_file_to_contentnode(self, file_metadata=None, contentnode_id=None): to point to the contentnode. 
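
        The helper does this in two steps, matching the upload flow exercised
        elsewhere in this file: first POST to the file-upload-url endpoint to
        register the file's metadata, then sync an UPDATE change for the FILE
        table that points its "contentnode" at the target node.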
""" file = file_metadata or self._get_file_metadata() - self.client.post(reverse("file-upload-url"), file, format="json",) + self.client.post( + reverse("file-upload-url"), + file, + format="json", + ) file_from_db = models.File.objects.get(checksum=file["checksum"]) self.sync_changes( - [generate_update_event( - file_from_db.id, - FILE, - { - "contentnode": contentnode_id or self.channel.main_tree.get_descendants().first().id - }, - channel_id=self.channel.id)],) + [ + generate_update_event( + file_from_db.id, + FILE, + { + "contentnode": contentnode_id + or self.channel.main_tree.get_descendants().first().id + }, + channel_id=self.channel.id, + ) + ], + ) file_from_db.refresh_from_db() return file_from_db def _delete_file_from_contentnode(self, file_from_db): self.sync_changes( [ - generate_delete_event(file_from_db.id, FILE, channel_id=self.channel.id), + generate_delete_event( + file_from_db.id, FILE, channel_id=self.channel.id + ), ], ) @@ -534,19 +640,25 @@ def test_content_id__changes_on_upload_file_to_node(self): # Assert after new file upload, content_id changes. file.contentnode.refresh_from_db() file_contentnode_copy.refresh_from_db() - self.assertNotEqual(file.contentnode.content_id, file_contentnode_copy.content_id) + self.assertNotEqual( + file.contentnode.content_id, file_contentnode_copy.content_id + ) def test_content_id__changes_on_delete_file_from_node(self): file = self._upload_file_to_contentnode() file_contentnode_copy = file.contentnode.copy_to(target=self.channel.main_tree) # Delete file from the copied contentnode. - self._delete_file_from_contentnode(file_from_db=file_contentnode_copy.files.first()) + self._delete_file_from_contentnode( + file_from_db=file_contentnode_copy.files.first() + ) # Assert after deleting file, content_id changes. file.contentnode.refresh_from_db() file_contentnode_copy.refresh_from_db() - self.assertNotEqual(file.contentnode.content_id, file_contentnode_copy.content_id) + self.assertNotEqual( + file.contentnode.content_id, file_contentnode_copy.content_id + ) def test_content_id__doesnot_changes_on_update_original_file_node(self): file = self._upload_file_to_contentnode() @@ -585,17 +697,31 @@ def test_content_id__thumbnails_dont_update_content_id(self): thumbnail_file_meta_1 = self._get_file_metadata() thumbnail_file_meta_2 = self._get_file_metadata() - thumbnail_file_meta_1.update({"preset": format_presets.AUDIO_THUMBNAIL, "file_format": file_formats.JPEG, }) + thumbnail_file_meta_1.update( + { + "preset": format_presets.AUDIO_THUMBNAIL, + "file_format": file_formats.JPEG, + } + ) del thumbnail_file_meta_1["duration"] - thumbnail_file_meta_2.update({"preset": format_presets.AUDIO_THUMBNAIL, "file_format": file_formats.JPEG, }) + thumbnail_file_meta_2.update( + { + "preset": format_presets.AUDIO_THUMBNAIL, + "file_format": file_formats.JPEG, + } + ) del thumbnail_file_meta_2["duration"] # Upload thumbnail to original contentnode and copied contentnode. # content_id should remain same for both these nodes. 
original_node_content_id_before_upload = file.contentnode.content_id copied_node_content_id_before_upload = file_contentnode_copy.content_id - self._upload_file_to_contentnode(file_metadata=thumbnail_file_meta_1, contentnode_id=file.contentnode.id) - self._upload_file_to_contentnode(file_metadata=thumbnail_file_meta_2, contentnode_id=file_contentnode_copy.id) + self._upload_file_to_contentnode( + file_metadata=thumbnail_file_meta_1, contentnode_id=file.contentnode.id + ) + self._upload_file_to_contentnode( + file_metadata=thumbnail_file_meta_2, contentnode_id=file_contentnode_copy.id + ) # Assert content_id is same after uploading thumbnails to nodes. file.contentnode.refresh_from_db() @@ -603,5 +729,10 @@ def test_content_id__thumbnails_dont_update_content_id(self): original_node_content_id_after_upload = file.contentnode.content_id copied_node_content_id_after_upload = file_contentnode_copy.content_id - self.assertEqual(original_node_content_id_before_upload, original_node_content_id_after_upload) - self.assertEqual(copied_node_content_id_before_upload, copied_node_content_id_after_upload) + self.assertEqual( + original_node_content_id_before_upload, + original_node_content_id_after_upload, + ) + self.assertEqual( + copied_node_content_id_before_upload, copied_node_content_id_after_upload + ) diff --git a/contentcuration/contentcuration/tests/viewsets/test_flagged.py b/contentcuration/contentcuration/tests/viewsets/test_flagged.py index a507c5e4e9..1f2acf3ac2 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_flagged.py +++ b/contentcuration/contentcuration/tests/viewsets/test_flagged.py @@ -10,13 +10,13 @@ class CRUDTestCase(StudioAPITestCase): @property def flag_feedback_object(self): return { - 'context': {'spam': 'Spam or misleading'}, - 'contentnode_id': self.contentNode.id, - 'content_id': self.contentNode.content_id, - 'target_channel_id': self.channel.id, - 'user': self.user.id, - 'feedback_type': 'FLAGGED', - 'feedback_reason': 'Some reason provided by the user' + "context": {"spam": "Spam or misleading"}, + "contentnode_id": self.contentNode.id, + "content_id": self.contentNode.content_id, + "target_channel_id": self.channel.id, + "user": self.user.id, + "feedback_type": "FLAGGED", + "feedback_reason": "Some reason provided by the user", } def setUp(self): @@ -34,17 +34,21 @@ def test_create_flag_event(self): self.client.force_authenticate(user=self.user) flagged_content = self.flag_feedback_object response = self.client.post( - reverse("flagged-list"), flagged_content, format="json", + reverse("flagged-list"), + flagged_content, + format="json", ) self.assertEqual(response.status_code, 201, response.content) def test_create_flag_event_fails_for_flag_test_dev_feature_disabled(self): flagged_content = self.flag_feedback_object - self.user.feature_flags = {'test_dev_feature': False} + self.user.feature_flags = {"test_dev_feature": False} self.user.save() self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("flagged-list"), flagged_content, format="json", + reverse("flagged-list"), + flagged_content, + format="json", ) self.assertEqual(response.status_code, 403, response.content) @@ -54,14 +58,18 @@ def test_create_flag_event_fails_for_flag_test_dev_feature_None(self): self.user.save() self.client.force_authenticate(user=self.user) response = self.client.post( - reverse("flagged-list"), flagged_content, format="json", + reverse("flagged-list"), + flagged_content, + format="json", ) self.assertEqual(response.status_code, 403, 
response.content) def test_create_flag_event_fails_for_unauthorized_user(self): flagged_content = self.flag_feedback_object response = self.client.post( - reverse("flagged-list"), flagged_content, format="json", + reverse("flagged-list"), + flagged_content, + format="json", ) self.assertEqual(response.status_code, 403, response.content) @@ -76,16 +84,19 @@ def test_retreive_fails_for_normal_user(self): self.client.force_authenticate(user=self.user) flag_feedback_object = FlagFeedbackEvent.objects.create( **{ - 'context': {'spam': 'Spam or misleading'}, - 'contentnode_id': self.contentNode.id, - 'content_id': self.contentNode.content_id, - 'target_channel_id': self.channel.id, - 'feedback_type': 'FLAGGED', - 'feedback_reason': 'Some reason provided by the user' + "context": {"spam": "Spam or misleading"}, + "contentnode_id": self.contentNode.id, + "content_id": self.contentNode.content_id, + "target_channel_id": self.channel.id, + "feedback_type": "FLAGGED", + "feedback_reason": "Some reason provided by the user", }, user=self.user, ) - response = self.client.get(reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), format="json") + response = self.client.get( + reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), + format="json", + ) self.assertEqual(response.status_code, 403, response.content) def test_list_fails_for_normal_user(self): @@ -103,32 +114,38 @@ def test_destroy_flagged_content_super_admin(self): self.client.force_authenticate(self.user) flag_feedback_object = FlagFeedbackEvent.objects.create( **{ - 'context': {'spam': 'Spam or misleading'}, - 'contentnode_id': self.contentNode.id, - 'content_id': self.contentNode.content_id, - 'target_channel_id': self.channel.id, - 'feedback_type': 'FLAGGED', - 'feedback_reason': 'Some reason provided by the user' + "context": {"spam": "Spam or misleading"}, + "contentnode_id": self.contentNode.id, + "content_id": self.contentNode.content_id, + "target_channel_id": self.channel.id, + "feedback_type": "FLAGGED", + "feedback_reason": "Some reason provided by the user", }, user=self.user, ) - response = self.client.delete(reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), format="json") + response = self.client.delete( + reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), + format="json", + ) self.assertEqual(response.status_code, 204, response.content) def test_destroy_flagged_content_fails_for_user_with_feature_flag_disabled(self): - self.user.feature_flags = {'test_dev_feature': False} + self.user.feature_flags = {"test_dev_feature": False} self.user.save() self.client.force_authenticate(user=self.user) flag_feedback_object = FlagFeedbackEvent.objects.create( **{ - 'context': {'spam': 'Spam or misleading'}, - 'contentnode_id': self.contentNode.id, - 'content_id': self.contentNode.content_id, - 'target_channel_id': self.channel.id, - 'feedback_type': 'FLAGGED', - 'feedback_reason': 'Some reason provided by the user' + "context": {"spam": "Spam or misleading"}, + "contentnode_id": self.contentNode.id, + "content_id": self.contentNode.content_id, + "target_channel_id": self.channel.id, + "feedback_type": "FLAGGED", + "feedback_reason": "Some reason provided by the user", }, user=self.user, ) - response = self.client.delete(reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), format="json") + response = self.client.delete( + reverse("flagged-detail", kwargs={"pk": flag_feedback_object.id}), + format="json", + ) self.assertEqual(response.status_code, 403, response.content) 
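
All of the sync tests above and below drive the same change-event envelope
through sync_changes(). For orientation, here is a minimal sketch of the
payload the generate_*_event helpers presumably assemble; the keyword
arguments match the calls visible in these diffs, but the envelope keys
themselves are an assumption, since the helpers' bodies are not part of this
patch:

    # Hypothetical sketch only; the real helpers live in the test utilities.
    UPDATE = "UPDATE"  # assumed change-type tag; CREATE/DELETE/COPY analogous

    def generate_update_event(key, table, mods, channel_id=None, user_id=None):
        # key: a single pk, or a composite like [user_id, channel_id] for
        #      M2M tables such as EDITOR_M2M and VIEWER_M2M
        # table: a table constant such as CONTENTNODE, FILE or INVITATION
        # mods: the changed fields; dotted paths ("tags.<name>",
        #       "accessibility_labels.<id>") set or clear (None) one nested key
        return {
            "type": UPDATE,
            "key": key,
            "table": table,
            "mods": mods,
            "channel_id": channel_id,
            "user_id": user_id,
        }

Whatever the exact envelope, the response contract is consistent across the
assertions in these tests: a 200 status, with per-change outcomes reported in
the body under keys such as "errors" and "disallowed".
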
diff --git a/contentcuration/contentcuration/tests/viewsets/test_invitation.py b/contentcuration/contentcuration/tests/viewsets/test_invitation.py index 22e6499a64..f044f50a99 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_invitation.py +++ b/contentcuration/contentcuration/tests/viewsets/test_invitation.py @@ -13,7 +13,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - @property def invitation_metadata(self): return { @@ -43,7 +42,15 @@ def setUp(self): def test_create_invitation(self): invitation = self.invitation_metadata response = self.sync_changes( - [generate_create_event(invitation["id"], INVITATION, invitation, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_create_event( + invitation["id"], + INVITATION, + invitation, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -56,8 +63,20 @@ def test_create_invitations(self): invitation2 = self.invitation_metadata response = self.sync_changes( [ - generate_create_event(invitation1["id"], INVITATION, invitation1, channel_id=self.channel.id, user_id=self.invited_user.id), - generate_create_event(invitation2["id"], INVITATION, invitation2, channel_id=self.channel.id, user_id=self.invited_user.id), + generate_create_event( + invitation1["id"], + INVITATION, + invitation1, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ), + generate_create_event( + invitation2["id"], + INVITATION, + invitation2, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -77,7 +96,15 @@ def test_create_invitation_no_channel_permission(self): invitation = self.invitation_metadata invitation["channel"] = new_channel.id response = self.sync_changes( - [generate_create_event(invitation["id"], INVITATION, invitation, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_create_event( + invitation["id"], + INVITATION, + invitation, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -92,7 +119,14 @@ def test_update_invitation_accept(self): self.client.force_authenticate(user=self.invited_user) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {"accepted": True}, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {"accepted": True}, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -112,7 +146,15 @@ def test_update_invitation_revoke(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {"revoked": True}, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {"revoked": True}, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -133,7 +175,15 @@ def test_update_invitation_invited_user_cannot_revoke(self): self.client.force_authenticate(user=self.invited_user) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {"revoked": True}, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {"revoked": True}, + 
channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) invitation = models.Invitation.objects.get(id=invitation.id) @@ -147,7 +197,15 @@ def test_update_invitation_invited_user_cannot_accept_revoked_invitation(self): self.client.force_authenticate(user=self.invited_user) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {"accepted": True}, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {"accepted": True}, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) invitation = models.Invitation.objects.get(id=invitation.id) @@ -158,7 +216,15 @@ def test_update_invitation_sender_cannot_modify_invited_user_fields(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {"accepted": True, "declined": True}, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {"accepted": True, "declined": True}, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) invitation = models.Invitation.objects.get(id=invitation.id) @@ -170,7 +236,15 @@ def test_update_invitation_decline(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {"declined": True}, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {"declined": True}, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -188,7 +262,15 @@ def test_update_invitation_empty(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) response = self.sync_changes( - [generate_update_event(invitation.id, INVITATION, {}, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_update_event( + invitation.id, + INVITATION, + {}, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) @@ -198,7 +280,11 @@ def test_update_invitation_unwriteable_fields(self): response = self.sync_changes( [ generate_update_event( - invitation.id, INVITATION, {"not_a_field": "not_a_value"}, channel_id=self.channel.id, user_id=self.invited_user.id + invitation.id, + INVITATION, + {"not_a_field": "not_a_value"}, + channel_id=self.channel.id, + user_id=self.invited_user.id, ) ], ) @@ -209,7 +295,14 @@ def test_delete_invitation(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) response = self.sync_changes( - [generate_delete_event(invitation.id, INVITATION, channel_id=self.channel.id, user_id=self.invited_user.id)], + [ + generate_delete_event( + invitation.id, + INVITATION, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -225,8 +318,18 @@ def test_delete_invitations(self): response = self.sync_changes( [ - generate_delete_event(invitation1.id, INVITATION, channel_id=self.channel.id, user_id=self.invited_user.id), - generate_delete_event(invitation2.id, INVITATION, 
channel_id=self.channel.id, user_id=self.invited_user.id), + generate_delete_event( + invitation1.id, + INVITATION, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ), + generate_delete_event( + invitation2.id, + INVITATION, + channel_id=self.channel.id, + user_id=self.invited_user.id, + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -273,7 +376,9 @@ def test_create_invitation(self): self.client.force_authenticate(user=self.user) invitation = self.invitation_metadata response = self.client.post( - reverse("invitation-list"), invitation, format="json", + reverse("invitation-list"), + invitation, + format="json", ) self.assertEqual(response.status_code, 405, response.content) @@ -281,7 +386,9 @@ def test_update_invitation_accept(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) self.client.force_authenticate(user=self.invited_user) - response = self.client.post(reverse("invitation-accept", kwargs={"pk": invitation.id})) + response = self.client.post( + reverse("invitation-accept", kwargs={"pk": invitation.id}) + ) self.assertEqual(response.status_code, 200, response.content) try: invitation = models.Invitation.objects.get(id=invitation.id) @@ -322,7 +429,9 @@ def test_update_invitation_decline(self): invitation = models.Invitation.objects.create(**self.invitation_db_metadata) self.client.force_authenticate(user=self.invited_user) - response = self.client.post(reverse("invitation-decline", kwargs={"pk": invitation.id})) + response = self.client.post( + reverse("invitation-decline", kwargs={"pk": invitation.id}) + ) self.assertEqual(response.status_code, 200, response.content) try: invitation = models.Invitation.objects.get(id=invitation.id) diff --git a/contentcuration/contentcuration/tests/viewsets/test_recommendations.py b/contentcuration/contentcuration/tests/viewsets/test_recommendations.py index d45e39ed04..67c96b05b3 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_recommendations.py +++ b/contentcuration/contentcuration/tests/viewsets/test_recommendations.py @@ -10,7 +10,6 @@ class CRUDTestCase(StudioAPITestCase): - @property def topics(self): return { @@ -52,18 +51,22 @@ def recommendations_list(self): "node_id": "00000000000000000000000000000005", "main_tree_id": "2", "parent_id": "00000000000000000000000000000006", - } + }, ] def setUp(self): super(CRUDTestCase, self).setUp() - @patch("contentcuration.utils.automation_manager.AutomationManager.load_recommendations") + @patch( + "contentcuration.utils.automation_manager.AutomationManager.load_recommendations" + ) def test_recommend_success(self, mock_load_recommendations): self.client.force_authenticate(user=self.admin_user) mock_load_recommendations.return_value = self.recommendations_list - response = self.client.post(reverse("recommendations"), data=self.topics, format="json") + response = self.client.post( + reverse("recommendations"), data=self.topics, format="json" + ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.json(), self.recommendations_list) @@ -73,57 +76,68 @@ def test_recommend_invalid_data_empty_data(self): error_message = "Invalid request data. Please check the required fields." 
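        # A valid request body carries a non-empty "topics" list; an empty
        # dict should fail validation with the generic error above.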
invalid_data = {} - response = self.client.post(reverse("recommendations"), data=invalid_data, - format="json") + response = self.client.post( + reverse("recommendations"), data=invalid_data, format="json" + ) self.assertEqual(response.status_code, 400) - self.assertIn(error_message, response.json()['error']) + self.assertIn(error_message, response.json()["error"]) def test_recommend_invalid_data_wrong_topic_data(self): self.client.force_authenticate(user=self.admin_user) error_message = "Invalid request data. Please check the required fields." - invalid_data = {'topics': [{'ramdon_field': "random_value"}]} - response = self.client.post(reverse("recommendations"), data=invalid_data, - format="json") + invalid_data = {"topics": [{"ramdon_field": "random_value"}]} + response = self.client.post( + reverse("recommendations"), data=invalid_data, format="json" + ) self.assertEqual(response.status_code, 400) - self.assertEqual(error_message, response.json()['error']) + self.assertEqual(error_message, response.json()["error"]) - @patch("contentcuration.utils.automation_manager.AutomationManager.load_recommendations") + @patch( + "contentcuration.utils.automation_manager.AutomationManager.load_recommendations" + ) def test_recommendation_invalid_data_formats(self, mock_load_recommendations): self.client.force_authenticate(user=self.admin_user) error_message = "Invalid input provided." mock_load_recommendations.side_effect = errors.InvalidRequest(error_message) - response = self.client.post(reverse("recommendations"), data=self.topics, - format="json") + response = self.client.post( + reverse("recommendations"), data=self.topics, format="json" + ) self.assertEqual(response.status_code, 400) self.assertEqual(response.json(), {"error": error_message}) mock_load_recommendations.assert_called_once() - @patch("contentcuration.utils.automation_manager.AutomationManager.load_recommendations") + @patch( + "contentcuration.utils.automation_manager.AutomationManager.load_recommendations" + ) def test_recommendation_service_unavailable(self, mock_load_recommendations): self.client.force_authenticate(user=self.admin_user) error_message = "Recommendation service unavailable" mock_load_recommendations.side_effect = errors.ConnectionError(error_message) - response = self.client.post(reverse("recommendations"), data=self.topics, - format="json") + response = self.client.post( + reverse("recommendations"), data=self.topics, format="json" + ) self.assertEqual(response.status_code, 503) self.assertEqual(response.json(), {"error": error_message}) mock_load_recommendations.assert_called_once() - @patch("contentcuration.utils.automation_manager.AutomationManager.load_recommendations") + @patch( + "contentcuration.utils.automation_manager.AutomationManager.load_recommendations" + ) def test_recommendation_generic_error(self, mock_load_recommendations): self.client.force_authenticate(user=self.admin_user) error_message = "Unable to load recommendations" mock_load_recommendations.side_effect = errors.HttpError(error_message) - response = self.client.post(reverse("recommendations"), data=self.topics, - format="json") + response = self.client.post( + reverse("recommendations"), data=self.topics, format="json" + ) self.assertEqual(response.status_code, 500) self.assertEqual(response.content.decode(), error_message) @@ -134,13 +148,20 @@ class RecommendationsEventViewSetTestCase(StudioAPITestCase): @property def recommendations_event_object(self): return { - 'context': {'model_version': 1, 'breadcrumbs': "#Title#->Random"}, - 
'contentnode_id': self.contentNode.id, - 'content_id': self.contentNode.content_id, - 'target_channel_id': self.channel.id, - 'user': self.user.id, - 'time_hidden': '2024-03-20T10:00:00Z', - 'content': [{'content_id': str(self.contentNode.content_id), 'node_id': str(self.contentNode.id), 'channel_id': str(self.channel.id), 'score': 4}] + "context": {"model_version": 1, "breadcrumbs": "#Title#->Random"}, + "contentnode_id": self.contentNode.id, + "content_id": self.contentNode.content_id, + "target_channel_id": self.channel.id, + "user": self.user.id, + "time_hidden": "2024-03-20T10:00:00Z", + "content": [ + { + "content_id": str(self.contentNode.content_id), + "node_id": str(self.contentNode.id), + "channel_id": str(self.channel.id), + "score": 4, + } + ], } def setUp(self): @@ -158,7 +179,9 @@ def setUp(self): def test_create_recommendations_event(self): recommendations_event = self.recommendations_event_object response = self.client.post( - reverse("recommendations-list"), recommendations_event, format="json", + reverse("recommendations-list"), + recommendations_event, + format="json", ) self.assertEqual(response.status_code, 201, response.content) @@ -168,85 +191,100 @@ def test_list_fails(self): def test_retrieve_fails(self): recommendations_event = RecommendationsEvent.objects.create( - context={'model_version': 1, 'breadcrumbs': "#Title#->Random"}, + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, contentnode_id=self.contentNode.id, content_id=self.contentNode.content_id, target_channel_id=self.channel.id, - time_hidden='2024-03-20T10:00:00Z', - content=[{ - 'content_id': str(self.contentNode.content_id), - 'node_id': str(self.contentNode.id), - 'channel_id': str(self.channel.id), - 'score': 4 - }], + time_hidden="2024-03-20T10:00:00Z", + content=[ + { + "content_id": str(self.contentNode.content_id), + "node_id": str(self.contentNode.id), + "channel_id": str(self.channel.id), + "score": 4, + } + ], user=self.user, ) - response = self.client.get(reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}), format="json") + response = self.client.get( + reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}), + format="json", + ) self.assertEqual(response.status_code, 405, response.content) def test_update_recommendations_event(self): recommendations_event = RecommendationsEvent.objects.create( - context={'model_version': 1, 'breadcrumbs': "#Title#->Random"}, + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, contentnode_id=self.contentNode.id, content_id=self.contentNode.content_id, target_channel_id=self.channel.id, - time_hidden='2024-03-20T10:00:00Z', - content=[{ - 'content_id': str(self.contentNode.content_id), - 'node_id': str(self.contentNode.id), - 'channel_id': str(self.channel.id), - 'score': 4 - }], + time_hidden="2024-03-20T10:00:00Z", + content=[ + { + "content_id": str(self.contentNode.content_id), + "node_id": str(self.contentNode.id), + "channel_id": str(self.channel.id), + "score": 4, + } + ], user=self.user, ) updated_data = self.recommendations_event_object - updated_data['context'] = {'model_version': 2, 'breadcrumbs': "#Title#->Updated"} + updated_data["context"] = { + "model_version": 2, + "breadcrumbs": "#Title#->Updated", + } response = self.client.put( reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}), updated_data, - format="json" + format="json", ) self.assertEqual(response.status_code, 200, response.content) def test_partial_update_recommendations_event(self): 
recommendations_event = RecommendationsEvent.objects.create( - context={'model_version': 1, 'breadcrumbs': "#Title#->Random"}, + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, contentnode_id=self.contentNode.id, content_id=self.contentNode.content_id, target_channel_id=self.channel.id, - time_hidden='2024-03-20T10:00:00Z', - content=[{ - 'content_id': str(self.contentNode.content_id), - 'node_id': str(self.contentNode.id), - 'channel_id': str(self.channel.id), - 'score': 4 - }], + time_hidden="2024-03-20T10:00:00Z", + content=[ + { + "content_id": str(self.contentNode.content_id), + "node_id": str(self.contentNode.id), + "channel_id": str(self.channel.id), + "score": 4, + } + ], user=self.user, ) response = self.client.patch( reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}), - {'context': {'model_version': 2}}, - format="json" + {"context": {"model_version": 2}}, + format="json", ) self.assertEqual(response.status_code, 200, response.content) def test_destroy_recommendations_event(self): recommendations_event = RecommendationsEvent.objects.create( - context={'model_version': 1, 'breadcrumbs': "#Title#->Random"}, + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, contentnode_id=self.contentNode.id, content_id=self.contentNode.content_id, target_channel_id=self.channel.id, - time_hidden='2024-03-20T10:00:00Z', - content=[{ - 'content_id': str(self.contentNode.content_id), - 'node_id': str(self.contentNode.id), - 'channel_id': str(self.channel.id), 'score': 4 - }], + time_hidden="2024-03-20T10:00:00Z", + content=[ + { + "content_id": str(self.contentNode.content_id), + "node_id": str(self.contentNode.id), + "channel_id": str(self.channel.id), + "score": 4, + } + ], user=self.user, ) response = self.client.delete( reverse("recommendations-detail", kwargs={"pk": recommendations_event.id}), - format="json" + format="json", ) self.assertEqual(response.status_code, 405, response.content) @@ -255,12 +293,12 @@ class RecommendationsInteractionEventViewSetTestCase(StudioAPITestCase): @property def recommendations_interaction_object(self): return { - 'context': {'test_key': 'test_value'}, - 'contentnode_id': self.interaction_node.id, - 'content_id': self.interaction_node.content_id, - 'feedback_type': 'IGNORED', - 'feedback_reason': '----', - 'recommendation_event_id': str(self.recommendation_event.id) + "context": {"test_key": "test_value"}, + "contentnode_id": self.interaction_node.id, + "content_id": self.interaction_node.content_id, + "feedback_type": "IGNORED", + "feedback_reason": "----", + "recommendation_event_id": str(self.recommendation_event.id), } def setUp(self): @@ -285,84 +323,105 @@ def setUp(self): target_channel_id=self.channel.id, content_id=self.node_where_import_is_initiated.content_id, contentnode_id=self.node_where_import_is_initiated.id, - context={'model_version': 1, 'breadcrumbs': "#Title#->Random"}, - time_hidden='2024-03-20T10:00:00Z', - content=[{ - 'content_id': str(self.interaction_node.content_id), - 'node_id': str(self.interaction_node.id), - 'channel_id': str(self.channel.id), - 'score': 4 - }] + context={"model_version": 1, "breadcrumbs": "#Title#->Random"}, + time_hidden="2024-03-20T10:00:00Z", + content=[ + { + "content_id": str(self.interaction_node.content_id), + "node_id": str(self.interaction_node.id), + "channel_id": str(self.channel.id), + "score": 4, + } + ], ) def test_create_recommendations_interaction(self): recommendations_interaction = self.recommendations_interaction_object response = 
self.client.post( - reverse("recommendations-interaction-list"), recommendations_interaction, format="json", + reverse("recommendations-interaction-list"), + recommendations_interaction, + format="json", ) self.assertEqual(response.status_code, 201, response.content) def test_list_fails(self): - response = self.client.get(reverse("recommendations-interaction-list"), format="json") + response = self.client.get( + reverse("recommendations-interaction-list"), format="json" + ) self.assertEqual(response.status_code, 405, response.content) def test_retrieve_fails(self): recommendations_interaction = RecommendationsInteractionEvent.objects.create( - context={'test_key': 'test_value'}, + context={"test_key": "test_value"}, contentnode_id=self.interaction_node.id, content_id=self.interaction_node.content_id, - feedback_type='IGNORED', - feedback_reason='----', - recommendation_event_id=self.recommendation_event.id + feedback_type="IGNORED", + feedback_reason="----", + recommendation_event_id=self.recommendation_event.id, + ) + response = self.client.get( + reverse( + "recommendations-interaction-detail", + kwargs={"pk": recommendations_interaction.id}, + ), + format="json", ) - response = self.client.get(reverse("recommendations-interaction-detail", kwargs={"pk": recommendations_interaction.id}), format="json") self.assertEqual(response.status_code, 405, response.content) def test_update_recommendations_interaction(self): recommendations_interaction = RecommendationsInteractionEvent.objects.create( - context={'test_key': 'test_value'}, + context={"test_key": "test_value"}, contentnode_id=self.interaction_node.id, content_id=self.interaction_node.content_id, - feedback_type='IGNORED', - feedback_reason='----', - recommendation_event_id=self.recommendation_event.id + feedback_type="IGNORED", + feedback_reason="----", + recommendation_event_id=self.recommendation_event.id, ) updated_data = self.recommendations_interaction_object - updated_data['feedback_type'] = 'PREVIEWED' + updated_data["feedback_type"] = "PREVIEWED" response = self.client.put( - reverse("recommendations-interaction-detail", kwargs={"pk": recommendations_interaction.id}), + reverse( + "recommendations-interaction-detail", + kwargs={"pk": recommendations_interaction.id}, + ), updated_data, - format="json" + format="json", ) self.assertEqual(response.status_code, 200, response.content) def test_partial_update_recommendations_interaction(self): recommendations_interaction = RecommendationsInteractionEvent.objects.create( - context={'test_key': 'test_value'}, + context={"test_key": "test_value"}, contentnode_id=self.interaction_node.id, content_id=self.interaction_node.content_id, - feedback_type='IGNORED', - feedback_reason='----', - recommendation_event_id=self.recommendation_event.id + feedback_type="IGNORED", + feedback_reason="----", + recommendation_event_id=self.recommendation_event.id, ) response = self.client.patch( - reverse("recommendations-interaction-detail", kwargs={"pk": recommendations_interaction.id}), - {'feedback_type': 'IMPORTED'}, - format="json" + reverse( + "recommendations-interaction-detail", + kwargs={"pk": recommendations_interaction.id}, + ), + {"feedback_type": "IMPORTED"}, + format="json", ) self.assertEqual(response.status_code, 200, response.content) def test_destroy_recommendations_interaction(self): recommendations_interaction = RecommendationsInteractionEvent.objects.create( - context={'test_key': 'test_value'}, + context={"test_key": "test_value"}, contentnode_id=self.interaction_node.id, 
content_id=self.interaction_node.content_id, - feedback_type='IGNORED', - feedback_reason='----', - recommendation_event_id=self.recommendation_event.id + feedback_type="IGNORED", + feedback_reason="----", + recommendation_event_id=self.recommendation_event.id, ) response = self.client.delete( - reverse("recommendations-interaction-detail", kwargs={"pk": recommendations_interaction.id}), - format="json" + reverse( + "recommendations-interaction-detail", + kwargs={"pk": recommendations_interaction.id}, + ), + format="json", ) self.assertEqual(response.status_code, 405, response.content) diff --git a/contentcuration/contentcuration/tests/viewsets/test_user.py b/contentcuration/contentcuration/tests/viewsets/test_user.py index 64f052ed9e..5e8554f35a 100644 --- a/contentcuration/contentcuration/tests/viewsets/test_user.py +++ b/contentcuration/contentcuration/tests/viewsets/test_user.py @@ -10,7 +10,6 @@ class SyncTestCase(SyncTestMixin, StudioAPITestCase): - def setUp(self): super(SyncTestCase, self).setUp() self.channel = testdata.channel() @@ -23,8 +22,20 @@ def test_create_editor_and_viewer(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( [ - generate_create_event([editor.id, self.channel.id], EDITOR_M2M, {}, channel_id=self.channel.id, user_id=editor.id), - generate_create_event([viewer.id, self.channel.id], VIEWER_M2M, {}, channel_id=self.channel.id, user_id=viewer.id), + generate_create_event( + [editor.id, self.channel.id], + EDITOR_M2M, + {}, + channel_id=self.channel.id, + user_id=editor.id, + ), + generate_create_event( + [viewer.id, self.channel.id], + VIEWER_M2M, + {}, + channel_id=self.channel.id, + user_id=viewer.id, + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -39,8 +50,18 @@ def test_delete_editor_and_viewer(self): self.client.force_authenticate(user=self.user) response = self.sync_changes( [ - generate_delete_event([editor.id, self.channel.id], EDITOR_M2M, channel_id=self.channel.id, user_id=editor.id), - generate_delete_event([viewer.id, self.channel.id], VIEWER_M2M, channel_id=self.channel.id, user_id=viewer.id), + generate_delete_event( + [editor.id, self.channel.id], + EDITOR_M2M, + channel_id=self.channel.id, + user_id=editor.id, + ), + generate_delete_event( + [viewer.id, self.channel.id], + VIEWER_M2M, + channel_id=self.channel.id, + user_id=viewer.id, + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -58,14 +79,19 @@ def setUp(self): def test_fetch_user(self): self.client.force_authenticate(user=self.user) response = self.client.get( - reverse("user-detail", kwargs={"pk": self.user.id}), format="json", + reverse("user-detail", kwargs={"pk": self.user.id}), + format="json", ) self.assertEqual(response.status_code, 200, response.content) def test_no_create_user(self): self.client.force_authenticate(user=self.user) user = {} - response = self.client.post(reverse("user-list"), user, format="json",) + response = self.client.post( + reverse("user-list"), + user, + format="json", + ) self.assertEqual(response.status_code, 405, response.content) def test_admin_no_create_user(self): @@ -73,7 +99,11 @@ def test_admin_no_create_user(self): self.user.save() self.client.force_authenticate(user=self.user) user = {} - response = self.client.post(reverse("admin-users-list"), user, format="json",) + response = self.client.post( + reverse("admin-users-list"), + user, + format="json", + ) self.assertEqual(response.status_code, 405, response.content) def test_no_update_user(self): @@ -127,7 +157,9 
@@ def setUp(self): def test_fetch_users(self): self.client.force_authenticate(user=self.user) response = self.client.get( - reverse("user-list"), data={"channel": self.channel.id}, format="json", + reverse("user-list"), + data={"channel": self.channel.id}, + format="json", ) self.assertEqual(response.status_code, 200, response.content) @@ -135,7 +167,9 @@ def test_fetch_users_no_permissions(self): new_channel = testdata.channel() self.client.force_authenticate(user=self.user) response = self.client.get( - reverse("user-list"), data={"channel": new_channel.id}, format="json", + reverse("user-list"), + data={"channel": new_channel.id}, + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.json(), []) diff --git a/contentcuration/contentcuration/urls.py b/contentcuration/contentcuration/urls.py index aa8fae777b..94fe8511e3 100644 --- a/contentcuration/contentcuration/urls.py +++ b/contentcuration/contentcuration/urls.py @@ -53,47 +53,77 @@ class StagingPageRedirectView(RedirectView): def get_redirect_url(self, *args, **kwargs): - channel_id = kwargs['channel_id'] - return '/channels/{}/#/staging'.format(channel_id) + channel_id = kwargs["channel_id"] + return "/channels/{}/#/staging".format(channel_id) router = routers.DefaultRouter(trailing_slash=False) -router.register(r'bookmark', BookmarkViewSet, basename="bookmark") -router.register(r'channel', ChannelViewSet) -router.register(r'channelset', ChannelSetViewSet) -router.register(r'catalog', CatalogViewSet, basename='catalog') -router.register(r'admin-channels', AdminChannelViewSet, basename='admin-channels') -router.register(r'file', FileViewSet) -router.register(r'channeluser', ChannelUserViewSet, basename="channeluser") -router.register(r'user', UserViewSet) -router.register(r'invitation', InvitationViewSet) -router.register(r'contentnode', ContentNodeViewSet) -router.register(r'assessmentitem', AssessmentItemViewSet) -router.register(r'admin-users', AdminUserViewSet, basename='admin-users') -router.register(r'clipboard', ClipboardViewSet, basename='clipboard') -router.register(r'flagged', FlagFeedbackEventViewSet, basename='flagged') -router.register(r'recommendations', RecommendationsEventViewSet, basename='recommendations') -router.register(r'recommendationsinteraction', RecommendationsInteractionEventViewSet, basename='recommendations-interaction') +router.register(r"bookmark", BookmarkViewSet, basename="bookmark") +router.register(r"channel", ChannelViewSet) +router.register(r"channelset", ChannelSetViewSet) +router.register(r"catalog", CatalogViewSet, basename="catalog") +router.register(r"admin-channels", AdminChannelViewSet, basename="admin-channels") +router.register(r"file", FileViewSet) +router.register(r"channeluser", ChannelUserViewSet, basename="channeluser") +router.register(r"user", UserViewSet) +router.register(r"invitation", InvitationViewSet) +router.register(r"contentnode", ContentNodeViewSet) +router.register(r"assessmentitem", AssessmentItemViewSet) +router.register(r"admin-users", AdminUserViewSet, basename="admin-users") +router.register(r"clipboard", ClipboardViewSet, basename="clipboard") +router.register(r"flagged", FlagFeedbackEventViewSet, basename="flagged") +router.register( + r"recommendations", RecommendationsEventViewSet, basename="recommendations" +) +router.register( + r"recommendationsinteraction", + RecommendationsInteractionEventViewSet, + basename="recommendations-interaction", +) urlpatterns = [ - re_path(r'^api/', include(router.urls)), - 
re_path(r'^serviceWorker.js$', pwa.ServiceWorkerView.as_view(), name="service_worker"),
-    re_path(r'^healthz$', views.health, name='health'),
-    re_path(r'^stealthz$', views.stealth, name='stealth'),
-    re_path(r'^api/search/', include('search.urls'), name='search'),
-    re_path(r'^api/probers/get_prober_channel', views.get_prober_channel, name='get_prober_channel'),
-    re_path(r'^api/probers/publishing_status', views.publishing_status, name='publishing_status'),
-    re_path(r'^api/probers/celery_worker_status', views.celery_worker_status, name='celery_worker_status'),
-    re_path(r'^api/probers/task_queue_status', views.task_queue_status, name='task_queue_status'),
-    re_path(r'^api/probers/unapplied_changes_status', views.unapplied_changes_status, name='unapplied_changes_status'),
-    re_path(r'^api/sync/$', SyncView.as_view(), name="sync"),
-    re_path(r'^api/recommendations/$', RecommendationView.as_view(), name="recommendations"),
+    re_path(r"^api/", include(router.urls)),
+    re_path(
+        r"^serviceWorker.js$", pwa.ServiceWorkerView.as_view(), name="service_worker"
+    ),
+    re_path(r"^healthz$", views.health, name="health"),
+    re_path(r"^stealthz$", views.stealth, name="stealth"),
+    re_path(r"^api/search/", include("search.urls"), name="search"),
+    re_path(
+        r"^api/probers/get_prober_channel",
+        views.get_prober_channel,
+        name="get_prober_channel",
+    ),
+    re_path(
+        r"^api/probers/publishing_status",
+        views.publishing_status,
+        name="publishing_status",
+    ),
+    re_path(
+        r"^api/probers/celery_worker_status",
+        views.celery_worker_status,
+        name="celery_worker_status",
+    ),
+    re_path(
+        r"^api/probers/task_queue_status",
+        views.task_queue_status,
+        name="task_queue_status",
+    ),
+    re_path(
+        r"^api/probers/unapplied_changes_status",
+        views.unapplied_changes_status,
+        name="unapplied_changes_status",
+    ),
+    re_path(r"^api/sync/$", SyncView.as_view(), name="sync"),
+    re_path(
+        r"^api/recommendations/$", RecommendationView.as_view(), name="recommendations"
+    ),
 ]

 # if activated, turn on django prometheus urls
 if "django_prometheus" in settings.INSTALLED_APPS:
     urlpatterns += [
-        re_path('', include('django_prometheus.urls')),
+        re_path("", include("django_prometheus.urls")),
     ]

@@ -102,77 +132,216 @@ def get_redirect_url(self, *args, **kwargs):

 # Add node api enpoints
 urlpatterns += [
-    re_path(r'^api/get_channel_details/(?P<channel_id>[^/]*)$', node_views.get_channel_details, name='get_channel_details'),
-    re_path(r'^api/get_node_details/(?P<node_id>[^/]*)$', node_views.get_node_details, name='get_node_details'),
-    re_path(r'^api/get_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$', node_views.get_node_diff, name='get_node_diff'),
-    re_path(r'^api/generate_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$', node_views.generate_node_diff, name='generate_node_diff'),
+    re_path(
+        r"^api/get_channel_details/(?P<channel_id>[^/]*)$",
+        node_views.get_channel_details,
+        name="get_channel_details",
+    ),
+    re_path(
+        r"^api/get_node_details/(?P<node_id>[^/]*)$",
+        node_views.get_node_details,
+        name="get_node_details",
+    ),
+    re_path(
+        r"^api/get_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$",
+        node_views.get_node_diff,
+        name="get_node_diff",
+    ),
+    re_path(
+        r"^api/generate_node_diff/(?P<updated_id>[^/]*)/(?P<original_id>[^/]*)$",
+        node_views.generate_node_diff,
+        name="generate_node_diff",
+    ),
 ]

 # Add file api enpoints
 urlpatterns += [
-    re_path(r'^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"),
+    re_path(
+        r"^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)",
+        zip_views.ZipContentView.as_view(),
+        {},
+        "zipcontent",
+    ),
 ]

 # Add settings endpoints
 urlpatterns += [
-
re_path(r'^api/delete_user_account/$', settings_views.DeleteAccountView.as_view(), name='delete_user_account'), - re_path(r'^api/export_user_data/$', settings_views.export_user_data, name='export_user_data'), - re_path(r'^api/change_password/$', settings_views.UserPasswordChangeView.as_view(), name='change_password'), - re_path(r'^api/update_user_full_name/$', settings_views.UsernameChangeView.as_view(), name='update_user_full_name'), - re_path(r'^settings/issues', settings_views.IssuesSettingsView.as_view(), name='issues_settings'), - re_path(r'^settings/request_storage', settings_views.StorageSettingsView.as_view(), name='request_storage'), - re_path(r'^policies/update', settings_views.PolicyAcceptView.as_view(), name='policy_update'), + re_path( + r"^api/delete_user_account/$", + settings_views.DeleteAccountView.as_view(), + name="delete_user_account", + ), + re_path( + r"^api/export_user_data/$", + settings_views.export_user_data, + name="export_user_data", + ), + re_path( + r"^api/change_password/$", + settings_views.UserPasswordChangeView.as_view(), + name="change_password", + ), + re_path( + r"^api/update_user_full_name/$", + settings_views.UsernameChangeView.as_view(), + name="update_user_full_name", + ), + re_path( + r"^settings/issues", + settings_views.IssuesSettingsView.as_view(), + name="issues_settings", + ), + re_path( + r"^settings/request_storage", + settings_views.StorageSettingsView.as_view(), + name="request_storage", + ), + re_path( + r"^policies/update", + settings_views.PolicyAcceptView.as_view(), + name="policy_update", + ), ] # Add internal endpoints urlpatterns += [ - re_path(r'^api/internal/authenticate_user_internal$', internal_views.authenticate_user_internal, name="authenticate_user_internal"), - re_path(r'^api/internal/check_version$', internal_views.check_version, name="check_version"), - re_path(r'^api/internal/file_diff$', internal_views.file_diff, name="file_diff"), - re_path(r'^api/internal/file_upload$', internal_views.api_file_upload, name="api_file_upload"), - re_path(r'^api/internal/publish_channel$', internal_views.api_publish_channel, name="api_publish_channel"), - re_path(r'^api/internal/check_user_is_editor$', internal_views.check_user_is_editor, name='check_user_is_editor'), - re_path(r'^api/internal/get_tree_data$', internal_views.get_tree_data, name='get_tree_data'), - re_path(r'^api/internal/get_node_tree_data$', internal_views.get_node_tree_data, name='get_node_tree_data'), - re_path(r'^api/internal/create_channel$', internal_views.api_create_channel_endpoint, name="api_create_channel"), - re_path(r'^api/internal/add_nodes$', internal_views.api_add_nodes_to_tree, name="api_add_nodes_to_tree"), - re_path(r'^api/internal/finish_channel$', internal_views.api_commit_channel, name="api_finish_channel"), - re_path(r'^api/internal/get_channel_status_bulk$', internal_views.get_channel_status_bulk, name="get_channel_status_bulk"), + re_path( + r"^api/internal/authenticate_user_internal$", + internal_views.authenticate_user_internal, + name="authenticate_user_internal", + ), + re_path( + r"^api/internal/check_version$", + internal_views.check_version, + name="check_version", + ), + re_path(r"^api/internal/file_diff$", internal_views.file_diff, name="file_diff"), + re_path( + r"^api/internal/file_upload$", + internal_views.api_file_upload, + name="api_file_upload", + ), + re_path( + r"^api/internal/publish_channel$", + internal_views.api_publish_channel, + name="api_publish_channel", + ), + re_path( + r"^api/internal/check_user_is_editor$", + 
internal_views.check_user_is_editor,
+        name="check_user_is_editor",
+    ),
+    re_path(
+        r"^api/internal/get_tree_data$",
+        internal_views.get_tree_data,
+        name="get_tree_data",
+    ),
+    re_path(
+        r"^api/internal/get_node_tree_data$",
+        internal_views.get_node_tree_data,
+        name="get_node_tree_data",
+    ),
+    re_path(
+        r"^api/internal/create_channel$",
+        internal_views.api_create_channel_endpoint,
+        name="api_create_channel",
+    ),
+    re_path(
+        r"^api/internal/add_nodes$",
+        internal_views.api_add_nodes_to_tree,
+        name="api_add_nodes_to_tree",
+    ),
+    re_path(
+        r"^api/internal/finish_channel$",
+        internal_views.api_commit_channel,
+        name="api_finish_channel",
+    ),
+    re_path(
+        r"^api/internal/get_channel_status_bulk$",
+        internal_views.get_channel_status_bulk,
+        name="get_channel_status_bulk",
+    ),
 ]

 # Add admin endpoints
 urlpatterns += [
-    re_path(r'^api/send_custom_email/$', admin_views.send_custom_email, name='send_custom_email'),
+    re_path(
+        r"^api/send_custom_email/$",
+        admin_views.send_custom_email,
+        name="send_custom_email",
+    ),
 ]

-urlpatterns += [re_path(r'^jsreverse/$', django_js_reverse_views.urls_js, name='js_reverse')]
+urlpatterns += [
+    re_path(r"^jsreverse/$", django_js_reverse_views.urls_js, name="js_reverse")
+]

 # I18N Endpoints
 urlpatterns += [
-    re_path(r'^i18n/', include('django.conf.urls.i18n')),
+    re_path(r"^i18n/", include("django.conf.urls.i18n")),
 ]

 # Include all URLS prefixed by language
 urlpatterns += i18n_patterns(
-    re_path(r'^$', views.base, name='base'),
+    re_path(r"^$", views.base, name="base"),
     re_path(r"^i18n/setlang/$", views.set_language, name="set_language"),
-    re_path(r'^channels/$', views.channel_list, name='channels'),
+    re_path(r"^channels/$", views.channel_list, name="channels"),
     # Redirect deprecated staging URL to new URL
-    re_path(r'^channels/(?P<channel_id>[^/]{32})/staging/$', StagingPageRedirectView.as_view(), name='staging_redirect'),
-    re_path(r'^channels/(?P<channel_id>[^/]{32})/$', views.channel, name='channel'),
-    re_path(r'^accounts/login/$', registration_views.login, name='login'),
-    re_path(r'^accounts/logout/$', registration_views.logout, name='logout'),
-    re_path(r'^accounts/request_activation_link/$', registration_views.request_activation_link, name='request_activation_link'),
+    re_path(
+        r"^channels/(?P<channel_id>[^/]{32})/staging/$",
+        StagingPageRedirectView.as_view(),
+        name="staging_redirect",
+    ),
+    re_path(r"^channels/(?P<channel_id>[^/]{32})/$", views.channel, name="channel"),
+    re_path(r"^accounts/login/$", registration_views.login, name="login"),
+    re_path(r"^accounts/logout/$", registration_views.logout, name="logout"),
+    re_path(
+        r"^accounts/request_activation_link/$",
+        registration_views.request_activation_link,
+        name="request_activation_link",
+    ),
     re_path(r"^accounts/$", views.accounts, name="accounts"),
-    path(r'accounts/password/reset/', registration_views.UserPasswordResetView.as_view(), name='auth_password_reset'),
-    path(r'accounts/password/reset/confirm/<uidb64>/<token>/', registration_views.UserPasswordResetConfirmView.as_view(), name='auth_password_reset_confirm'),
-    re_path(r'^accounts/register/$', registration_views.UserRegistrationView.as_view(), name='register'),
-    re_path(r'^activate/(?P<activation_key>[-:\w]+)/$', registration_views.UserActivationView.as_view(), name='registration_activate'),
-    re_path(r'^api/send_invitation_email/$', registration_views.send_invitation_email, name='send_invitation_email'),
-    re_path(r'^new/accept_invitation/(?P<email>[^/]+)/', registration_views.new_user_redirect, name="accept_invitation_and_registration"),
-    re_path(r'^api/deferred_user_space_by_kind/$', registration_views.deferred_user_space_by_kind, name="deferred_user_space_by_kind"),
-    re_path(r'^api/deferred_user_api_token/$', registration_views.deferred_user_api_token, name="deferred_user_api_token"),
-    re_path(r'^settings/$', settings_views.settings, name='settings'),
-    re_path(r'^administration/', admin_views.administration, name='administration'),
-    re_path(r'^manifest.webmanifest$', pwa.ManifestView.as_view(), name="manifest"),
+    path(
+        r"accounts/password/reset/",
+        registration_views.UserPasswordResetView.as_view(),
+        name="auth_password_reset",
+    ),
+    path(
+        r"accounts/password/reset/confirm/<uidb64>/<token>/",
+        registration_views.UserPasswordResetConfirmView.as_view(),
+        name="auth_password_reset_confirm",
+    ),
+    re_path(
+        r"^accounts/register/$",
+        registration_views.UserRegistrationView.as_view(),
+        name="register",
+    ),
+    re_path(
+        r"^activate/(?P<activation_key>[-:\w]+)/$",
+        registration_views.UserActivationView.as_view(),
+        name="registration_activate",
+    ),
+    re_path(
+        r"^api/send_invitation_email/$",
+        registration_views.send_invitation_email,
+        name="send_invitation_email",
+    ),
+    re_path(
+        r"^new/accept_invitation/(?P<email>[^/]+)/",
+        registration_views.new_user_redirect,
+        name="accept_invitation_and_registration",
+    ),
+    re_path(
+        r"^api/deferred_user_space_by_kind/$",
+        registration_views.deferred_user_space_by_kind,
+        name="deferred_user_space_by_kind",
+    ),
+    re_path(
+        r"^api/deferred_user_api_token/$",
+        registration_views.deferred_user_api_token,
+        name="deferred_user_api_token",
+    ),
+    re_path(r"^settings/$", settings_views.settings, name="settings"),
+    re_path(r"^administration/", admin_views.administration, name="administration"),
+    re_path(r"^manifest.webmanifest$", pwa.ManifestView.as_view(), name="manifest"),
 )
diff --git a/contentcuration/contentcuration/utils/automation_manager.py b/contentcuration/contentcuration/utils/automation_manager.py
index cda43aeb37..7f03ced95e 100644
--- a/contentcuration/contentcuration/utils/automation_manager.py
+++ b/contentcuration/contentcuration/utils/automation_manager.py
@@ -16,7 +16,9 @@ def __init__(self):
         self.instance = self.factory.create_backend()
         self.adapter = RecommendationsAdapter(self.instance)

-    def generate_embeddings(self, channel_id: str, nodes: List[Union[ContentNode, PublicContentNode]]):
+    def generate_embeddings(
+        self, channel_id: str, nodes: List[Union[ContentNode, PublicContentNode]]
+    ):
         """
         Generates embeddings for the given list of nodes.
         This process is async.
@@ -27,7 +29,9 @@ def generate_embeddings(self, channel_id: str, nodes: List[Union[ContentNode, Pu
         """
         return self.adapter.embed_content(channel_id, nodes)

-    def load_recommendations(self, request_data: Dict[str, Any], override_threshold=False):
+    def load_recommendations(
+        self, request_data: Dict[str, Any], override_threshold=False
+    ):
         """
         Loads recommendations for the given topic(s).
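The automation_manager.py hunks above only re-wrap two long signatures, but they also spell out the module's public entry points. A minimal usage sketch of the recommendation-loading path, assuming the manager class is named AutomationManager (its class statement falls outside these hunks) and using a hypothetical request payload, since the real request_data schema is defined by the recommendations backend rather than by this diff:

    from contentcuration.utils.automation_manager import AutomationManager

    # __init__ (shown above) builds a backend via the factory and wraps it
    # in a RecommendationsAdapter; no arguments are required.
    manager = AutomationManager()

    # Hypothetical payload for illustration only; consult the recommendations
    # backend for the actual schema expected in request_data.
    request_data = {"topics": [{"id": "abc123", "title": "Fractions"}]}

    recommendations = manager.load_recommendations(
        request_data, override_threshold=False
    )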
diff --git a/contentcuration/contentcuration/utils/cache.py b/contentcuration/contentcuration/utils/cache.py index 436d02c17e..7d06dfb00b 100644 --- a/contentcuration/contentcuration/utils/cache.py +++ b/contentcuration/contentcuration/utils/cache.py @@ -118,12 +118,14 @@ def redis_retry(func): @see django_redis.client.default.DefaultClient """ + def redis_retry_func(*args, **kwargs): try: return func(*args, **kwargs) except _main_exceptions: # try one more time return func(*args, **kwargs) + return redis_retry_func @@ -161,7 +163,7 @@ def redis_client(self): :rtype: redis.client.StrictRedis """ redis_client = None - cache_client = getattr(self.cache, 'client', None) + cache_client = getattr(self.cache, "client", None) if isinstance(cache_client, DefaultClient): redis_client = cache_client.get_client(write=True) return redis_client @@ -210,7 +212,10 @@ def set_size(self, size): return self.cache_set(self.size_key, size) def set_modified(self, modified): - return self.cache_set(self.modified_key, modified.isoformat() if isinstance(modified, datetime) else modified) + return self.cache_set( + self.modified_key, + modified.isoformat() if isinstance(modified, datetime) else modified, + ) def reset_modified(self, modified): """ diff --git a/contentcuration/contentcuration/utils/celery/app.py b/contentcuration/contentcuration/utils/celery/app.py index 7eec641dc9..796dcbce26 100644 --- a/contentcuration/contentcuration/utils/celery/app.py +++ b/contentcuration/contentcuration/utils/celery/app.py @@ -10,7 +10,7 @@ class CeleryApp(Celery): task_cls = CeleryTask - result_cls = 'contentcuration.utils.celery.tasks:CeleryAsyncResult' + result_cls = "contentcuration.utils.celery.tasks:CeleryAsyncResult" _result_cls = None def on_init(self): @@ -40,7 +40,7 @@ def get_queued_tasks(self, queue_name="celery"): for task in tasks: try: j = json.loads(task) - body = json.loads(base64.b64decode(j['body'])) + body = json.loads(base64.b64decode(j["body"])) decoded_tasks.append(body) except (TypeError, json.JSONDecodeError, AttributeError): pass diff --git a/contentcuration/contentcuration/utils/celery/tasks.py b/contentcuration/contentcuration/utils/celery/tasks.py index df0f282304..b9885605e6 100644 --- a/contentcuration/contentcuration/utils/celery/tasks.py +++ b/contentcuration/contentcuration/utils/celery/tasks.py @@ -23,6 +23,7 @@ class ProgressTracker: """ Helper to track task progress """ + __slots__ = ("task_id", "send_event", "total", "progress", "last_reported_progress") def __init__(self, task_id, send_event): @@ -71,6 +72,7 @@ def get_task_model(ref, task_id): :rtype: contentcuration.models.CustomTaskMetadata """ from contentcuration.models import CustomTaskMetadata + try: return CustomTaskMetadata.objects.get(task_id=task_id) except CustomTaskMetadata.DoesNotExist: @@ -87,9 +89,9 @@ def generate_task_signature(task_name, task_kwargs=None, channel_id=None): :rtype: str """ md5 = hashlib.md5() - md5.update(task_name.encode('utf-8')) - md5.update((task_kwargs or '').encode('utf-8')) - md5.update((channel_id or '').encode('utf-8')) + md5.update(task_name.encode("utf-8")) + md5.update((task_kwargs or "").encode("utf-8")) + md5.update((channel_id or "").encode("utf-8")) return md5.hexdigest() @@ -104,6 +106,7 @@ def my_task(self): progress.increment() ``` """ + # by default, celery does not track task starting itself track_started = True send_events = True @@ -122,7 +125,9 @@ def on_failure(self, exc, task_id, args, kwargs, einfo): """ Report task failures to sentry as long as the exception is not one of 
the types for which it should `autoretry` """ - if not getattr(self, "autoretry_for", None) or not isinstance(exc, self.autoretry_for): + if not getattr(self, "autoretry_for", None) or not isinstance( + exc, self.autoretry_for + ): report_exception(exc) def shadow_name(self, *args, **kwargs): @@ -150,7 +155,7 @@ def generate_signature(self, kwargs): return generate_task_signature( self.name, task_kwargs=self.backend.encode(prepared_kwargs), - channel_id=prepared_kwargs.get('channel_id') + channel_id=prepared_kwargs.get("channel_id"), ) @contextlib.contextmanager @@ -162,7 +167,7 @@ def _lock_signature(self, signature): """ with transaction.atomic(): # compute crc32 to turn signature into integer - key2 = zlib.crc32(signature.encode('utf-8')) + key2 = zlib.crc32(signature.encode("utf-8")) advisory_lock(TASK_LOCK, key2=key2) yield @@ -173,8 +178,10 @@ def find_ids(self, signature): :rtype: django.db.models.query.QuerySet """ from contentcuration.models import CustomTaskMetadata - return CustomTaskMetadata.objects.filter(signature=signature)\ - .values_list("task_id", flat=True) + + return CustomTaskMetadata.objects.filter(signature=signature).values_list( + "task_id", flat=True + ) def find_incomplete_ids(self, signature): """ @@ -183,9 +190,12 @@ def find_incomplete_ids(self, signature): :rtype: django.db.models.query.QuerySet """ from django_celery_results.models import TaskResult + # Get the filtered task_ids from CustomTaskMetadata model filtered_task_ids = self.find_ids(signature) - task_objects_ids = TaskResult.objects.filter(task_id__in=filtered_task_ids, status__in=states.UNREADY_STATES).values_list("task_id", flat=True) + task_objects_ids = TaskResult.objects.filter( + task_id__in=filtered_task_ids, status__in=states.UNREADY_STATES + ).values_list("task_id", flat=True) return task_objects_ids def fetch(self, task_id): @@ -212,7 +222,7 @@ def enqueue(self, user, **kwargs): if user is None or not isinstance(user, User): raise TypeError("All tasks must be assigned to a user.") - signature = kwargs.pop('signature', None) + signature = kwargs.pop("signature", None) if signature is None: signature = self.generate_signature(kwargs) @@ -220,14 +230,13 @@ def enqueue(self, user, **kwargs): prepared_kwargs = self._prepare_kwargs(kwargs) channel_id = prepared_kwargs.get("channel_id") custom_task_result = CustomTaskMetadata( - task_id=task_id, - user=user, - signature=signature, - channel_id=channel_id + task_id=task_id, user=user, signature=signature, channel_id=channel_id ) custom_task_result.save() - logging.info(f"Enqueuing task:id {self.name}:{task_id} for user:channel {user.pk}:{channel_id} | {signature}") + logging.info( + f"Enqueuing task:id {self.name}:{task_id} for user:channel {user.pk}:{channel_id} | {signature}" + ) # returns a CeleryAsyncResult async_result = self.apply_async( @@ -265,9 +274,13 @@ def fetch_or_enqueue(self, user, **kwargs): async_result = self.fetch(task_ids[0]) # double check if async_result and async_result.status not in states.READY_STATES: - logging.info(f"Fetched matching task {self.name} for user {user.pk} with id {async_result.id} | {signature}") + logging.info( + f"Fetched matching task {self.name} for user {user.pk} with id {async_result.id} | {signature}" + ) return async_result - logging.info(f"Didn't fetch matching task {self.name} for user {user.pk} | {signature}") + logging.info( + f"Didn't fetch matching task {self.name} for user {user.pk} | {signature}" + ) kwargs.update(signature=signature) return self.enqueue(user, **kwargs) @@ -279,15 +292,22 @@ 
def requeue(self, **kwargs): :rtype: CeleryAsyncResult """ from contentcuration.models import CustomTaskMetadata + request = self.request if request is None: - raise NotImplementedError("This method should only be called within the execution of a task") + raise NotImplementedError( + "This method should only be called within the execution of a task" + ) task_kwargs = request.kwargs.copy() task_kwargs.update(kwargs) signature = self.generate_signature(kwargs) custom_task_metadata = CustomTaskMetadata.objects.get(task_id=request.id) - logging.info(f"Re-queuing task {self.name} for user {custom_task_metadata.user.pk} from {request.id} | {signature}") - return self.enqueue(custom_task_metadata.user, signature=signature, **task_kwargs) + logging.info( + f"Re-queuing task {self.name} for user {custom_task_metadata.user.pk} from {request.id} | {signature}" + ) + return self.enqueue( + custom_task_metadata.user, signature=signature, **task_kwargs + ) def revoke(self, exclude_task_ids=None, **kwargs): """ @@ -297,6 +317,7 @@ def revoke(self, exclude_task_ids=None, **kwargs): :return: The number of tasks revoked """ from django_celery_results.models import TaskResult + signature = self.generate_signature(kwargs) task_ids = self.find_incomplete_ids(signature) diff --git a/contentcuration/contentcuration/utils/cloud_storage.py b/contentcuration/contentcuration/utils/cloud_storage.py index bf60b51bb3..a331226905 100644 --- a/contentcuration/contentcuration/utils/cloud_storage.py +++ b/contentcuration/contentcuration/utils/cloud_storage.py @@ -28,7 +28,6 @@ def create_backend(self) -> Backend: class CloudStorage(Backend): - def connect(self) -> None: return super().connect() @@ -36,5 +35,5 @@ def make_request(self, request) -> CloudStorageResponse: return super().make_request(request) @classmethod - def _create_instance(cls) -> 'CloudStorage': + def _create_instance(cls) -> "CloudStorage": return cls() diff --git a/contentcuration/contentcuration/utils/csv_writer.py b/contentcuration/contentcuration/utils/csv_writer.py index a5fecd4ba7..0ceaefcd7c 100644 --- a/contentcuration/contentcuration/utils/csv_writer.py +++ b/contentcuration/contentcuration/utils/csv_writer.py @@ -21,89 +21,135 @@ # Formatting helpers -def _format_size(num, suffix='B'): + +def _format_size(num, suffix="B"): """ Format sizes """ - for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: + for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) + return "%.1f%s%s" % (num, "Yi", suffix) def generate_user_csv_filename(user): directory = os.path.join(settings.CSV_ROOT, "users") if not os.path.exists(directory): os.makedirs(directory) - email = re.sub(r'([^\s\w]|_)+', '', user.email.split('.')[0]) - return os.path.join(directory, "{}{}- {} {} Data.csv".format(email, user.id, user.first_name, user.last_name)) + email = re.sub(r"([^\s\w]|_)+", "", user.email.split(".")[0]) + return os.path.join( + directory, + "{}{}- {} {} Data.csv".format(email, user.id, user.first_name, user.last_name), + ) def _write_user_row(file, writer, domain): - filename = '{}.{}'.format(file['checksum'], file['file_format__extension']) - writer.writerow([ - file['channel_name'] or _("No Channel"), - file['contentnode__title'] or _("No resource"), - next((k[1] for k in content_kinds.choices if k[0] == file['contentnode__kind_id']), ''), - file['original_filename'], - _format_size(file['file_size'] or 0), - generate_storage_url(filename), - 
file['contentnode__description'], - file['contentnode__author'], - file['language__readable_name'] or file['contentnode__language__readable_name'], - file['contentnode__license__license_name'], - file['contentnode__license_description'], - file['contentnode__copyright_holder'], - ]) + filename = "{}.{}".format(file["checksum"], file["file_format__extension"]) + writer.writerow( + [ + file["channel_name"] or _("No Channel"), + file["contentnode__title"] or _("No resource"), + next( + ( + k[1] + for k in content_kinds.choices + if k[0] == file["contentnode__kind_id"] + ), + "", + ), + file["original_filename"], + _format_size(file["file_size"] or 0), + generate_storage_url(filename), + file["contentnode__description"], + file["contentnode__author"], + file["language__readable_name"] + or file["contentnode__language__readable_name"], + file["contentnode__license__license_name"], + file["contentnode__license_description"], + file["contentnode__copyright_holder"], + ] + ) def write_user_csv(user, path=None): csv_path = path or generate_user_csv_filename(user) - mode = 'wb' + mode = "wb" encoding = None # On Python 3, if sys.version_info.major == 3: - mode = 'w' - encoding = 'utf-8' + mode = "w" + encoding = "utf-8" with io.open(csv_path, mode, encoding=encoding) as csvfile: - writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL) - - writer.writerow([_('Channel'), _('Title'), _('Kind'), _('Filename'), _('File Size'), _('URL'), _('Description'), - _('Author'), _('Language'), _('License'), _('License Description'), _('Copyright Holder')]) + writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL) + + writer.writerow( + [ + _("Channel"), + _("Title"), + _("Kind"), + _("Filename"), + _("File Size"), + _("URL"), + _("Description"), + _("Author"), + _("Language"), + _("License"), + _("License Description"), + _("Copyright Holder"), + ] + ) domain = Site.objects.get(pk=1).domain # Get all user files channel_query = Channel.objects.filter( - Q(main_tree__tree_id=OuterRef("contentnode__tree_id")) | - Q(trash_tree__tree_id=OuterRef("contentnode__tree_id")) + Q(main_tree__tree_id=OuterRef("contentnode__tree_id")) + | Q(trash_tree__tree_id=OuterRef("contentnode__tree_id")) ) - user_files = user.files \ - .select_related('language', 'contentnode', 'file_format') \ - .annotate(channel_name=Subquery(channel_query.values_list("name", flat=True)[:1])) \ + user_files = ( + user.files.select_related("language", "contentnode", "file_format") + .annotate( + channel_name=Subquery(channel_query.values_list("name", flat=True)[:1]) + ) .values( - 'channel_name', - 'original_filename', - 'file_size', - 'checksum', - 'file_format__extension', - 'language__readable_name', - 'contentnode__title', - 'contentnode__language__readable_name', - 'contentnode__license__license_name', - 'contentnode__kind_id', - 'contentnode__description', - 'contentnode__author', - 'contentnode__provider', - 'contentnode__aggregator', - 'contentnode__license_description', - 'contentnode__copyright_holder', + "channel_name", + "original_filename", + "file_size", + "checksum", + "file_format__extension", + "language__readable_name", + "contentnode__title", + "contentnode__language__readable_name", + "contentnode__license__license_name", + "contentnode__kind_id", + "contentnode__description", + "contentnode__author", + "contentnode__provider", + "contentnode__aggregator", + "contentnode__license_description", + "contentnode__copyright_holder", ) + ) for file in user_files: _write_user_row(file, writer, domain) for file 
in user.staged_files.all(): file_size = _format_size(file.file_size) - writer.writerow([_("No Channel"), _("No Resource"), "", _("Staged File"), file_size, "", "", "", "", "", "", ""]) + writer.writerow( + [ + _("No Channel"), + _("No Resource"), + "", + _("Staged File"), + file_size, + "", + "", + "", + "", + "", + "", + "", + ] + ) return csv_path diff --git a/contentcuration/contentcuration/utils/db_tools.py b/contentcuration/contentcuration/utils/db_tools.py index 820f9ba1a2..e433b16590 100644 --- a/contentcuration/contentcuration/utils/db_tools.py +++ b/contentcuration/contentcuration/utils/db_tools.py @@ -43,7 +43,7 @@ def create_user(email, password, first_name, last_name, admin=False): user.set_password(password) user.first_name = first_name user.last_name = last_name - print( + print( # noqa: T201 "User created (email: {}, password: {}, admin: {})".format( email, password, admin ) @@ -67,7 +67,9 @@ def create_channel( domain = uuid.uuid5(uuid.NAMESPACE_DNS, name) node_id = uuid.uuid5(domain, name) - channel, _new = Channel.objects.get_or_create(actor_id=editors[0].id, pk=node_id.hex) + channel, _new = Channel.objects.get_or_create( + actor_id=editors[0].id, pk=node_id.hex + ) channel.name = name channel.description = description @@ -158,7 +160,9 @@ def create_topic(title, parent, description=""): all_questions = (question_1, question_2, question_3, question_4) -def create_exercise(title, parent, license_id, description="", user=None, empty=False, complete=True): +def create_exercise( + title, parent, license_id, description="", user=None, empty=False, complete=True +): mastery_model = { "mastery_model": exercises.M_OF_N, "randomize": False, @@ -177,7 +181,7 @@ def create_exercise(title, parent, license_id, description="", user=None, empty= license_description=LICENSE_DESCRIPTION, extra_fields=mastery_model, sort_order=get_sort_order(), - complete=complete + complete=complete, ) exercise.save() @@ -212,7 +216,15 @@ def create_question(node, question, question_type, answers): def create_contentnode( - title, parent, file, kind_id, license_id, description="", user=None, tags=None, complete=True + title, + parent, + file, + kind_id, + license_id, + description="", + user=None, + tags=None, + complete=True, ): copyright_holder = "Someone Somewhere" if user: @@ -318,7 +330,8 @@ def insert_into_default_db(self): ContentNode.objects.build_tree_nodes(self._root_node), batch_size=BATCH_SIZE ) AssessmentItem.objects.bulk_create( - self.assessment_items, batch_size=BATCH_SIZE, + self.assessment_items, + batch_size=BATCH_SIZE, ) File.objects.bulk_create(self.files, batch_size=BATCH_SIZE) if self.tags: @@ -347,14 +360,23 @@ def recurse_and_generate(self, parent_id, levels): return children def generate_topic(self, parent_id=None): - data = self.contentnode_data(kind=content_kinds.TOPIC, parent_id=parent_id,) + data = self.contentnode_data( + kind=content_kinds.TOPIC, + parent_id=parent_id, + ) self.generate_file( - data["id"], "Topic Thumbnail", format_presets.TOPIC_THUMBNAIL, "png", + data["id"], + "Topic Thumbnail", + format_presets.TOPIC_THUMBNAIL, + "png", ) return data def generate_document(self, parent_id): - data = self.contentnode_data(kind=content_kinds.DOCUMENT, parent_id=parent_id,) + data = self.contentnode_data( + kind=content_kinds.DOCUMENT, + parent_id=parent_id, + ) self.generate_file( data["id"], "Sample Document", format_presets.DOCUMENT, file_formats.PDF ) @@ -367,9 +389,15 @@ def generate_document(self, parent_id): return data def generate_video(self, parent_id): - data = 
self.contentnode_data(kind=content_kinds.VIDEO, parent_id=parent_id,) + data = self.contentnode_data( + kind=content_kinds.VIDEO, + parent_id=parent_id, + ) self.generate_file( - data["id"], "Sample Video", format_presets.VIDEO_HIGH_RES, file_formats.MP4, + data["id"], + "Sample Video", + format_presets.VIDEO_HIGH_RES, + file_formats.MP4, ) self.generate_file( data["id"], @@ -386,7 +414,10 @@ def generate_video(self, parent_id): return data def generate_audio(self, parent_id): - data = self.contentnode_data(kind=content_kinds.AUDIO, parent_id=parent_id,) + data = self.contentnode_data( + kind=content_kinds.AUDIO, + parent_id=parent_id, + ) self.generate_file( data["id"], "Sample Audio", format_presets.AUDIO, file_formats.MP3 ) @@ -399,7 +430,10 @@ def generate_audio(self, parent_id): return data def generate_html5(self, parent_id): - data = self.contentnode_data(kind=content_kinds.HTML5, parent_id=parent_id,) + data = self.contentnode_data( + kind=content_kinds.HTML5, + parent_id=parent_id, + ) self.generate_file( data["id"], "Sample HTML", format_presets.HTML5_ZIP, file_formats.HTML5 ) @@ -465,7 +499,11 @@ def generate_leaf(self, parent_id): return node def generate_file( - self, contentnode_id, display_name, preset_id, extension, + self, + contentnode_id, + display_name, + preset_id, + extension, ): if extension not in self.temporary_files: with tempfile.NamedTemporaryFile( @@ -501,7 +539,9 @@ def generate_file( ) self.files.append(file) - def contentnode_data(self, parent_id=None, kind=None, extra_fields=None, complete=True): + def contentnode_data( + self, parent_id=None, kind=None, extra_fields=None, complete=True + ): return { "extra_fields": extra_fields or {}, "content_id": uuid4_hex(), @@ -521,11 +561,24 @@ def contentnode_data(self, parent_id=None, kind=None, extra_fields=None, complet "parent_id": parent_id, "kind_id": kind, "complete": complete, - "resource_types": {c: True for c in choices(RESOURCETYPELIST, k=random.randint(1, 2))}, - "learning_activities": {c: True for c in choices(LEARNINGACTIVITIESLIST, k=random.randint(1, 3))}, - "accessibility_labels": {c: True for c in choices(ACCESSIBILITYCATEGORIESLIST, k=random.randint(1, 3))}, - "grade_levels": {c: True for c in choices(LEVELSLIST, k=random.randint(1, 2))}, - "categories": {c: True for c in choices(SUBJECTSLIST, k=random.randint(1, 10))}, - "learner_needs": {c: True for c in choices(NEEDSLIST, k=random.randint(1, 5))}, + "resource_types": { + c: True for c in choices(RESOURCETYPELIST, k=random.randint(1, 2)) + }, + "learning_activities": { + c: True for c in choices(LEARNINGACTIVITIESLIST, k=random.randint(1, 3)) + }, + "accessibility_labels": { + c: True + for c in choices(ACCESSIBILITYCATEGORIESLIST, k=random.randint(1, 3)) + }, + "grade_levels": { + c: True for c in choices(LEVELSLIST, k=random.randint(1, 2)) + }, + "categories": { + c: True for c in choices(SUBJECTSLIST, k=random.randint(1, 10)) + }, + "learner_needs": { + c: True for c in choices(NEEDSLIST, k=random.randint(1, 5)) + }, "suggested_duration": random.randint(5, 5000), } diff --git a/contentcuration/contentcuration/utils/files.py b/contentcuration/contentcuration/utils/files.py index a5d8361e8c..4067e843d6 100644 --- a/contentcuration/contentcuration/utils/files.py +++ b/contentcuration/contentcuration/utils/files.py @@ -21,7 +21,9 @@ THUMBNAIL_WIDTH = 400 -def create_file_from_contents(contents, ext=None, node=None, preset_id=None, uploaded_by=None): +def create_file_from_contents( + contents, ext=None, node=None, preset_id=None, uploaded_by=None 
+): checksum, _, path = write_raw_content_to_storage(contents, ext=ext) result = File( @@ -30,7 +32,7 @@ def create_file_from_contents(contents, ext=None, node=None, preset_id=None, upl checksum=checksum, preset_id=preset_id, contentnode=node, - uploaded_by=uploaded_by + uploaded_by=uploaded_by, ) result.file_on_disk.name = path result.save() @@ -54,7 +56,9 @@ def get_file_diff(files): def check_file_url(f): filepath = generate_object_storage_name(os.path.splitext(f)[0], f) - url = "/".join([settings.AWS_S3_ENDPOINT_URL, settings.AWS_S3_BUCKET_NAME, filepath]) + url = "/".join( + [settings.AWS_S3_ENDPOINT_URL, settings.AWS_S3_BUCKET_NAME, filepath] + ) resp = session.head(url) if resp.status_code != 200: ret.append(f) @@ -66,7 +70,9 @@ def check_file_url(f): return ret -def duplicate_file(file_object, node=None, assessment_item=None, preset_id=None, save=True): +def duplicate_file( + file_object, node=None, assessment_item=None, preset_id=None, save=True +): if not file_object: return None file_copy = copy.copy(file_object) @@ -81,11 +87,11 @@ def duplicate_file(file_object, node=None, assessment_item=None, preset_id=None, def get_thumbnail_encoding(filename, dimension=THUMBNAIL_WIDTH): """ - Generates a base64 encoding for a thumbnail - Args: - filename (str): thumbnail to generate encoding from (must be in storage already) - dimension (int, optional): desired width of thumbnail. Defaults to 400. - Returns base64 encoding of resized thumbnail + Generates a base64 encoding for a thumbnail + Args: + filename (str): thumbnail to generate encoding from (must be in storage already) + dimension (int, optional): desired width of thumbnail. Defaults to 400. + Returns base64 encoding of resized thumbnail """ if filename.startswith("data:image"): @@ -99,10 +105,10 @@ def get_thumbnail_encoding(filename, dimension=THUMBNAIL_WIDTH): try: if not filename.startswith(settings.STATIC_ROOT): filename = generate_object_storage_name(checksum, filename) - inbuffer = default_storage.open(filename, 'rb') + inbuffer = default_storage.open(filename, "rb") else: - inbuffer = open(filename, 'rb') + inbuffer = open(filename, "rb") if not inbuffer: raise AssertionError @@ -118,7 +124,9 @@ def get_thumbnail_encoding(filename, dimension=THUMBNAIL_WIDTH): image.thumbnail(thumbnail_size, Image.LANCZOS) image.save(outbuffer, image_format) - return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(outbuffer.getvalue()).decode('utf-8')) + return "data:image/{};base64,{}".format( + ext[1:], base64.b64encode(outbuffer.getvalue()).decode("utf-8") + ) finally: # Try to close the inbuffer if it has been created try: @@ -128,25 +136,25 @@ def get_thumbnail_encoding(filename, dimension=THUMBNAIL_WIDTH): outbuffer.close() -BASE64_REGEX_STR = r'data:image\/([A-Za-z]*);base64,((?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)*)' +BASE64_REGEX_STR = r"data:image\/([A-Za-z]*);base64,((?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)*)" BASE64_REGEX = re.compile(BASE64_REGEX_STR, flags=re.IGNORECASE) def get_base64_encoding(text): - """ get_base64_encoding: Get the first base64 match or None - Args: - text (str): text to check for base64 encoding - Returns: First match in text + """get_base64_encoding: Get the first base64 match or None + Args: + text (str): text to check for base64 encoding + Returns: First match in text """ return BASE64_REGEX.search(text) def write_base64_to_file(encoding, fpath_out): - """ write_base64_to_file: Convert base64 image to file - Args: - encoding (str): base64 
encoded string - fpath_out (str): path to file to write - Returns: None + """write_base64_to_file: Convert base64 image to file + Args: + encoding (str): base64 encoded string + fpath_out (str): path to file to write + Returns: None """ encoding_match = get_base64_encoding(encoding) @@ -155,23 +163,30 @@ def write_base64_to_file(encoding, fpath_out): raise AssertionError("Error writing to file: Invalid base64 encoding") with open(fpath_out, "wb") as target_file: - target_file.write(base64.decodebytes(encoding_match.group(2).encode('utf-8'))) + target_file.write(base64.decodebytes(encoding_match.group(2).encode("utf-8"))) -def create_thumbnail_from_base64(encoding, file_format_id=file_formats.PNG, preset_id=None, uploaded_by=None): +def create_thumbnail_from_base64( + encoding, file_format_id=file_formats.PNG, preset_id=None, uploaded_by=None +): """ - Takes encoding and makes it into a file object - Args: - encoding (str): base64 to make into an image file - file_format_id (str): what the extension should be - preset_id (str): what the preset should be - uploaded_by (): who uploaded the image - Returns object with the file_on_disk being the image file generated from the encoding + Takes encoding and makes it into a file object + Args: + encoding (str): base64 to make into an image file + file_format_id (str): what the extension should be + preset_id (str): what the preset should be + uploaded_by (): who uploaded the image + Returns object with the file_on_disk being the image file generated from the encoding """ fd, path = tempfile.mkstemp() try: write_base64_to_file(encoding, path) - with open(path, 'rb') as tf: - return create_file_from_contents(tf.read(), ext=file_format_id, preset_id=preset_id, uploaded_by=uploaded_by) + with open(path, "rb") as tf: + return create_file_from_contents( + tf.read(), + ext=file_format_id, + preset_id=preset_id, + uploaded_by=uploaded_by, + ) finally: os.close(fd) diff --git a/contentcuration/contentcuration/utils/garbage_collect.py b/contentcuration/contentcuration/utils/garbage_collect.py index 805a114231..5f771b825a 100755 --- a/contentcuration/contentcuration/utils/garbage_collect.py +++ b/contentcuration/contentcuration/utils/garbage_collect.py @@ -45,7 +45,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): def get_deleted_chefs_root(): - deleted_chefs_node, _new = ContentNode.objects.get_or_create(pk=settings.DELETED_CHEFS_ROOT_ID, kind_id=content_kinds.TOPIC) + deleted_chefs_node, _new = ContentNode.objects.get_or_create( + pk=settings.DELETED_CHEFS_ROOT_ID, kind_id=content_kinds.TOPIC + ) return deleted_chefs_node @@ -59,7 +61,11 @@ def _clean_up_files(contentnode_ids): files_on_storage = files.values_list("file_on_disk", flat=True) for disk_file_path in files_on_storage: - is_other_node_pointing = Exists(File.objects.filter(file_on_disk=disk_file_path).exclude(contentnode__in=contentnode_ids)) + is_other_node_pointing = Exists( + File.objects.filter(file_on_disk=disk_file_path).exclude( + contentnode__in=contentnode_ids + ) + ) if not is_other_node_pointing: default_storage.delete(disk_file_path) @@ -80,11 +86,17 @@ def clean_up_soft_deleted_users(): used by any other channel. - all user invitations. 
""" - account_deletion_buffer_delta = now() - datetime.timedelta(days=settings.ACCOUNT_DELETION_BUFFER) - user_latest_deletion_time_subquery = Subquery(UserHistory.objects.filter(user_id=OuterRef( - "id"), action=user_history.DELETION).values("performed_at").order_by("-performed_at")[:1]) - users_to_delete = User.objects.annotate(latest_deletion_time=user_latest_deletion_time_subquery).filter( - deleted=True, latest_deletion_time__lt=account_deletion_buffer_delta) + account_deletion_buffer_delta = now() - datetime.timedelta( + days=settings.ACCOUNT_DELETION_BUFFER + ) + user_latest_deletion_time_subquery = Subquery( + UserHistory.objects.filter(user_id=OuterRef("id"), action=user_history.DELETION) + .values("performed_at") + .order_by("-performed_at")[:1] + ) + users_to_delete = User.objects.annotate( + latest_deletion_time=user_latest_deletion_time_subquery + ).filter(deleted=True, latest_deletion_time__lt=account_deletion_buffer_delta) for user in users_to_delete: user.hard_delete_user_related_data() @@ -148,15 +160,17 @@ def clean_up_feature_flags(): """ current_flag_keys = feature_flags.SCHEMA.get("properties", {}).keys() existing_flag_keys = ( - User.objects - .annotate(key=JSONObjectKeys("feature_flags")) + User.objects.annotate(key=JSONObjectKeys("feature_flags")) .values_list("key", flat=True) .distinct() ) - for remove_flag in (set(existing_flag_keys) - set(current_flag_keys)): - User.objects.filter(feature_flags__has_key=remove_flag) \ - .update(feature_flags=CombinedExpression(F("feature_flags"), "-", Value(remove_flag))) + for remove_flag in set(existing_flag_keys) - set(current_flag_keys): + User.objects.filter(feature_flags__has_key=remove_flag).update( + feature_flags=CombinedExpression( + F("feature_flags"), "-", Value(remove_flag) + ) + ) def clean_up_tasks(): @@ -166,8 +180,12 @@ def clean_up_tasks(): with DisablePostDeleteSignal(): date_cutoff = now() - datetime.timedelta(days=7) - tasks_to_delete = TaskResult.objects.filter(date_done__lt=date_cutoff, status__in=states.READY_STATES) - CustomTaskMetadata.objects.filter(task_id__in=tasks_to_delete.values_list("task_id", flat=True)).delete() + tasks_to_delete = TaskResult.objects.filter( + date_done__lt=date_cutoff, status__in=states.READY_STATES + ) + CustomTaskMetadata.objects.filter( + task_id__in=tasks_to_delete.values_list("task_id", flat=True) + ).delete() count, _ = tasks_to_delete.delete() logging.info("Deleted {} completed task(s) from the task table".format(count)) @@ -185,10 +203,15 @@ def clean_up_stale_files(last_modified=None): with DisablePostDeleteSignal(): files_to_clean_up = File.objects.filter( - contentnode__isnull=True, assessment_item__isnull=True, slideshow_slide__isnull=True, modified__lt=last_modified + contentnode__isnull=True, + assessment_item__isnull=True, + slideshow_slide__isnull=True, + modified__lt=last_modified, ) - files_to_clean_up_slice = files_to_clean_up.values_list("id", flat=True)[0:CHUNKSIZE] + files_to_clean_up_slice = files_to_clean_up.values_list("id", flat=True)[ + 0:CHUNKSIZE + ] count = 0 @@ -197,6 +220,12 @@ def clean_up_stale_files(last_modified=None): this_count = len(files_to_clean_up_slice) count += this_count - files_to_clean_up_slice = files_to_clean_up.values_list("id", flat=True)[0:CHUNKSIZE] + files_to_clean_up_slice = files_to_clean_up.values_list("id", flat=True)[ + 0:CHUNKSIZE + ] - logging.info("Files with a modified date older than {} were deleted. 
Deleted {} file(s).".format(last_modified, count)) + logging.info( + "Files with a modified date older than {} were deleted. Deleted {} file(s).".format( + last_modified, count + ) + ) diff --git a/contentcuration/contentcuration/utils/gcs_storage.py b/contentcuration/contentcuration/utils/gcs_storage.py index 9ec21a3886..5c4a425aec 100644 --- a/contentcuration/contentcuration/utils/gcs_storage.py +++ b/contentcuration/contentcuration/utils/gcs_storage.py @@ -18,7 +18,9 @@ MAX_RETRY_TIME = 60 # seconds -def _create_default_client(service_account_credentials_path=settings.GCS_STORAGE_SERVICE_ACCOUNT_KEY_PATH): +def _create_default_client( + service_account_credentials_path=settings.GCS_STORAGE_SERVICE_ACCOUNT_KEY_PATH, +): if service_account_credentials_path: return Client.from_service_account_json(service_account_credentials_path) return Client() @@ -121,6 +123,7 @@ def save(self, name, fobj, max_length=None, blob_object=None): # determine the current file's mimetype based on the name # import determine_content_type lazily in here, so we don't get into an infinite loop with circular dependencies from contentcuration.utils.storage_common import determine_content_type + content_type = determine_content_type(name) # force the current file to be at file location 0, to @@ -132,7 +135,8 @@ def save(self, name, fobj, max_length=None, blob_object=None): return name blob.upload_from_file( - fobj, content_type=content_type, + fobj, + content_type=content_type, ) # Close StringIO object and discard memory buffer if created @@ -215,10 +219,14 @@ def _is_file_empty(fobj): class CompositeGCS(Storage): def __init__(self): self.backends = [] - self.backends.append(GoogleCloudStorage(_create_default_client(), settings.AWS_S3_BUCKET_NAME)) + self.backends.append( + GoogleCloudStorage(_create_default_client(), settings.AWS_S3_BUCKET_NAME) + ) # Only add the studio-content bucket (the production bucket) if we're not in production if settings.SITE_ID != settings.PRODUCTION_SITE_ID: - self.backends.append(GoogleCloudStorage(Client.create_anonymous_client(), "studio-content")) + self.backends.append( + GoogleCloudStorage(Client.create_anonymous_client(), "studio-content") + ) def _get_writeable_backend(self): """ @@ -241,7 +249,7 @@ def _get_readable_backend(self, name): def get_client(self): return self._get_writeable_backend().get_client() - def open(self, name, mode='rb'): + def open(self, name, mode="rb"): return self._get_readable_backend(name).open(name, mode) def save(self, name, content, max_length=None): diff --git a/contentcuration/contentcuration/utils/import_tools.py b/contentcuration/contentcuration/utils/import_tools.py index e662b75fc4..0a187ce4c9 100644 --- a/contentcuration/contentcuration/utils/import_tools.py +++ b/contentcuration/contentcuration/utils/import_tools.py @@ -27,27 +27,29 @@ from contentcuration.utils.garbage_collect import get_deleted_chefs_root -CHANNEL_TABLE = 'content_channelmetadata' -NODE_TABLE = 'content_contentnode' -ASSESSMENTMETADATA_TABLE = 'content_assessmentmetadata' -FILE_TABLE = 'content_file' -TAG_TABLE = 'content_contenttag' -NODE_TAG_TABLE = 'content_contentnode_tags' -LICENSE_TABLE = 'content_license' +CHANNEL_TABLE = "content_channelmetadata" +NODE_TABLE = "content_contentnode" +ASSESSMENTMETADATA_TABLE = "content_assessmentmetadata" +FILE_TABLE = "content_file" +TAG_TABLE = "content_contenttag" +NODE_TAG_TABLE = "content_contentnode_tags" +LICENSE_TABLE = "content_license" NODE_COUNT = 0 FILE_COUNT = 0 TAG_COUNT = 0 ANSWER_FIELD_MAP = { - 
exercises.SINGLE_SELECTION: 'radio 1', - exercises.MULTIPLE_SELECTION: 'radio 1', - exercises.INPUT_QUESTION: 'numeric-input 1', + exercises.SINGLE_SELECTION: "radio 1", + exercises.MULTIPLE_SELECTION: "radio 1", + exercises.INPUT_QUESTION: "numeric-input 1", } log = logging.getLogger(__name__) -def import_channel(source_id, target_id=None, download_url=None, editor=None, logger=None): +def import_channel( + source_id, target_id=None, download_url=None, editor=None, logger=None +): """ Import a channel from another Studio instance. This can be used to copy online Studio channels into local machines for development, @@ -78,7 +80,9 @@ def import_channel(source_id, target_id=None, download_url=None, editor=None, lo conn = None try: if download_url: - response = requests.get('{}/content/databases/{}.sqlite3'.format(download_url, source_id)) + response = requests.get( + "{}/content/databases/{}.sqlite3".format(download_url, source_id) + ) for chunk in response: tempf.write(chunk) else: @@ -132,25 +136,33 @@ def import_channel(source_id, target_id=None, download_url=None, editor=None, lo os.unlink(tempf.name) # Print stats - log.info("\n\nChannel has been imported (time: {ms})\n".format(ms=datetime.datetime.now() - start)) + log.info( + "\n\nChannel has been imported (time: {ms})\n".format( + ms=datetime.datetime.now() - start + ) + ) log.info("\n\n********** IMPORT COMPLETE **********\n\n") def create_channel(cursor, target_id, editor): - """ create_channel: Create channel at target id - Args: - cursor (sqlite3.Connection): connection to export database - target_id (str): channel_id to write to - Returns: channel model created and id of root node + """create_channel: Create channel at target id + Args: + cursor (sqlite3.Connection): connection to export database + target_id (str): channel_id to write to + Returns: channel model created and id of root node """ id, name, description, thumbnail, root_pk, version, last_updated = cursor.execute( - 'SELECT id, name, description, thumbnail, root_pk, version, last_updated FROM {table}' - .format(table=CHANNEL_TABLE)).fetchone() - channel, is_new = models.Channel.objects.get_or_create(pk=target_id, actor_id=editor.id) + "SELECT id, name, description, thumbnail, root_pk, version, last_updated FROM {table}".format( + table=CHANNEL_TABLE + ) + ).fetchone() + channel, is_new = models.Channel.objects.get_or_create( + pk=target_id, actor_id=editor.id + ) channel.name = name channel.description = description channel.thumbnail = write_to_thumbnail_file(thumbnail) - channel.thumbnail_encoding = {'base64': thumbnail, 'points': [], 'zoom': 0} + channel.thumbnail_encoding = {"base64": thumbnail, "points": [], "zoom": 0} channel.version = version channel.save() log.info("\tCreated channel {} with name {}".format(target_id, name)) @@ -158,18 +170,25 @@ def create_channel(cursor, target_id, editor): def write_to_thumbnail_file(raw_thumbnail): - """ write_to_thumbnail_file: Convert base64 thumbnail to file - Args: - raw_thumbnail (str): base64 encoded thumbnail - Returns: thumbnail filename + """write_to_thumbnail_file: Convert base64 thumbnail to file + Args: + raw_thumbnail (str): base64 encoded thumbnail + Returns: thumbnail filename """ - if raw_thumbnail and isinstance(raw_thumbnail, str) and raw_thumbnail != "" and 'static' not in raw_thumbnail: + if ( + raw_thumbnail + and isinstance(raw_thumbnail, str) + and raw_thumbnail != "" + and "static" not in raw_thumbnail + ): with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tempf: try: tempf.close() 
write_base64_to_file(raw_thumbnail, tempf.name) - with open(tempf.name, 'rb') as tf: - fobj = create_file_from_contents(tf.read(), ext="png", preset_id=format_presets.CHANNEL_THUMBNAIL) + with open(tempf.name, "rb") as tf: + fobj = create_file_from_contents( + tf.read(), ext="png", preset_id=format_presets.CHANNEL_THUMBNAIL + ) return str(fobj) finally: tempf.close() @@ -177,25 +196,44 @@ def write_to_thumbnail_file(raw_thumbnail): def create_nodes(cursor, target_id, parent, indent=1, download_url=None): - """ create_channel: Create channel at target id - Args: - cursor (sqlite3.Connection): connection to export database - target_id (str): channel_id to write to - parent (models.ContentNode): node's parent - indent (int): How far to indent print statements - Returns: newly created node + """create_channel: Create channel at target id + Args: + cursor (sqlite3.Connection): connection to export database + target_id (str): channel_id to write to + parent (models.ContentNode): node's parent + indent (int): How far to indent print statements + Returns: newly created node """ # Read database rows that match parent - parent_query = "parent_id=\'{}\'".format(parent.node_id) + parent_query = "parent_id='{}'".format(parent.node_id) - sql_command = 'SELECT id, title, content_id, description, sort_order, '\ - 'license_owner, author, license_id, kind, coach_content, lang_id FROM {table} WHERE {query} ORDER BY sort_order;'\ - .format(table=NODE_TABLE, query=parent_query) + sql_command = ( + "SELECT id, title, content_id, description, sort_order, " + "license_owner, author, license_id, kind, coach_content, lang_id FROM {table} WHERE {query} ORDER BY sort_order;".format( + table=NODE_TABLE, query=parent_query + ) + ) query = cursor.execute(sql_command).fetchall() # Parse through rows and create models - for id, title, content_id, description, sort_order, license_owner, author, license_id, kind, coach_content, lang_id in query: - log.info("{indent} {id} ({title} - {kind})...".format(indent=" |" * indent, id=id, title=title, kind=kind)) + for ( + id, + title, + content_id, + description, + sort_order, + license_owner, + author, + license_id, + kind, + coach_content, + lang_id, + ) in query: + log.info( + "{indent} {id} ({title} - {kind})...".format( + indent=" |" * indent, id=id, title=title, kind=kind + ) + ) # Determine role role = roles.LEARNER @@ -203,7 +241,9 @@ def create_nodes(cursor, target_id, parent, indent=1, download_url=None): role = roles.COACH # Determine extra_fields - assessment_query = "SELECT mastery_model, randomize FROM {table} WHERE contentnode_id='{node}'".format(table=ASSESSMENTMETADATA_TABLE, node=id) + assessment_query = "SELECT mastery_model, randomize FROM {table} WHERE contentnode_id='{node}'".format( + table=ASSESSMENTMETADATA_TABLE, node=id + ) result = cursor.execute(assessment_query).fetchone() extra_fields = result[0] if result else {} if isinstance(extra_fields, str): @@ -242,9 +282,13 @@ def create_nodes(cursor, target_id, parent, indent=1, download_url=None): # Handle foreign key references (children, files, tags) if kind == content_kinds.TOPIC: - create_nodes(cursor, target_id, node, indent=indent + 1, download_url=download_url) + create_nodes( + cursor, target_id, node, indent=indent + 1, download_url=download_url + ) elif kind == content_kinds.EXERCISE: - create_assessment_items(cursor, node, indent=indent + 1, download_url=download_url) + create_assessment_items( + cursor, node, indent=indent + 1, download_url=download_url + ) create_files(cursor, node, indent=indent 
+ 1, download_url=download_url) create_tags(cursor, node, target_id, indent=indent + 1) @@ -252,11 +296,11 @@ def create_nodes(cursor, target_id, parent, indent=1, download_url=None): def retrieve_license(cursor, license_id): - """ retrieve_license_name: Get license based on id from exported db - Args: - cursor (sqlite3.Connection): connection to export database - license_id (str): id of license on exported db - Returns: license model matching the name and the associated license description + """retrieve_license_name: Get license based on id from exported db + Args: + cursor (sqlite3.Connection): connection to export database + license_id (str): id of license on exported db + Returns: license model matching the name and the associated license description """ # Handle no license being assigned if license_id is None or license_id == "": @@ -264,24 +308,40 @@ def retrieve_license(cursor, license_id): # Return license that matches name name, description = cursor.execute( - 'SELECT license_name, license_description FROM {table} WHERE id={id}'.format(table=LICENSE_TABLE, id=license_id) + "SELECT license_name, license_description FROM {table} WHERE id={id}".format( + table=LICENSE_TABLE, id=license_id + ) ).fetchone() return models.License.objects.get(license_name=name), description -def download_file(filename, download_url=None, contentnode=None, assessment_item=None, preset=None, file_size=None, lang_id=None): +def download_file( + filename, + download_url=None, + contentnode=None, + assessment_item=None, + preset=None, + file_size=None, + lang_id=None, +): checksum, extension = os.path.splitext(filename) - extension = extension.lstrip('.') + extension = extension.lstrip(".") filepath = models.generate_object_storage_name(checksum, filename) # Download file if it hasn't already been downloaded if download_url and not default_storage.exists(filepath): buffer = BytesIO() - response = requests.get('{}/content/storage/{}/{}/{}'.format(download_url, filename[0], filename[1], filename)) + response = requests.get( + "{}/content/storage/{}/{}/{}".format( + download_url, filename[0], filename[1], filename + ) + ) for chunk in response: buffer.write(chunk) - checksum, _, filepath = write_raw_content_to_storage(buffer.getvalue(), ext=extension) + checksum, _, filepath = write_raw_content_to_storage( + buffer.getvalue(), ext=extension + ) buffer.close() # Save values to new file object @@ -298,56 +358,77 @@ def download_file(filename, download_url=None, contentnode=None, assessment_item def create_files(cursor, contentnode, indent=0, download_url=None): - """ create_files: Get license - Args: - cursor (sqlite3.Connection): connection to export database - contentnode (models.ContentNode): node file references - indent (int): How far to indent print statements - Returns: None + """create_files: Get license + Args: + cursor (sqlite3.Connection): connection to export database + contentnode (models.ContentNode): node file references + indent (int): How far to indent print statements + Returns: None """ # Parse database for files referencing content node and make file models - sql_command = 'SELECT checksum, extension, file_size, contentnode_id, '\ - 'lang_id, preset FROM {table} WHERE contentnode_id=\'{id}\';'\ - .format(table=FILE_TABLE, id=contentnode.node_id) + sql_command = ( + "SELECT checksum, extension, file_size, contentnode_id, " + "lang_id, preset FROM {table} WHERE contentnode_id='{id}';".format( + table=FILE_TABLE, id=contentnode.node_id + ) + ) query = cursor.execute(sql_command).fetchall() for 
checksum, extension, file_size, contentnode_id, lang_id, preset in query: filename = "{}.{}".format(checksum, extension) - log.info("{indent} * FILE {filename}...".format(indent=" |" * indent, filename=filename)) + log.info( + "{indent} * FILE {filename}...".format( + indent=" |" * indent, filename=filename + ) + ) try: - download_file(filename, download_url=download_url, contentnode=contentnode, preset=preset, file_size=file_size, lang_id=lang_id) + download_file( + filename, + download_url=download_url, + contentnode=contentnode, + preset=preset, + file_size=file_size, + lang_id=lang_id, + ) except IOError as e: log.warning("\b FAILED (check logs for more details)") - sys.stderr.write("Restoration Process Error: Failed to save file object {}: {}".format(filename, os.strerror(e.errno))) + sys.stderr.write( + "Restoration Process Error: Failed to save file object {}: {}".format( + filename, os.strerror(e.errno) + ) + ) continue def create_tags(cursor, contentnode, target_id, indent=0): - """ create_tags: Create tags associated with node - Args: - cursor (sqlite3.Connection): connection to export database - contentnode (models.ContentNode): node file references - target_id (str): channel_id to write to - indent (int): How far to indent print statements - Returns: None + """create_tags: Create tags associated with node + Args: + cursor (sqlite3.Connection): connection to export database + contentnode (models.ContentNode): node file references + target_id (str): channel_id to write to + indent (int): How far to indent print statements + Returns: None """ # Parse database for files referencing content node and make file models - sql_command = 'SELECT ct.id, ct.tag_name FROM {cnttable} cnt '\ - 'JOIN {cttable} ct ON cnt.contenttag_id = ct.id ' \ - 'WHERE cnt.contentnode_id=\'{id}\';'\ - .format( + sql_command = ( + "SELECT ct.id, ct.tag_name FROM {cnttable} cnt " + "JOIN {cttable} ct ON cnt.contenttag_id = ct.id " + "WHERE cnt.contentnode_id='{id}';".format( cnttable=NODE_TAG_TABLE, cttable=TAG_TABLE, id=contentnode.node_id, ) + ) query = cursor.execute(sql_command).fetchall() # Build up list of tags tag_list = [] for id, tag_name in query: - log.info("{indent} ** TAG {tag}...".format(indent=" |" * indent, tag=tag_name)) + log.info( + "{indent} ** TAG {tag}...".format(indent=" |" * indent, tag=tag_name) + ) # Save values to new or existing tag object tag_obj, is_new = models.ContentTag.objects.get_or_create( pk=id, @@ -362,140 +443,198 @@ def create_tags(cursor, contentnode, target_id, indent=0): def create_assessment_items(cursor, contentnode, indent=0, download_url=None): - """ create_assessment_items: Generate assessment items based on perseus zip - Args: - cursor (sqlite3.Connection): connection to export database - contentnode (models.ContentNode): node assessment items reference - indent (int): How far to indent print statements - download_url (str): Domain to download files from - Returns: None + """create_assessment_items: Generate assessment items based on perseus zip + Args: + cursor (sqlite3.Connection): connection to export database + contentnode (models.ContentNode): node assessment items reference + indent (int): How far to indent print statements + download_url (str): Domain to download files from + Returns: None """ # Parse database for files referencing content node and make file models - sql_command = 'SELECT checksum, extension '\ - 'preset FROM {table} WHERE contentnode_id=\'{id}\' AND preset=\'exercise\';'\ - .format(table=FILE_TABLE, id=contentnode.node_id) + sql_command = 
( + "SELECT checksum, extension " + "preset FROM {table} WHERE contentnode_id='{id}' AND preset='exercise';".format( + table=FILE_TABLE, id=contentnode.node_id + ) + ) query = cursor.execute(sql_command).fetchall() for checksum, extension in query: filename = "{}.{}".format(checksum, extension) - log.info("{indent} * EXERCISE {filename}...".format(indent=" |" * indent, filename=filename)) + log.info( + "{indent} * EXERCISE {filename}...".format( + indent=" |" * indent, filename=filename + ) + ) try: # Store the downloaded zip into temporary storage - tempf = tempfile.NamedTemporaryFile(suffix='.{}'.format(extension), delete=False) - response = requests.get('{}/content/storage/{}/{}/{}'.format(download_url, filename[0], filename[1], filename)) + tempf = tempfile.NamedTemporaryFile( + suffix=".{}".format(extension), delete=False + ) + response = requests.get( + "{}/content/storage/{}/{}/{}".format( + download_url, filename[0], filename[1], filename + ) + ) for chunk in response: tempf.write(chunk) tempf.close() extract_assessment_items(tempf.name, contentnode, download_url=download_url) except IOError as e: log.warning("\b FAILED (check logs for more details)") - sys.stderr.write("Restoration Process Error: Failed to save file object {}: {}".format(filename, os.strerror(e.errno))) + sys.stderr.write( + "Restoration Process Error: Failed to save file object {}: {}".format( + filename, os.strerror(e.errno) + ) + ) continue finally: os.unlink(tempf.name) def extract_assessment_items(filepath, contentnode, download_url=None): - """ extract_assessment_items: Create and save assessment items to content node - Args: - filepath (str): Where perseus zip is stored - contentnode (models.ContentNode): node assessment items reference - download_url (str): Domain to download files from - Returns: None + """extract_assessment_items: Create and save assessment items to content node + Args: + filepath (str): Where perseus zip is stored + contentnode (models.ContentNode): node assessment items reference + download_url (str): Domain to download files from + Returns: None """ try: tempdir = tempfile.mkdtemp() - with zipfile.ZipFile(filepath, 'r') as zipf: + with zipfile.ZipFile(filepath, "r") as zipf: zipf.extractall(tempdir) os.chdir(tempdir) - with open('exercise.json', 'rb') as fobj: + with open("exercise.json", "rb") as fobj: data = json.load(fobj) - for index, assessment_id in enumerate(data['all_assessment_items']): - with open('{}.json'.format(assessment_id), 'rb') as fobj: + for index, assessment_id in enumerate(data["all_assessment_items"]): + with open("{}.json".format(assessment_id), "rb") as fobj: assessment_item = generate_assessment_item( assessment_id, index, - data['assessment_mapping'][assessment_id], + data["assessment_mapping"][assessment_id], json.load(fobj), - download_url=download_url + download_url=download_url, ) contentnode.assessment_items.add(assessment_item) finally: shutil.rmtree(tempdir) -def generate_assessment_item(assessment_id, order, assessment_type, assessment_data, download_url=None): - """ generate_assessment_item: Generates a new assessment item - Args: - assessment_id (str): AssessmentItem.assessment_id value - order (Number): AssessmentItem.order value - assessment_type (str): AssessmentItem.type value - assessment_data (dict): Extracted data from perseus file - download_url (str): Domain to download files from - Returns: models.AssessmentItem +def generate_assessment_item( + assessment_id, order, assessment_type, assessment_data, download_url=None +): + 
"""generate_assessment_item: Generates a new assessment item + Args: + assessment_id (str): AssessmentItem.assessment_id value + order (Number): AssessmentItem.order value + assessment_type (str): AssessmentItem.type value + assessment_data (dict): Extracted data from perseus file + download_url (str): Domain to download files from + Returns: models.AssessmentItem """ assessment_item = models.AssessmentItem.objects.create( - assessment_id=assessment_id, - type=assessment_type, - order=order + assessment_id=assessment_id, type=assessment_type, order=order ) if assessment_type == exercises.PERSEUS_QUESTION: assessment_item.raw_data = json.dumps(assessment_data) else: # Parse questions - assessment_data['question']['content'] = '\n\n'.join(assessment_data['question']['content'].split('\n\n')[:-1]) - assessment_item.question = process_content(assessment_data['question'], assessment_item, download_url=download_url) + assessment_data["question"]["content"] = "\n\n".join( + assessment_data["question"]["content"].split("\n\n")[:-1] + ) + assessment_item.question = process_content( + assessment_data["question"], assessment_item, download_url=download_url + ) # Parse answers - answer_data = assessment_data['question']['widgets'][ANSWER_FIELD_MAP[assessment_type]]['options'] + answer_data = assessment_data["question"]["widgets"][ + ANSWER_FIELD_MAP[assessment_type] + ]["options"] if assessment_type == exercises.INPUT_QUESTION: - assessment_item.answers = json.dumps([ - {'answer': answer['value'], 'correct': True} for answer in answer_data['answers'] - ]) + assessment_item.answers = json.dumps( + [ + {"answer": answer["value"], "correct": True} + for answer in answer_data["answers"] + ] + ) else: - assessment_item.answers = json.dumps([ - {'answer': process_content(answer, assessment_item, download_url=download_url), 'correct': answer['correct']} - for answer in answer_data['choices'] - ]) - assessment_item.randomize = answer_data['randomize'] + assessment_item.answers = json.dumps( + [ + { + "answer": process_content( + answer, assessment_item, download_url=download_url + ), + "correct": answer["correct"], + } + for answer in answer_data["choices"] + ] + ) + assessment_item.randomize = answer_data["randomize"] # Parse hints - assessment_item.hints = json.dumps([ - {'hint': process_content(hint, assessment_item, download_url=download_url)} - for hint in assessment_data['hints'] - ]) + assessment_item.hints = json.dumps( + [ + { + "hint": process_content( + hint, assessment_item, download_url=download_url + ) + } + for hint in assessment_data["hints"] + ] + ) assessment_item.save() return assessment_item def process_content(data, assessment_item, download_url=None): - """ process_content: Parses perseus text for special formatting (e.g. formulas, images) - Args: - data (dict): Perseus data to parse (e.g. parsing 'question' field) - download_url (str): Domain to download files from - assessment_item (models.AssessmentItem): assessment item to save images to - Returns: models.AssessmentItem + """process_content: Parses perseus text for special formatting (e.g. formulas, images) + Args: + data (dict): Perseus data to parse (e.g. 
parsing 'question' field) + download_url (str): Domain to download files from + assessment_item (models.AssessmentItem): assessment item to save images to + Returns: models.AssessmentItem """ - data['content'] = data['content'].replace(' ', '') # Remove unrecognized non unicode characters + data["content"] = data["content"].replace( + " ", "" + ) # Remove unrecognized non unicode characters # Process formulas - for match in re.finditer(r'(\$[^\$☣]+\$)', data['content']): - data['content'] = data['content'].replace(match.group(0), '${}$'.format(match.group(0))) + for match in re.finditer(r"(\$[^\$☣]+\$)", data["content"]): + data["content"] = data["content"].replace( + match.group(0), "${}$".format(match.group(0)) + ) # Process images - for match in re.finditer(r'!\[[^\]]*\]\((\$(\{☣ LOCALPATH\}\/images)\/([^\.]+\.[^\)]+))\)', data['content']): - data['content'] = data['content'].replace(match.group(2), exercises.CONTENT_STORAGE_PLACEHOLDER) - image_data = data['images'].get(match.group(1)) - if image_data and image_data.get('width'): - data['content'] = data['content'].replace(match.group(3), '{} ={}x{}'.format(match.group(3), image_data['width'], image_data['height'])) + for match in re.finditer( + r"!\[[^\]]*\]\((\$(\{☣ LOCALPATH\}\/images)\/([^\.]+\.[^\)]+))\)", + data["content"], + ): + data["content"] = data["content"].replace( + match.group(2), exercises.CONTENT_STORAGE_PLACEHOLDER + ) + image_data = data["images"].get(match.group(1)) + if image_data and image_data.get("width"): + data["content"] = data["content"].replace( + match.group(3), + "{} ={}x{}".format( + match.group(3), image_data["width"], image_data["height"] + ), + ) # Save files to db - download_file(match.group(3), assessment_item=assessment_item, preset=format_presets.EXERCISE, download_url=download_url) + download_file( + match.group(3), + assessment_item=assessment_item, + preset=format_presets.EXERCISE, + download_url=download_url, + ) - return data['content'] + return data["content"] diff --git a/contentcuration/contentcuration/utils/incidents.py b/contentcuration/contentcuration/utils/incidents.py index ae48033556..ef9e0589d7 100644 --- a/contentcuration/contentcuration/utils/incidents.py +++ b/contentcuration/contentcuration/utils/incidents.py @@ -4,69 +4,76 @@ # General message to display when readonly mode is off "default": { "readonly": False, - "message": _("There was a problem with a third-party service. " - "This means certain operations might be blocked. " - "We appreciate your patience while these issues " - "are being resolved.") + "message": _( + "There was a problem with a third-party service. " + "This means certain operations might be blocked. " + "We appreciate your patience while these issues " + "are being resolved." + ), }, - # General message to display when readonly mode is turned on "readonly": { "readonly": True, - "message": _("EMERGENCY MAINTENANCE Kolibri Studio is " - "operating on read-only mode for the time being in " - "order for us to resolve some maintenance issues. " - "This means all editing capabilities are disabled " - "at the moment. We're currently working very hard " - "to fix the issue as soon as possible. If you have " - "any questions please contact us at " - "content@learningequality.org. We apologize for any " - "inconvenience caused and appreciate your patience " - "while we resolve these issues.") + "message": _( + "EMERGENCY MAINTENANCE Kolibri Studio is " + "operating on read-only mode for the time being in " + "order for us to resolve some maintenance issues. 
" + "This means all editing capabilities are disabled " + "at the moment. We're currently working very hard " + "to fix the issue as soon as possible. If you have " + "any questions please contact us at " + "content@learningequality.org. We apologize for any " + "inconvenience caused and appreciate your patience " + "while we resolve these issues." + ), }, - # Our database has crashed "database": { "readonly": True, - "message": _("EMERGENCY MAINTENANCE Kolibri Studio is " - "operating on read-only mode for the time being in " - "order for us to resolve some database issues. " - "This means all editing capabilities are disabled " - "at the moment. We're currently working very hard " - "to fix the issue as soon as possible. If you have " - "any questions please contact us at " - "content@learningequality.org. We apologize for any " - "inconvenience caused and appreciate your patience " - "while we resolve these issues.") + "message": _( + "EMERGENCY MAINTENANCE Kolibri Studio is " + "operating on read-only mode for the time being in " + "order for us to resolve some database issues. " + "This means all editing capabilities are disabled " + "at the moment. We're currently working very hard " + "to fix the issue as soon as possible. If you have " + "any questions please contact us at " + "content@learningequality.org. We apologize for any " + "inconvenience caused and appreciate your patience " + "while we resolve these issues." + ), }, - # Google Cloud Storage is down (used for file storage) "google_cloud_storage": { "readonly": False, - "message": _("We are encountering issues with Google Cloud Storage. " - "This means any file uploading and publishing operations " - "are currently unavailable. We appreciate your patience " - "while these issues are being resolved. To check the status " - "of this service, please visit here") + "message": _( + "We are encountering issues with Google Cloud Storage. " + "This means any file uploading and publishing operations " + "are currently unavailable. We appreciate your patience " + "while these issues are being resolved. To check the status " + "of this service, please visit here" + ), }, - # Redis is down (used for celery tasks like publishing) "redis": { "readonly": False, - "message": _("We are encountering issues with a third-party service. " - "This means publishing is currently unavailable. We appreciate " - "your patience while these issues are being resolved.") + "message": _( + "We are encountering issues with a third-party service. " + "This means publishing is currently unavailable. We appreciate " + "your patience while these issues are being resolved." + ), }, - # Kubernetes is down (used for hosting Studio) "kubernetes": { "readonly": False, - "message": _("We are encountering issues with our data center. " - "This means you may encounter networking problems " - "while using Studio. We appreciate your patience " - "while these issues are being resolved. To check " - "the status of this service, please visit here") - } + "message": _( + "We are encountering issues with our data center. " + "This means you may encounter networking problems " + "while using Studio. We appreciate your patience " + "while these issues are being resolved. 
To check " + "the status of this service, please visit here" + ), + }, } diff --git a/contentcuration/contentcuration/utils/nodes.py b/contentcuration/contentcuration/utils/nodes.py index 78e9cdc80f..200c640a9c 100644 --- a/contentcuration/contentcuration/utils/nodes.py +++ b/contentcuration/contentcuration/utils/nodes.py @@ -49,32 +49,40 @@ def map_files_to_node(user, node, data): # noqa: C901 ext = ext1.lstrip(".") # Determine a preset if none is given - kind_preset = FormatPreset.get_preset(file_data["preset"]) or FormatPreset.guess_format_preset(filename) + kind_preset = FormatPreset.get_preset( + file_data["preset"] + ) or FormatPreset.guess_format_preset(filename) file_path = generate_object_storage_name(checksum, filename) storage = default_storage if not storage.exists(file_path): - raise IOError('{} not found'.format(file_path)) + raise IOError("{} not found".format(file_path)) try: - if file_data.get('language'): + if file_data.get("language"): # TODO: Remove DB call per file? - file_data['language'] = Language.objects.get(pk=file_data['language']) + file_data["language"] = Language.objects.get(pk=file_data["language"]) except ObjectDoesNotExist: - invalid_lang = file_data.get('language') - logging.warning("file_data with language {} does not exist.".format(invalid_lang)) - return ValidationError("file_data given was invalid; expected string, got {}".format(invalid_lang)) + invalid_lang = file_data.get("language") + logging.warning( + "file_data with language {} does not exist.".format(invalid_lang) + ) + return ValidationError( + "file_data given was invalid; expected string, got {}".format( + invalid_lang + ) + ) resource_obj = File( checksum=checksum, contentnode=node, file_format_id=ext, - original_filename=file_data.get('original_filename') or 'file', - source_url=file_data.get('source_url'), - file_size=file_data['size'], + original_filename=file_data.get("original_filename") or "file", + source_url=file_data.get("source_url"), + file_size=file_data["size"], preset=kind_preset, - language_id=file_data.get('language'), + language_id=file_data.get("language"), uploaded_by=user, duration=file_data.get("duration"), ) @@ -89,11 +97,13 @@ def map_files_to_node(user, node, data): # noqa: C901 # Handle thumbnail if resource_obj.preset and resource_obj.preset.thumbnail: - node.thumbnail_encoding = json.dumps({ - 'base64': get_thumbnail_encoding(str(resource_obj)), - 'points': [], - 'zoom': 0 - }) + node.thumbnail_encoding = json.dumps( + { + "base64": get_thumbnail_encoding(str(resource_obj)), + "points": [], + "zoom": 0, + } + ) node.save() @@ -120,16 +130,16 @@ def map_files_to_assessment_item(user, assessment_item, data): file_path = generate_object_storage_name(checksum, filename) storage = default_storage if not storage.exists(file_path): - raise IOError('{} not found'.format(file_path)) + raise IOError("{} not found".format(file_path)) resource_obj = File( checksum=checksum, assessment_item=assessment_item, file_format_id=ext, - original_filename=file_data.get('original_filename') or 'file', - source_url=file_data.get('source_url'), - file_size=file_data['size'], - preset_id=file_data["preset"], # assessment_item-files always have a preset + original_filename=file_data.get("original_filename") or "file", + source_url=file_data.get("source_url"), + file_size=file_data["size"], + preset_id=file_data["preset"], # assessment_item-files always have a preset uploaded_by=user, ) resource_obj.file_on_disk.name = file_path @@ -144,17 +154,19 @@ def 
map_files_to_slideshow_slide_item(user, node, slides, files): filename = file_data["filename"] checksum, ext = filename.split(".") - matching_slide = next((slide for slide in slides if slide.metadata["checksum"] == checksum), None) + matching_slide = next( + (slide for slide in slides if slide.metadata["checksum"] == checksum), None + ) if not matching_slide: # TODO(Jacob) Determine proper error type... raise it. - print("NO MATCH") + print("NO MATCH") # noqa: T201 file_path = generate_object_storage_name(checksum, filename) storage = default_storage if not storage.exists(file_path): - raise IOError('{} not found'.format(file_path)) + raise IOError("{} not found".format(file_path)) file_obj = File( slideshow_slide=matching_slide, @@ -164,7 +176,7 @@ def map_files_to_slideshow_slide_item(user, node, slides, files): source_url=file_data.get("source_url"), file_size=file_data["size"], preset_id=file_data["preset"], - uploaded_by=user + uploaded_by=user, ) file_obj.file_on_disk.name = file_path @@ -179,19 +191,19 @@ def filter_out_nones(data): def _get_diff_filepath(node_id1, node_id2): - return os.path.join(settings.DIFFS_ROOT, node_id1, '{}.json'.format(node_id2)) + return os.path.join(settings.DIFFS_ROOT, node_id1, "{}.json".format(node_id2)) def _get_created_time(node): - return node.created.strftime('%Y-%m-%d %H:%M:%S') + return node.created.strftime("%Y-%m-%d %H:%M:%S") def get_diff(updated, original): jsonpath = _get_diff_filepath(updated.pk, original.pk) if default_storage.exists(jsonpath): - with default_storage.open(jsonpath, 'rb') as jsonfile: + with default_storage.open(jsonpath, "rb") as jsonfile: data = json.load(jsonfile) - if data['generated'] == _get_created_time(updated): + if data["generated"] == _get_created_time(updated): return data return None @@ -203,28 +215,54 @@ def generate_diff(updated_id, original_id): main_descendants = original.get_descendants() if original else None updated_descendants = updated.get_descendants() if updated else None - original_stats = main_descendants.values('kind_id').annotate(count=Count('kind_id')).order_by() if original else {} - updated_stats = updated_descendants.values('kind_id').annotate(count=Count('kind_id')).order_by() if updated else {} - - original_file_sizes = main_descendants.aggregate( - resource_size=Sum('files__file_size'), - assessment_size=Sum('assessment_items__files__file_size'), - assessment_count=Count('assessment_items'), - ) if original else {} - - updated_file_sizes = updated_descendants.aggregate( - resource_size=Sum('files__file_size'), - assessment_size=Sum('assessment_items__files__file_size'), - assessment_count=Count('assessment_items') - ) if updated else {} - - original_file_size = (original_file_sizes.get('resource_size') or 0) + (original_file_sizes.get('assessment_size') or 0) - updated_file_size = (updated_file_sizes.get('resource_size') or 0) + (updated_file_sizes.get('assessment_size') or 0) - original_question_count = original_file_sizes.get('assessment_count') or 0 - updated_question_count = updated_file_sizes.get('assessment_count') or 0 - - original_resource_count = original.get_descendants().exclude(kind_id='topic').count() if original else 0 - updated_resource_count = updated.get_descendants().exclude(kind_id='topic').count() if updated else 0 + original_stats = ( + main_descendants.values("kind_id").annotate(count=Count("kind_id")).order_by() + if original + else {} + ) + updated_stats = ( + updated_descendants.values("kind_id") + .annotate(count=Count("kind_id")) + .order_by() + if updated + else 
{} + ) + + original_file_sizes = ( + main_descendants.aggregate( + resource_size=Sum("files__file_size"), + assessment_size=Sum("assessment_items__files__file_size"), + assessment_count=Count("assessment_items"), + ) + if original + else {} + ) + + updated_file_sizes = ( + updated_descendants.aggregate( + resource_size=Sum("files__file_size"), + assessment_size=Sum("assessment_items__files__file_size"), + assessment_count=Count("assessment_items"), + ) + if updated + else {} + ) + + original_file_size = (original_file_sizes.get("resource_size") or 0) + ( + original_file_sizes.get("assessment_size") or 0 + ) + updated_file_size = (updated_file_sizes.get("resource_size") or 0) + ( + updated_file_sizes.get("assessment_size") or 0 + ) + original_question_count = original_file_sizes.get("assessment_count") or 0 + updated_question_count = updated_file_sizes.get("assessment_count") or 0 + + original_resource_count = ( + original.get_descendants().exclude(kind_id="topic").count() if original else 0 + ) + updated_resource_count = ( + updated.get_descendants().exclude(kind_id="topic").count() if updated else 0 + ) stats = [ { @@ -234,8 +272,12 @@ def generate_diff(updated_id, original_id): }, { "field": "ricecooker_version", - "original": original.extra_fields.get('ricecooker_version') if original and original.extra_fields else "", - "changed": updated.extra_fields.get('ricecooker_version') if updated and updated.extra_fields else "", + "original": original.extra_fields.get("ricecooker_version") + if original and original.extra_fields + else "", + "changed": updated.extra_fields.get("ricecooker_version") + if updated and updated.extra_fields + else "", }, { "field": "file_size_in_bytes", @@ -249,44 +291,70 @@ def generate_diff(updated_id, original_id): "original": original_resource_count, "changed": updated_resource_count, "difference": updated_resource_count - original_resource_count, - } + }, ] for kind, name in content_kinds.choices: - original_kind = original_stats.get(kind_id=kind)['count'] if original and original_stats.filter(kind_id=kind).exists() else 0 - updated_kind = updated_stats.get(kind_id=kind)['count'] if updated and updated_stats.filter(kind_id=kind).exists() else 0 - stats.append({"field": "count_{}s".format(kind), "original": original_kind, "changed": updated_kind, "difference": updated_kind - original_kind}) + original_kind = ( + original_stats.get(kind_id=kind)["count"] + if original and original_stats.filter(kind_id=kind).exists() + else 0 + ) + updated_kind = ( + updated_stats.get(kind_id=kind)["count"] + if updated and updated_stats.filter(kind_id=kind).exists() + else 0 + ) + stats.append( + { + "field": "count_{}s".format(kind), + "original": original_kind, + "changed": updated_kind, + "difference": updated_kind - original_kind, + } + ) # Add number of questions - stats.append({ - "field": "count_questions", - "original": original_question_count, - "changed": updated_question_count, - "difference": updated_question_count - original_question_count, - }) + stats.append( + { + "field": "count_questions", + "original": original_question_count, + "changed": updated_question_count, + "difference": updated_question_count - original_question_count, + } + ) # Add number of subtitles - original_subtitle_count = main_descendants.filter(files__preset_id=format_presets.VIDEO_SUBTITLE).count() if original else 0 - updated_subtitle_count = updated_descendants.filter(files__preset_id=format_presets.VIDEO_SUBTITLE).count() if updated else 0 - stats.append({ - "field": "count_subtitles", - 
"original": original_subtitle_count, - "changed": updated_subtitle_count, - "difference": updated_subtitle_count - original_subtitle_count, - }) + original_subtitle_count = ( + main_descendants.filter(files__preset_id=format_presets.VIDEO_SUBTITLE).count() + if original + else 0 + ) + updated_subtitle_count = ( + updated_descendants.filter( + files__preset_id=format_presets.VIDEO_SUBTITLE + ).count() + if updated + else 0 + ) + stats.append( + { + "field": "count_subtitles", + "original": original_subtitle_count, + "changed": updated_subtitle_count, + "difference": updated_subtitle_count - original_subtitle_count, + } + ) # Do one more check before we write the json file in case multiple tasks were triggered # and we need to ensure that we don't overwrite the latest version of the changed diff jsondata = get_diff(updated, original) creation_time = _get_created_time(updated) - if not jsondata or jsondata['generated'] <= creation_time: - jsondata = { - 'generated': creation_time, - 'stats': stats - } + if not jsondata or jsondata["generated"] <= creation_time: + jsondata = {"generated": creation_time, "stats": stats} jsonpath = _get_diff_filepath(updated_id, original_id) - default_storage.save(jsonpath, BytesIO(json.dumps(jsondata).encode('utf-8'))) + default_storage.save(jsonpath, BytesIO(json.dumps(jsondata).encode("utf-8"))) return jsondata @@ -334,8 +402,12 @@ def get_size(self): :return: An integer representing the resource size """ - sizes = self.queryset.values("checksum").distinct().aggregate(resource_size=Sum("file_size")) - return sizes['resource_size'] + sizes = ( + self.queryset.values("checksum") + .distinct() + .aggregate(resource_size=Sum("file_size")) + ) + return sizes["resource_size"] def modified_since(self, compare_datetime): """ @@ -404,7 +476,11 @@ def calculate_resource_size(node, force=False): # since we added file.modified as nullable, if the result is None/Null, then we know that it # hasn't been modified since our last cached value, so we only need to check is False - if size is not None and modified is not None and db.modified_since(modified) is False: + if ( + size is not None + and modified is not None + and db.modified_since(modified) is False + ): # use cache if not modified since cache modified timestamp return size, False @@ -438,7 +514,10 @@ def migrate_extra_fields(extra_fields): m = extra_fields.pop("m", None) n = extra_fields.pop("n", None) mastery_model = extra_fields.pop("mastery_model", None) - if not extra_fields.get("options", {}).get("completion_criteria", {}) and mastery_model is not None: + if ( + not extra_fields.get("options", {}).get("completion_criteria", {}) + and mastery_model is not None + ): extra_fields["options"] = extra_fields.get("options", {}) extra_fields["options"]["completion_criteria"] = { "threshold": { @@ -454,6 +533,9 @@ def migrate_extra_fields(extra_fields): def validate_and_conform_to_schema_threshold_none(completion_criteria_validated): model = completion_criteria_validated.get("model", {}) if model in ["reference", "determined_by_resource"]: - if "threshold" not in completion_criteria_validated or completion_criteria_validated["threshold"] is not None: + if ( + "threshold" not in completion_criteria_validated + or completion_criteria_validated["threshold"] is not None + ): completion_criteria_validated["threshold"] = None return completion_criteria_validated diff --git a/contentcuration/contentcuration/utils/pagination.py b/contentcuration/contentcuration/utils/pagination.py index 2a6fb8c946..57b724dc59 100644 --- 
a/contentcuration/contentcuration/utils/pagination.py +++ b/contentcuration/contentcuration/utils/pagination.py @@ -28,7 +28,9 @@ def __init__(self, object_list, number, paginator): class ValuesViewsetPaginator(Paginator): def __init__(self, object_list, *args, **kwargs): if not isinstance(object_list, QuerySet): - raise TypeError("ValuesViewsetPaginator is only intended for use with Querysets") + raise TypeError( + "ValuesViewsetPaginator is only intended for use with Querysets" + ) self.queryset = object_list object_list = object_list.values_list("pk", flat=True).distinct() super(ValuesViewsetPaginator, self).__init__(object_list, *args, **kwargs) @@ -48,10 +50,7 @@ def count(self): """ try: query_string = str(self.object_list.query).encode("utf8") - cache_key = ( - "query-count:" - + hashlib.md5(query_string).hexdigest() - ) + cache_key = "query-count:" + hashlib.md5(query_string).hexdigest() value = cache.get(cache_key) if value is None: value = super(CachedValuesViewsetPaginator, self).count @@ -113,20 +112,20 @@ def get_paginated_response(self, data): def get_paginated_response_schema(self, schema): return { - 'type': 'object', - 'properties': { - 'count': { - 'type': 'integer', - 'example': 123, + "type": "object", + "properties": { + "count": { + "type": "integer", + "example": 123, }, - 'results': schema, - 'page': { - 'type': 'integer', - 'example': 123, + "results": schema, + "page": { + "type": "integer", + "example": 123, }, - 'total_pages': { - 'type': 'integer', - 'example': 123, + "total_pages": { + "type": "integer", + "example": 123, }, }, } diff --git a/contentcuration/contentcuration/utils/parser.py b/contentcuration/contentcuration/utils/parser.py index aeadcad33a..7b4ab38fd8 100644 --- a/contentcuration/contentcuration/utils/parser.py +++ b/contentcuration/contentcuration/utils/parser.py @@ -24,32 +24,62 @@ LANGUAGE = get_language() or "" -SEP = r',' -POINT = r'\.' +SEP = r"," +POINT = r"\." if LANGUAGE.startswith("es"): - SEP = r'\.' - POINT = r',' + SEP = r"\." + POINT = r"," -SIGN = r'-?' -DIGIT = r'[0-9]' -NON_ZERO_DIGIT = r'[1-9]' +SIGN = r"-?" 
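
The parser constants being reflowed in this hunk compose one large VALID_NUMBER pattern out of small locale-aware fragments: Spanish locales swap SEP and POINT, so "1.234,5" reads as a thousands-grouped decimal. A minimal sketch of that composition under the same assumption — only the es/en split and the decimal rule, not the full fraction, mixed-number, and exponent grammar of the real module:

    import re

    def build_decimal_pattern(lang="en"):
        # Spanish locales use "." to group thousands and "," as the decimal
        # point, so the separator and point fragments trade places.
        sep, point = (r"\.", ",") if lang.startswith("es") else (",", r"\.")
        digit = "[0-9]"
        formatted_int = "{d}{{1,3}}(?:{s}{d}{{3}})+".format(d=digit, s=sep)
        integer = "(-?(?:{f}|{d}*))".format(f=formatted_int, d=digit)
        return re.compile("({i}{p}{d}*)".format(i=integer, p=point, d=digit))

    # "1.234,5" is a complete Spanish decimal but not a complete English one.
    assert build_decimal_pattern("es").fullmatch("1.234,5")
    assert not build_decimal_pattern("en").fullmatch("1.234,5")
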
+DIGIT = r"[0-9]" +NON_ZERO_DIGIT = r"[1-9]" UNFORMATTED_INT = re.compile("{digit}*".format(digit=DIGIT)) -FORMATTED_INT = re.compile("{digit}{{1,3}}(?:{sep}{digit}{{3}})+".format(digit=DIGIT, sep=SEP)) -INTEGER = re.compile("({sign}(?:{formatted}|{unformatted}))".format(sign=SIGN, formatted=FORMATTED_INT.pattern, unformatted=UNFORMATTED_INT.pattern)) -DECIMAL = re.compile("({integer}{point}{unformatted})".format(integer=INTEGER.pattern, unformatted=UNFORMATTED_INT.pattern, point=POINT)) -NON_ZERO_INT = re.compile("({sign}{non_zero}(?:{digit}{{0,2}}(?:{sep}{digit}{{3}})+|{unformatted})?)".format(sign=SIGN, - non_zero=NON_ZERO_DIGIT, - digit=DIGIT, - unformatted=UNFORMATTED_INT.pattern, - sep=SEP)) -FRACTION = re.compile("({integer}/{non_zero})".format(integer=INTEGER.pattern, non_zero=NON_ZERO_INT.pattern)) -MIXED_NUMBER = re.compile("({integer}) +({fraction})".format(integer=INTEGER.pattern, fraction=FRACTION.pattern)) -VALID_NUMBER = re.compile("({decimal}|{mixed_number}|{fraction}|{integer})".format(decimal=DECIMAL.pattern, - mixed_number=MIXED_NUMBER.pattern, - fraction=FRACTION.pattern, - integer=INTEGER.pattern)) +FORMATTED_INT = re.compile( + "{digit}{{1,3}}(?:{sep}{digit}{{3}})+".format(digit=DIGIT, sep=SEP) +) +INTEGER = re.compile( + "({sign}(?:{formatted}|{unformatted}))".format( + sign=SIGN, formatted=FORMATTED_INT.pattern, unformatted=UNFORMATTED_INT.pattern + ) +) +DECIMAL = re.compile( + "({integer}{point}{unformatted})".format( + integer=INTEGER.pattern, unformatted=UNFORMATTED_INT.pattern, point=POINT + ) +) +NON_ZERO_INT = re.compile( + "({sign}{non_zero}(?:{digit}{{0,2}}(?:{sep}{digit}{{3}})+|{unformatted})?)".format( + sign=SIGN, + non_zero=NON_ZERO_DIGIT, + digit=DIGIT, + unformatted=UNFORMATTED_INT.pattern, + sep=SEP, + ) +) +FRACTION = re.compile( + "({integer}/{non_zero})".format( + integer=INTEGER.pattern, non_zero=NON_ZERO_INT.pattern + ) +) +MIXED_NUMBER = re.compile( + "({integer}) +({fraction})".format( + integer=INTEGER.pattern, fraction=FRACTION.pattern + ) +) +VALID_NUMBER = re.compile( + "({decimal}|{mixed_number}|{fraction}|{integer})".format( + decimal=DECIMAL.pattern, + mixed_number=MIXED_NUMBER.pattern, + fraction=FRACTION.pattern, + integer=INTEGER.pattern, + ) +) PERCENTAGE = re.compile("({num})%".format(num=VALID_NUMBER.pattern)) -EXPONENT = re.compile("((?:{decimal}|{integer})e\\+?{integer})".format(decimal=DECIMAL.pattern, integer=INTEGER.pattern)) +EXPONENT = re.compile( + "((?:{decimal}|{integer})e\\+?{integer})".format( + decimal=DECIMAL.pattern, integer=INTEGER.pattern + ) +) def extract_value(text): @@ -58,7 +88,14 @@ def extract_value(text): def parse_valid_number(text): try: - return parse_exponent(text) or parse_percentage(text) or parse_mixed_number(text) or parse_fraction(text) or parse_decimal(text) or parse_integer(text) + return ( + parse_exponent(text) + or parse_percentage(text) + or parse_mixed_number(text) + or parse_fraction(text) + or parse_decimal(text) + or parse_integer(text) + ) except Exception: return None @@ -75,14 +112,18 @@ def parse_decimal(text): def parse_fraction(text): match = FRACTION.search(text) - return match and float(parse_integer(match.group(2))) / float(parse_integer(match.group(3))) + return match and float(parse_integer(match.group(2))) / float( + parse_integer(match.group(3)) + ) def parse_mixed_number(text): match = MIXED_NUMBER.search(text) if match: number = parse_integer(match.group(1)) - return (abs(number) + parse_fraction(match.group(3))) * (float(number) / float(abs(number))) + return (abs(number) 
+ parse_fraction(match.group(3))) * ( + float(number) / float(abs(number)) + ) return None @@ -102,17 +143,21 @@ def parse_exponent(text): def to_en(text): - return text.replace(SEP, '').replace(POINT, '.') + return text.replace(SEP, "").replace(POINT, ".") def load_json_string(json_string): """ - Using code from https://grimhacker.com/2016/04/24/loading-dirty-json-with-python/ - Instead of using ast.literal_eval to process malformed json, load this way - Arg: json_string (str) to parse - Returns json generated from string + Using code from https://grimhacker.com/2016/04/24/loading-dirty-json-with-python/ + Instead of using ast.literal_eval to process malformed json, load this way + Arg: json_string (str) to parse + Returns json generated from string """ - regex_replace = [(r"([ \{,:\[])(u)?'([^']+)'", r'\1"\3"'), (r" False([, \}\]])", r' false\1'), (r" True([, \}\]])", r' true\1')] + regex_replace = [ + (r"([ \{,:\[])(u)?'([^']+)'", r'\1"\3"'), + (r" False([, \}\]])", r" false\1"), + (r" True([, \}\]])", r" true\1"), + ] for r, s in regex_replace: json_string = re.sub(r, s, json_string) clean_json = json.loads(json_string) diff --git a/contentcuration/contentcuration/utils/publish.py b/contentcuration/contentcuration/utils/publish.py index 0b564751dc..bd2f59e73c 100644 --- a/contentcuration/contentcuration/utils/publish.py +++ b/contentcuration/contentcuration/utils/publish.py @@ -89,9 +89,7 @@ def __init__(self, time, channel_id): self.time = time self.channel_id = channel_id - message = ( - "publishing the channel with channel_id {} took {} seconds to complete, exceeding {} second threshold." - ) + message = "publishing the channel with channel_id {} took {} seconds to complete, exceeding {} second threshold." self.message = message.format( self.channel_id, self.time, PUBLISHING_UPDATE_THRESHOLD ) @@ -99,26 +97,57 @@ def __init__(self, time, channel_id): super(SlowPublishError, self).__init__(self.message) -def send_emails(channel, user_id, version_notes=''): - subject = render_to_string('registration/custom_email_subject.txt', {'subject': _('Kolibri Studio Channel Published')}) +def send_emails(channel, user_id, version_notes=""): + subject = render_to_string( + "registration/custom_email_subject.txt", + {"subject": _("Kolibri Studio Channel Published")}, + ) + subject = "".join(subject.splitlines()) token = channel.secret_tokens.filter(is_primary=True).first() - token = '{}-{}'.format(token.token[:5], token.token[-5:]) + token = "{}-{}".format(token.token[:5], token.token[-5:]) domain = "https://{}".format(Site.objects.get_current().domain) if user_id: user = ccmodels.User.objects.get(pk=user_id) - message = render_to_string('registration/channel_published_email.html', - {'channel': channel, 'user': user, 'token': token, 'notes': version_notes, 'domain': domain}) - user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, html_message=message) + message = render_to_string( + "registration/channel_published_email.html", + { + "channel": channel, + "user": user, + "token": token, + "notes": version_notes, + "domain": domain, + }, + ) + user.email_user( + subject, message, settings.DEFAULT_FROM_EMAIL, html_message=message + ) else: # Email all users about updates to channel for user in itertools.chain(channel.editors.all(), channel.viewers.all()): - message = render_to_string('registration/channel_published_email.html', - {'channel': channel, 'user': user, 'token': token, 'notes': version_notes, 'domain': domain}) - user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, 
html_message=message) + message = render_to_string( + "registration/channel_published_email.html", + { + "channel": channel, + "user": user, + "token": token, + "notes": version_notes, + "domain": domain, + }, + ) + user.email_user( + subject, message, settings.DEFAULT_FROM_EMAIL, html_message=message + ) -def create_content_database(channel, force, user_id, force_exercises, progress_tracker=None, use_staging_tree=False): +def create_content_database( + channel, + force, + user_id, + force_exercises, + progress_tracker=None, + use_staging_tree=False, +): """ :type progress_tracker: contentcuration.utils.celery.ProgressTracker|None """ @@ -131,10 +160,9 @@ def create_content_database(channel, force, user_id, force_exercises, progress_t if not use_staging_tree and not channel.main_tree.publishing: channel.mark_publishing(user_id) - call_command("migrate", - "content", - database=get_active_content_database(), - no_input=True) + call_command( + "migrate", "content", database=get_active_content_database(), no_input=True + ) if progress_tracker: progress_tracker.track(10) base_tree = channel.staging_tree if use_staging_tree else channel.main_tree @@ -154,10 +182,12 @@ def create_content_database(channel, force, user_id, force_exercises, progress_t if progress_tracker: progress_tracker.track(90) map_prerequisites(base_tree) - # Need to save as version being published, not current version + # Need to save as version being published, not current version version = "next" if use_staging_tree else channel.version + 1 save_export_database( - channel.pk, version, use_staging_tree, + channel.pk, + version, + use_staging_tree, ) if channel.public: mapper = ChannelMapper(kolibri_channel) @@ -170,7 +200,9 @@ def create_kolibri_license_object(ccnode): use_license_description = not ccnode.license.is_custom return kolibrimodels.License.objects.get_or_create( license_name=ccnode.license.license_name, - license_description=ccnode.license.license_description if use_license_description else ccnode.license_description + license_description=ccnode.license.license_description + if use_license_description + else ccnode.license_description, ) @@ -208,11 +240,15 @@ def __init__( inherit_metadata=False, ): if not root_node.is_publishable(): - raise ChannelIncompleteError("Attempted to publish a channel with an incomplete root node or no resources") + raise ChannelIncompleteError( + "Attempted to publish a channel with an incomplete root node or no resources" + ) self.root_node = root_node task_percent_total = 80.0 - total_nodes = root_node.get_descendant_count() + 1 # make sure we include root_node + total_nodes = ( + root_node.get_descendant_count() + 1 + ) # make sure we include root_node self.percent_per_node = task_percent_total / float(total_nodes) self.progress_tracker = progress_tracker self.default_language = default_language @@ -234,10 +270,16 @@ def _gather_inherited_metadata(self, node, inherited_fields): for field in inheritable_map_fields: metadata[field] = {} - inherited_keys = (inherited_fields.get(field) or {}).keys() if self.inherit_metadata else [] + inherited_keys = ( + (inherited_fields.get(field) or {}).keys() + if self.inherit_metadata + else [] + ) own_keys = (getattr(node, field) or {}).keys() # Get a list of all keys in reverse order of length so we can remove any less specific values - all_keys = sorted(set(inherited_keys).union(set(own_keys)), key=len, reverse=True) + all_keys = sorted( + set(inherited_keys).union(set(own_keys)), key=len, reverse=True + ) for key in all_keys: if not any(k != 
key and k.startswith(key) for k in all_keys): metadata[field][key] = True @@ -263,18 +305,33 @@ def recurse_nodes(self, node, inherited_fields): # noqa C901 if not mastery_model: raise ValueError("Exercise does not have a mastery model") except Exception as e: - logging.warning("Unable to parse exercise {id} mastery model: {error}".format(id=node.pk, error=str(e))) + logging.warning( + "Unable to parse exercise {id} mastery model: {error}".format( + id=node.pk, error=str(e) + ) + ) return metadata = self._gather_inherited_metadata(node, inherited_fields) - kolibrinode = create_bare_contentnode(node, self.default_language, self.channel_id, self.channel_name, metadata) + kolibrinode = create_bare_contentnode( + node, + self.default_language, + self.channel_id, + self.channel_name, + metadata, + ) if node.kind_id == content_kinds.EXERCISE: exercise_data = process_assessment_metadata(node, kolibrinode) - if self.force_exercises or node.changed or not \ - node.files.filter(preset_id=format_presets.EXERCISE).exists(): - create_perseus_exercise(node, kolibrinode, exercise_data, user_id=self.user_id) + if ( + self.force_exercises + or node.changed + or not node.files.filter(preset_id=format_presets.EXERCISE).exists() + ): + create_perseus_exercise( + node, kolibrinode, exercise_data, user_id=self.user_id + ) elif node.kind_id == content_kinds.SLIDESHOW: create_slideshow_manifest(node, user_id=self.user_id) elif node.kind_id == content_kinds.TOPIC: @@ -287,21 +344,23 @@ def recurse_nodes(self, node, inherited_fields): # noqa C901 def create_slideshow_manifest(ccnode, user_id=None): - print("Creating slideshow manifest...") + print("Creating slideshow manifest...") # noqa: T201 preset = ccmodels.FormatPreset.objects.filter(pk="slideshow_manifest")[0] ext = file_formats.JSON filename = "{0}.{ext}".format(ccnode.title, ext=ext) try: - with tempfile.NamedTemporaryFile(prefix="slideshow_manifest_", delete=False) as temp_manifest: + with tempfile.NamedTemporaryFile( + prefix="slideshow_manifest_", delete=False + ) as temp_manifest: temp_filepath = temp_manifest.name - temp_manifest.write(json.dumps(ccnode.extra_fields).encode('utf-8')) + temp_manifest.write(json.dumps(ccnode.extra_fields).encode("utf-8")) size_on_disk = temp_manifest.tell() temp_manifest.seek(0) - file_on_disk = File(open(temp_filepath, mode='rb'), name=filename) + file_on_disk = File(open(temp_filepath, mode="rb"), name=filename) # Create the file in Studio ccmodels.File.objects.create( file_on_disk=file_on_disk, @@ -310,21 +369,28 @@ def create_slideshow_manifest(ccnode, user_id=None): preset_id=preset, original_filename=filename, file_size=size_on_disk, - uploaded_by_id=user_id + uploaded_by_id=user_id, ) finally: temp_manifest.close() -def create_bare_contentnode(ccnode, default_language, channel_id, channel_name, metadata): # noqa: C901 - logging.debug("Creating a Kolibri contentnode for instance id {}".format( - ccnode.node_id)) +def create_bare_contentnode( # noqa: C901 + ccnode, default_language, channel_id, channel_name, metadata +): + logging.debug( + "Creating a Kolibri contentnode for instance id {}".format(ccnode.node_id) + ) kolibri_license = None if ccnode.license is not None: kolibri_license = create_kolibri_license_object(ccnode)[0] - language = (ccnode.language if ccnode.kind_id == content_kinds.TOPIC else metadata.get("language")) or default_language + language = ( + ccnode.language + if ccnode.kind_id == content_kinds.TOPIC + else metadata.get("language") + ) or default_language if language: language, _new = 
get_or_create_language(language) @@ -334,15 +400,21 @@ def create_bare_contentnode(ccnode, default_language, channel_id, channel_name, duration = ccnode.files.aggregate(duration=Max("duration")).get("duration") options = {} - if ccnode.extra_fields and 'options' in ccnode.extra_fields: - options = ccnode.extra_fields['options'] + if ccnode.extra_fields and "options" in ccnode.extra_fields: + options = ccnode.extra_fields["options"] duration = None ccnode_completion_criteria = options.get("completion_criteria") if ccnode_completion_criteria: - if ccnode_completion_criteria["model"] == completion_criteria.TIME or ccnode_completion_criteria["model"] == completion_criteria.APPROX_TIME: + if ( + ccnode_completion_criteria["model"] == completion_criteria.TIME + or ccnode_completion_criteria["model"] == completion_criteria.APPROX_TIME + ): duration = ccnode_completion_criteria["threshold"] - if duration is None and ccnode.kind_id in [content_kinds.AUDIO, content_kinds.VIDEO]: + if duration is None and ccnode.kind_id in [ + content_kinds.AUDIO, + content_kinds.VIDEO, + ]: # aggregate duration from associated files, choosing maximum if there are multiple, like hi and lo res videos. duration = ccnode.files.aggregate(duration=Max("duration")).get("duration") @@ -355,51 +427,80 @@ def create_bare_contentnode(ccnode, default_language, channel_id, channel_name, accessibility_labels = ",".join(ccnode.accessibility_labels.keys()) # Do not use the inherited metadata if this is a topic, just read from its own metadata instead. - grade_levels = ccnode.grade_levels if ccnode.kind_id == content_kinds.TOPIC else metadata["grade_levels"] - resource_types = ccnode.resource_types if ccnode.kind_id == content_kinds.TOPIC else metadata["resource_types"] - categories = ccnode.categories if ccnode.kind_id == content_kinds.TOPIC else metadata["categories"] - learner_needs = ccnode.learner_needs if ccnode.kind_id == content_kinds.TOPIC else metadata["learner_needs"] + grade_levels = ( + ccnode.grade_levels + if ccnode.kind_id == content_kinds.TOPIC + else metadata["grade_levels"] + ) + resource_types = ( + ccnode.resource_types + if ccnode.kind_id == content_kinds.TOPIC + else metadata["resource_types"] + ) + categories = ( + ccnode.categories + if ccnode.kind_id == content_kinds.TOPIC + else metadata["categories"] + ) + learner_needs = ( + ccnode.learner_needs + if ccnode.kind_id == content_kinds.TOPIC + else metadata["learner_needs"] + ) kolibrinode, is_new = kolibrimodels.ContentNode.objects.update_or_create( pk=ccnode.node_id, defaults={ - 'kind': ccnode.kind.kind, - 'title': ccnode.title if ccnode.parent else channel_name, - 'content_id': ccnode.content_id, - 'channel_id': channel_id, - 'author': ccnode.author or "", - 'description': ccnode.description, - 'sort_order': ccnode.sort_order, - 'license_owner': ccnode.copyright_holder or "", - 'license': kolibri_license, - 'available': ccnode.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists(), # Hide empty topics - 'stemmed_metaphone': "", # Stemmed metaphone is no longer used, and will cause no harm if blank - 'lang': language, - 'license_name': kolibri_license.license_name if kolibri_license is not None else None, - 'license_description': kolibri_license.license_description if kolibri_license is not None else None, - 'coach_content': ccnode.role_visibility == roles.COACH, - 'duration': duration, - 'options': options, + "kind": ccnode.kind.kind, + "title": ccnode.title if ccnode.parent else channel_name, + "content_id": ccnode.content_id, 
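
The defaults assembled in this hunk feed a single ContentNode.objects.update_or_create keyed on node_id, which is what makes republishing idempotent: an existing Kolibri node is updated in place rather than duplicated. One of those defaults, "available", implements the "hide empty topics" rule. A pure-Python sketch of that rule, using dict trees as an illustrative stand-in for the real MPTT queryset behind get_descendants(include_self=True):

    # A node is available iff its subtree, including itself, contains
    # something that is not a topic.
    def is_available(node):
        if node["kind"] != "topic":
            return True
        return any(is_available(child) for child in node.get("children", []))

    empty = {"kind": "topic", "children": [{"kind": "topic", "children": []}]}
    filled = {"kind": "topic", "children": [{"kind": "video", "children": []}]}
    assert not is_available(empty)
    assert is_available(filled)
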
+ "channel_id": channel_id, + "author": ccnode.author or "", + "description": ccnode.description, + "sort_order": ccnode.sort_order, + "license_owner": ccnode.copyright_holder or "", + "license": kolibri_license, + "available": ccnode.get_descendants(include_self=True) + .exclude(kind_id=content_kinds.TOPIC) + .exists(), # Hide empty topics + "stemmed_metaphone": "", # Stemmed metaphone is no longer used, and will cause no harm if blank + "lang": language, + "license_name": kolibri_license.license_name + if kolibri_license is not None + else None, + "license_description": kolibri_license.license_description + if kolibri_license is not None + else None, + "coach_content": ccnode.role_visibility == roles.COACH, + "duration": duration, + "options": options, # Fields for metadata labels "grade_levels": ",".join(grade_levels.keys()) if grade_levels else None, - "resource_types": ",".join(resource_types.keys()) if resource_types else None, + "resource_types": ",".join(resource_types.keys()) + if resource_types + else None, "learning_activities": learning_activities, "accessibility_labels": accessibility_labels, "categories": ",".join(categories.keys()) if categories else None, "learner_needs": ",".join(learner_needs.keys()) if learner_needs else None, - } + }, ) if ccnode.parent: - logging.debug("Associating {child} with parent {parent}".format( - child=kolibrinode.pk, - parent=ccnode.parent.node_id - )) - kolibrinode.parent = kolibrimodels.ContentNode.objects.get(pk=ccnode.parent.node_id) + logging.debug( + "Associating {child} with parent {parent}".format( + child=kolibrinode.pk, parent=ccnode.parent.node_id + ) + ) + kolibrinode.parent = kolibrimodels.ContentNode.objects.get( + pk=ccnode.parent.node_id + ) kolibrinode.save() logging.debug("Created Kolibri ContentNode with node id {}".format(ccnode.node_id)) - logging.debug("Kolibri node count: {}".format(kolibrimodels.ContentNode.objects.all().count())) + logging.debug( + "Kolibri node count: {}".format(kolibrimodels.ContentNode.objects.all().count()) + ) return kolibrinode @@ -409,24 +510,32 @@ def get_or_create_language(language): id=language.pk, lang_code=language.lang_code, lang_subcode=language.lang_subcode, - lang_name=language.lang_name if hasattr(language, 'lang_name') else language.native_name, - lang_direction=language.lang_direction + lang_name=language.lang_name + if hasattr(language, "lang_name") + else language.native_name, + lang_direction=language.lang_direction, ) def create_associated_thumbnail(ccnode, ccfilemodel): """ - Gets the appropriate thumbnail for export (uses or generates a base64 encoding) - Args: - ccnode (): node to derive thumbnail from (if encoding is provided) - ccfilemodel (): file to get thumbnail from if no encoding is available - Returns model of encoded, resized thumbnail + Gets the appropriate thumbnail for export (uses or generates a base64 encoding) + Args: + ccnode (): node to derive thumbnail from (if encoding is provided) + ccfilemodel (): file to get thumbnail from if no encoding is available + Returns model of encoded, resized thumbnail """ encoding = None try: - encoding = ccnode.thumbnail_encoding and load_json_string(ccnode.thumbnail_encoding).get('base64') + encoding = ccnode.thumbnail_encoding and load_json_string( + ccnode.thumbnail_encoding + ).get("base64") except ValueError: - logging.error("ERROR: node thumbnail is not in correct format ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding)) + logging.error( + "ERROR: node thumbnail is not in correct format ({}: {})".format( + 
ccnode.id, ccnode.thumbnail_encoding + ) + ) return # Save the encoding if it doesn't already have an encoding @@ -435,40 +544,53 @@ def create_associated_thumbnail(ccnode, ccfilemodel): encoding = get_thumbnail_encoding(str(ccfilemodel)) except IOError: # ImageMagick may raise an IOError if the file is not a thumbnail. Catch that then just return early. - logging.error("ERROR: cannot identify the thumbnail ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding)) + logging.error( + "ERROR: cannot identify the thumbnail ({}: {})".format( + ccnode.id, ccnode.thumbnail_encoding + ) + ) return - ccnode.thumbnail_encoding = json.dumps({ - "base64": encoding, - "points": [], - "zoom": 0, - }) + ccnode.thumbnail_encoding = json.dumps( + { + "base64": encoding, + "points": [], + "zoom": 0, + } + ) ccnode.save(update_fields=("thumbnail_encoding",)) return create_thumbnail_from_base64( encoding, uploaded_by=ccfilemodel.uploaded_by, file_format_id=ccfilemodel.file_format_id, - preset_id=ccfilemodel.preset_id + preset_id=ccfilemodel.preset_id, ) def create_associated_file_objects(kolibrinode, ccnode): - logging.debug("Creating LocalFile and File objects for Node {}".format(kolibrinode.id)) - for ccfilemodel in ccnode.files.exclude(Q(preset_id=format_presets.EXERCISE_IMAGE) | Q(preset_id=format_presets.EXERCISE_GRAPHIE)): + logging.debug( + "Creating LocalFile and File objects for Node {}".format(kolibrinode.id) + ) + for ccfilemodel in ccnode.files.exclude( + Q(preset_id=format_presets.EXERCISE_IMAGE) + | Q(preset_id=format_presets.EXERCISE_GRAPHIE) + ): preset = ccfilemodel.preset fformat = ccfilemodel.file_format if ccfilemodel.language: get_or_create_language(ccfilemodel.language) if preset.thumbnail: - ccfilemodel = create_associated_thumbnail(ccnode, ccfilemodel) or ccfilemodel + ccfilemodel = ( + create_associated_thumbnail(ccnode, ccfilemodel) or ccfilemodel + ) kolibrilocalfilemodel, new = kolibrimodels.LocalFile.objects.get_or_create( pk=ccfilemodel.checksum, defaults={ - 'extension': fformat.extension, - 'file_size': ccfilemodel.file_size, - } + "extension": fformat.extension, + "file_size": ccfilemodel.file_size, + }, ) kolibrimodels.File.objects.create( @@ -502,7 +624,7 @@ def create_perseus_exercise(ccnode, kolibrinode, exercise_data, user_id=None): ccnode.files.filter(preset_id=format_presets.EXERCISE).delete() assessment_file_obj = ccmodels.File.objects.create( - file_on_disk=File(open(temppath, 'rb'), name=filename), + file_on_disk=File(open(temppath, "rb"), name=filename), contentnode=ccnode, file_format_id=file_formats.PERSEUS, preset_id=format_presets.EXERCISE, @@ -510,7 +632,11 @@ def create_perseus_exercise(ccnode, kolibrinode, exercise_data, user_id=None): file_size=file_size, uploaded_by_id=user_id, ) - logging.debug("Created exercise for {0} with checksum {1}".format(ccnode.title, assessment_file_obj.checksum)) + logging.debug( + "Created exercise for {0} with checksum {1}".format( + ccnode.title, assessment_file_obj.checksum + ) + ) finally: temppath and os.unlink(temppath) @@ -520,44 +646,63 @@ def parse_assessment_metadata(ccnode): if isinstance(extra_fields, str): extra_fields = json.loads(extra_fields) extra_fields = migrate_extra_fields(extra_fields) or {} - randomize = extra_fields.get('randomize') if extra_fields.get('randomize') is not None else True - return randomize, extra_fields.get('options').get('completion_criteria').get('threshold') + randomize = ( + extra_fields.get("randomize") + if extra_fields.get("randomize") is not None + else True + ) + return randomize, 
extra_fields.get("options").get("completion_criteria").get( + "threshold" + ) def process_assessment_metadata(ccnode, kolibrinode): # Get mastery model information, set to default if none provided - assessment_items = ccnode.assessment_items.all().order_by('order') + assessment_items = ccnode.assessment_items.all().order_by("order") assessment_item_ids = [a.assessment_id for a in assessment_items] randomize, mastery_criteria = parse_assessment_metadata(ccnode) exercise_data = deepcopy(mastery_criteria) - exercise_data_type = exercise_data.get('mastery_model', "") - - mastery_model = {'type': exercise_data_type or exercises.M_OF_N} - if mastery_model['type'] == exercises.M_OF_N: - mastery_model.update({'n': exercise_data.get('n') or min(5, assessment_items.count()) or 1}) - mastery_model.update({'m': exercise_data.get('m') or min(5, assessment_items.count()) or 1}) - elif mastery_model['type'] == exercises.DO_ALL: - mastery_model.update({'n': assessment_items.count() or 1, 'm': assessment_items.count() or 1}) - elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_2: - mastery_model.update({'n': 2, 'm': 2}) - elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_3: - mastery_model.update({'n': 3, 'm': 3}) - elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_5: - mastery_model.update({'n': 5, 'm': 5}) - elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_10: - mastery_model.update({'n': 10, 'm': 10}) - - exercise_data.update({ - 'mastery_model': exercises.M_OF_N, - 'legacy_mastery_model': mastery_model['type'], - 'randomize': randomize, - 'n': mastery_model.get('n'), - 'm': mastery_model.get('m'), - 'all_assessment_items': assessment_item_ids, - 'assessment_mapping': {a.assessment_id: a.type if a.type != 'true_false' else exercises.SINGLE_SELECTION for a in assessment_items}, - }) + exercise_data_type = exercise_data.get("mastery_model", "") + + mastery_model = {"type": exercise_data_type or exercises.M_OF_N} + if mastery_model["type"] == exercises.M_OF_N: + mastery_model.update( + {"n": exercise_data.get("n") or min(5, assessment_items.count()) or 1} + ) + mastery_model.update( + {"m": exercise_data.get("m") or min(5, assessment_items.count()) or 1} + ) + elif mastery_model["type"] == exercises.DO_ALL: + mastery_model.update( + {"n": assessment_items.count() or 1, "m": assessment_items.count() or 1} + ) + elif mastery_model["type"] == exercises.NUM_CORRECT_IN_A_ROW_2: + mastery_model.update({"n": 2, "m": 2}) + elif mastery_model["type"] == exercises.NUM_CORRECT_IN_A_ROW_3: + mastery_model.update({"n": 3, "m": 3}) + elif mastery_model["type"] == exercises.NUM_CORRECT_IN_A_ROW_5: + mastery_model.update({"n": 5, "m": 5}) + elif mastery_model["type"] == exercises.NUM_CORRECT_IN_A_ROW_10: + mastery_model.update({"n": 10, "m": 10}) + + exercise_data.update( + { + "mastery_model": exercises.M_OF_N, + "legacy_mastery_model": mastery_model["type"], + "randomize": randomize, + "n": mastery_model.get("n"), + "m": mastery_model.get("m"), + "all_assessment_items": assessment_item_ids, + "assessment_mapping": { + a.assessment_id: a.type + if a.type != "true_false" + else exercises.SINGLE_SELECTION + for a in assessment_items + }, + } + ) kolibrimodels.AssessmentMetaData.objects.create( id=uuid.uuid4(), @@ -576,34 +721,67 @@ def create_perseus_zip(ccnode, exercise_data, write_to_path, resized_images_map) with zipfile.ZipFile(write_to_path, "w") as zf: try: exercise_context = { - 'exercise': json.dumps(exercise_data, sort_keys=True, indent=4) + "exercise": 
json.dumps(exercise_data, sort_keys=True, indent=4) } - exercise_result = render_to_string('perseus/exercise.json', exercise_context) + exercise_result = render_to_string( + "perseus/exercise.json", exercise_context + ) write_to_zipfile("exercise.json", exercise_result, zf) channel_id = ccnode.get_channel_id() - for question in ccnode.assessment_items.prefetch_related('files').all().order_by('order'): + for question in ( + ccnode.assessment_items.prefetch_related("files") + .all() + .order_by("order") + ): try: - for image in question.files.filter(preset_id=format_presets.EXERCISE_IMAGE).order_by('checksum'): - image_name = "images/{}.{}".format(image.checksum, image.file_format_id) + for image in question.files.filter( + preset_id=format_presets.EXERCISE_IMAGE + ).order_by("checksum"): + image_name = "images/{}.{}".format( + image.checksum, image.file_format_id + ) if image_name not in zf.namelist(): - with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content: + with storage.open( + ccmodels.generate_object_storage_name( + image.checksum, str(image) + ), + "rb", + ) as content: write_to_zipfile(image_name, content.read(), zf) - for image in question.files.filter(preset_id=format_presets.EXERCISE_GRAPHIE).order_by('checksum'): + for image in question.files.filter( + preset_id=format_presets.EXERCISE_GRAPHIE + ).order_by("checksum"): svg_name = "images/{0}.svg".format(image.original_filename) - json_name = "images/{0}-data.json".format(image.original_filename) - if svg_name not in zf.namelist() or json_name not in zf.namelist(): - with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content: + json_name = "images/{0}-data.json".format( + image.original_filename + ) + if ( + svg_name not in zf.namelist() + or json_name not in zf.namelist() + ): + with storage.open( + ccmodels.generate_object_storage_name( + image.checksum, str(image) + ), + "rb", + ) as content: content = content.read() # in Python 3, delimiter needs to be in bytes format - content = content.split(exercises.GRAPHIE_DELIMITER.encode('ascii')) + content = content.split( + exercises.GRAPHIE_DELIMITER.encode("ascii") + ) write_to_zipfile(svg_name, content[0], zf) write_to_zipfile(json_name, content[1], zf) write_assessment_item(question, zf, channel_id, resized_images_map) except Exception as e: - logging.error("Error while publishing channel `{}`: {}".format(channel_id, str(e))) + logging.error( + "Error while publishing channel `{}`: {}".format( + channel_id, str(e) + ) + ) logging.error(traceback.format_exc()) # In production, these errors have historically been handled silently. 
# Retain that behavior for now, but raise an error locally so we can @@ -611,8 +789,10 @@ def create_perseus_zip(ccnode, exercise_data, write_to_path, resized_images_map) report_exception(e) # if we're in a testing or development environment, raise the error - if os.environ.get('BRANCH_ENVIRONMENT', '') != "master": - logging.warning("NOTE: the following error would have been swallowed silently in production") + if os.environ.get("BRANCH_ENVIRONMENT", "") != "master": + logging.warning( + "NOTE: the following error would have been swallowed silently in production" + ) raise finally: zf.close() @@ -626,67 +806,88 @@ def write_to_zipfile(filename, content, zf): zf.writestr(info, content) -def write_assessment_item(assessment_item, zf, channel_id, resized_images_map): # noqa C901 +def write_assessment_item( # noqa C901 + assessment_item, zf, channel_id, resized_images_map +): if assessment_item.type == exercises.MULTIPLE_SELECTION: - template = 'perseus/multiple_selection.json' - elif assessment_item.type == exercises.SINGLE_SELECTION or assessment_item.type == 'true_false': - template = 'perseus/multiple_selection.json' + template = "perseus/multiple_selection.json" + elif ( + assessment_item.type == exercises.SINGLE_SELECTION + or assessment_item.type == "true_false" + ): + template = "perseus/multiple_selection.json" elif assessment_item.type == exercises.INPUT_QUESTION: - template = 'perseus/input_question.json' + template = "perseus/input_question.json" elif assessment_item.type == exercises.PERSEUS_QUESTION: - template = 'perseus/perseus_question.json' + template = "perseus/perseus_question.json" else: - raise TypeError("Unrecognized question type on item {}".format(assessment_item.assessment_id)) + raise TypeError( + "Unrecognized question type on item {}".format( + assessment_item.assessment_id + ) + ) question = process_formulas(assessment_item.question) - question, question_images = process_image_strings(question, zf, channel_id, resized_images_map) + question, question_images = process_image_strings( + question, zf, channel_id, resized_images_map + ) answer_data = json.loads(assessment_item.answers) for answer in answer_data: if assessment_item.type == exercises.INPUT_QUESTION: - answer['answer'] = extract_value(answer['answer']) + answer["answer"] = extract_value(answer["answer"]) else: - answer['answer'] = answer['answer'].replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR) - answer['answer'] = process_formulas(answer['answer']) + answer["answer"] = answer["answer"].replace( + exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR + ) + answer["answer"] = process_formulas(answer["answer"]) # In case perseus doesn't support =wxh syntax, use below code - answer['answer'], answer_images = process_image_strings(answer['answer'], zf, channel_id, resized_images_map) - answer.update({'images': answer_images}) + answer["answer"], answer_images = process_image_strings( + answer["answer"], zf, channel_id, resized_images_map + ) + answer.update({"images": answer_images}) - answer_data = [a for a in answer_data if a['answer'] or a['answer'] == 0] # Filter out empty answers, but not 0 + answer_data = [ + a for a in answer_data if a["answer"] or a["answer"] == 0 + ] # Filter out empty answers, but not 0 hint_data = json.loads(assessment_item.hints) for hint in hint_data: - hint['hint'] = process_formulas(hint['hint']) - hint['hint'], hint_images = process_image_strings(hint['hint'], zf, channel_id, resized_images_map) - hint.update({'images': hint_images}) + hint["hint"] = 
process_formulas(hint["hint"]) + hint["hint"], hint_images = process_image_strings( + hint["hint"], zf, channel_id, resized_images_map + ) + hint.update({"images": hint_images}) answers_sorted = answer_data try: - answers_sorted = sorted(answer_data, key=lambda x: x.get('order')) + answers_sorted = sorted(answer_data, key=lambda x: x.get("order")) except TypeError: logging.error("Unable to sort answers, leaving unsorted.") hints_sorted = hint_data try: - hints_sorted = sorted(hint_data, key=lambda x: x.get('order')) + hints_sorted = sorted(hint_data, key=lambda x: x.get("order")) except TypeError: logging.error("Unable to sort hints, leaving unsorted.") context = { - 'question': question, - 'question_images': question_images, - 'answers': answers_sorted, - 'multiple_select': assessment_item.type == exercises.MULTIPLE_SELECTION, - 'raw_data': assessment_item.raw_data.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR), - 'hints': hints_sorted, - 'randomize': assessment_item.randomize, + "question": question, + "question_images": question_images, + "answers": answers_sorted, + "multiple_select": assessment_item.type == exercises.MULTIPLE_SELECTION, + "raw_data": assessment_item.raw_data.replace( + exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR + ), + "hints": hints_sorted, + "randomize": assessment_item.randomize, } - result = render_to_string(template, context).encode('utf-8', "ignore") + result = render_to_string(template, context).encode("utf-8", "ignore") write_to_zipfile("{0}.json".format(assessment_item.assessment_id), result, zf) def process_formulas(content): - for match in re.finditer(r'\$(\$.+\$)\$', content): + for match in re.finditer(r"\$(\$.+\$)\$", content): content = content.replace(match.group(0), match.group(1)) return content @@ -708,27 +909,39 @@ def get_resized_image_checksum(image_content): return hashlib.md5(image_content).hexdigest() -def process_image_strings(content, zf, channel_id, resized_images_map): +def process_image_strings(content, zf, channel_id, resized_images_map): # noqa C901 image_list = [] content = content.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR) - for match in re.finditer(r'!\[(?:[^\]]*)]\(([^\)]+)\)', content): - img_match = re.search(r'(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*', match.group(1)) + for match in re.finditer(r"!\[(?:[^\]]*)]\(([^\)]+)\)", content): + img_match = re.search( + r"(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*", match.group(1) + ) if img_match: # Add any image files that haven't been written to the zipfile - filename = img_match.group(1).split('/')[-1] + filename = img_match.group(1).split("/")[-1] checksum, ext = os.path.splitext(filename) if not ext: - logging.warning("While publishing channel `{}` a filename with no extension was encountered: `{}`".format(channel_id, filename)) + logging.warning( + "While publishing channel `{}` a filename with no extension was encountered: `{}`".format( + channel_id, filename + ) + ) try: # make sure the checksum is actually a hex string int(checksum, 16) except Exception: - logging.warning("while publishing channel `{}` a filename with an improper checksum was encountered: `{}`".format(channel_id, filename)) + logging.warning( + "while publishing channel `{}` a filename with an improper checksum was encountered: `{}`".format( + channel_id, filename + ) + ) # if we're in a testing or development environment, raise the error - if os.environ.get('BRANCH_ENVIRONMENT', '') != "master": - logging.warning("NOTE: the following error would have 
been swallowed silently in production") + if os.environ.get("BRANCH_ENVIRONMENT", "") != "master": + logging.warning( + "NOTE: the following error would have been swallowed silently in production" + ) raise original_image_name = "images/{}.{}".format(checksum, ext[1:]) @@ -756,33 +969,48 @@ def process_image_strings(content, zf, channel_id, resized_images_map): if similar_image: new_img_ref = similar_image else: - with storage.open(ccmodels.generate_object_storage_name(checksum, filename), 'rb') as imgfile: + with storage.open( + ccmodels.generate_object_storage_name(checksum, filename), + "rb", + ) as imgfile: original_content = imgfile.read() resized_content = resize_image(original_content, width, height) if resized_content: - resized_checksum = get_resized_image_checksum(resized_content) - new_image_name = "images/{}.{}".format(resized_checksum, ext[1:]) + resized_checksum = get_resized_image_checksum( + resized_content + ) + new_image_name = "images/{}.{}".format( + resized_checksum, ext[1:] + ) if new_image_name not in zf.namelist(): write_to_zipfile(new_image_name, resized_content, zf) - new_img_ref = original_img_ref.replace(filename, f"{resized_checksum}{ext}") + new_img_ref = original_img_ref.replace( + filename, f"{resized_checksum}{ext}" + ) resized_images_map[resized_key] = new_img_ref else: - logging.warning(f"Failed to resize image {filename}. Using original image.") + logging.warning( + f"Failed to resize image {filename}. Using original image." + ) new_img_ref = img_match.group(1) - new_img_match = re.search(r'(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*', new_img_ref) - image_data = {'name': new_img_match.group(1)} - image_data.update({'width': width}) - image_data.update({'height': height}) + new_img_match = re.search( + r"(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*", new_img_ref + ) + image_data = {"name": new_img_match.group(1)} + image_data.update({"width": width}) + image_data.update({"height": height}) image_list.append(image_data) content = content.replace(original_img_ref, new_img_match.group(1)) else: if original_image_name not in zf.namelist(): - with storage.open(ccmodels.generate_object_storage_name(checksum, filename), 'rb') as imgfile: + with storage.open( + ccmodels.generate_object_storage_name(checksum, filename), "rb" + ) as imgfile: original_content = imgfile.read() write_to_zipfile(original_image_name, original_content, zf) content = content.replace(match.group(1), img_match.group(1)) @@ -791,15 +1019,22 @@ def process_image_strings(content, zf, channel_id, resized_images_map): def map_prerequisites(root_node): - for n in ccmodels.PrerequisiteContentRelationship.objects.filter(prerequisite__tree_id=root_node.tree_id)\ - .values('prerequisite__node_id', 'target_node__node_id'): + for n in ccmodels.PrerequisiteContentRelationship.objects.filter( + prerequisite__tree_id=root_node.tree_id + ).values("prerequisite__node_id", "target_node__node_id"): try: - target_node = kolibrimodels.ContentNode.objects.get(pk=n['target_node__node_id']) - target_node.has_prerequisite.add(n['prerequisite__node_id']) + target_node = kolibrimodels.ContentNode.objects.get( + pk=n["target_node__node_id"] + ) + target_node.has_prerequisite.add(n["prerequisite__node_id"]) except kolibrimodels.ContentNode.DoesNotExist as e: - logging.error('Unable to find prerequisite {}'.format(str(e))) + logging.error("Unable to find prerequisite {}".format(str(e))) except IntegrityError as e: - logging.error('Unable to find source node for prerequisite relationship {}'.format(str(e))) + 
logging.error( + "Unable to find source node for prerequisite relationship {}".format( + str(e) + ) + ) def map_channel_to_kolibri_channel(channel, use_staging_tree=False): @@ -810,7 +1045,8 @@ def map_channel_to_kolibri_channel(channel, use_staging_tree=False): name=channel.name, description=channel.description, tagline=channel.tagline, - version=channel.version + 1, # Need to save as version being published, not current version + version=channel.version + + 1, # Need to save as version being published, not current version thumbnail=channel.icon_encoding, root_pk=base_tree.node_id, root_id=base_tree.node_id, @@ -827,12 +1063,16 @@ def set_channel_icon_encoding(channel): def convert_channel_thumbnail(channel): - """ encode_thumbnail: gets base64 encoding of thumbnail - Args: - thumbnail (str): file path or url to channel's thumbnail - Returns: base64 encoding of thumbnail + """encode_thumbnail: gets base64 encoding of thumbnail + Args: + thumbnail (str): file path or url to channel's thumbnail + Returns: base64 encoding of thumbnail """ - if not channel.thumbnail or channel.thumbnail == '' or 'static' in channel.thumbnail: + if ( + not channel.thumbnail + or channel.thumbnail == "" + or "static" in channel.thumbnail + ): return "" if channel.thumbnail_encoding: @@ -841,21 +1081,27 @@ def convert_channel_thumbnail(channel): if thumbnail_data.get("base64"): return thumbnail_data["base64"] except ValueError: - logging.error("ERROR: channel thumbnail is not in correct format ({}: {})".format(channel.id, channel.thumbnail_encoding)) + logging.error( + "ERROR: channel thumbnail is not in correct format ({}: {})".format( + channel.id, channel.thumbnail_encoding + ) + ) return get_thumbnail_encoding(channel.thumbnail) def map_tags_to_node(kolibrinode, ccnode): - """ map_tags_to_node: assigns tags to nodes (creates fk relationship) - Args: - kolibrinode (kolibri.models.ContentNode): node to map tag to - ccnode (contentcuration.models.ContentNode): node with tags to map - Returns: None + """map_tags_to_node: assigns tags to nodes (creates fk relationship) + Args: + kolibrinode (kolibri.models.ContentNode): node to map tag to + ccnode (contentcuration.models.ContentNode): node with tags to map + Returns: None """ tags_to_add = [] for tag in ccnode.tags.all(): - t, _new = kolibrimodels.ContentTag.objects.get_or_create(pk=tag.pk, tag_name=tag.tag_name) + t, _new = kolibrimodels.ContentTag.objects.get_or_create( + pk=tag.pk, tag_name=tag.tag_name + ) if len(t.tag_name) <= MAX_TAG_LENGTH: tags_to_add.append(t) @@ -888,18 +1134,16 @@ def save_export_database(channel_id, version, use_staging_tree=False): logging.debug("Saving export database") current_export_db_location = get_active_content_database() target_paths = [ - os.path.join( - settings.DB_ROOT, "{}-{}.sqlite3".format(channel_id, version) - ) + os.path.join(settings.DB_ROOT, "{}-{}.sqlite3".format(channel_id, version)) ] # Only create non-version path if not using the staging tree if not use_staging_tree: target_paths.append( - os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id) - )) + os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id)) + ) for target_export_db_location in target_paths: - with open(current_export_db_location, 'rb') as currentf: + with open(current_export_db_location, "rb") as currentf: storage.save(target_export_db_location, currentf) logging.info("Successfully copied to {}".format(target_export_db_location)) @@ -912,15 +1156,31 @@ def add_tokens_to_channel(channel): def 
fill_published_fields(channel, version_notes): channel.last_published = timezone.now() - published_nodes = channel.main_tree.get_descendants().filter(published=True).prefetch_related('files') - channel.total_resource_count = published_nodes.exclude(kind_id=content_kinds.TOPIC).count() - kind_counts = list(published_nodes.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id')) + published_nodes = ( + channel.main_tree.get_descendants() + .filter(published=True) + .prefetch_related("files") + ) + channel.total_resource_count = published_nodes.exclude( + kind_id=content_kinds.TOPIC + ).count() + kind_counts = list( + published_nodes.values("kind_id") + .annotate(count=Count("kind_id")) + .order_by("kind_id") + ) channel.published_kind_count = json.dumps(kind_counts) - channel.published_size = published_nodes.values('files__checksum', 'files__file_size').distinct( - ).aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0 + channel.published_size = ( + published_nodes.values("files__checksum", "files__file_size") + .distinct() + .aggregate(resource_size=Sum("files__file_size"))["resource_size"] + or 0 + ) - node_languages = published_nodes.exclude(language=None).values_list('language', flat=True) - file_languages = published_nodes.values_list('files__language', flat=True) + node_languages = published_nodes.exclude(language=None).values_list( + "language", flat=True + ) + file_languages = published_nodes.values_list("files__language", flat=True) language_list = list(set(chain(node_languages, file_languages))) for lang in language_list: @@ -928,16 +1188,20 @@ def fill_published_fields(channel, version_notes): channel.included_languages.add(lang) # TODO: Eventually, consolidate above operations to just use this field for storing historical data - channel.published_data.update({ - channel.version: { - 'resource_count': channel.total_resource_count, - 'kind_count': kind_counts, - 'size': channel.published_size, - 'date_published': channel.last_published.strftime(settings.DATE_TIME_FORMAT), - 'version_notes': version_notes, - 'included_languages': language_list + channel.published_data.update( + { + channel.version: { + "resource_count": channel.total_resource_count, + "kind_count": kind_counts, + "size": channel.published_size, + "date_published": channel.last_published.strftime( + settings.DATE_TIME_FORMAT + ), + "version_notes": version_notes, + "included_languages": language_list, + } } - }) + ) channel.save() @@ -949,11 +1213,16 @@ def sync_contentnode_and_channel_tsvectors(channel_id): # Update or create channel tsvector entry. logging.info("Setting tsvector for channel with id {}.".format(channel_id)) - channel = (get_fts_annotated_channel_qs() - .values("keywords_tsvector", "main_tree__tree_id") - .get(pk=channel_id)) + channel = ( + get_fts_annotated_channel_qs() + .values("keywords_tsvector", "main_tree__tree_id") + .get(pk=channel_id) + ) - obj, is_created = ChannelFullTextSearch.objects.update_or_create(channel_id=channel_id, defaults={"keywords_tsvector": channel["keywords_tsvector"]}) + obj, is_created = ChannelFullTextSearch.objects.update_or_create( + channel_id=channel_id, + defaults={"keywords_tsvector": channel["keywords_tsvector"]}, + ) del obj if is_created: @@ -962,19 +1231,37 @@ def sync_contentnode_and_channel_tsvectors(channel_id): logging.info("Updated 1 channel tsvector.") # Update or create contentnodes tsvector entry for channel_id. 
- logging.info("Setting tsvectors for all main tree contentnodes in channel {}.".format(channel_id)) + logging.info( + "Setting tsvectors for all main tree contentnodes in channel {}.".format( + channel_id + ) + ) if ContentNodeFullTextSearch.objects.filter(channel_id=channel_id).exists(): # First, delete nodes that are no longer in main_tree. - nodes_no_longer_in_main_tree = ~Exists(ccmodels.ContentNode.objects.filter(id=OuterRef("contentnode_id"), tree_id=channel["main_tree__tree_id"])) - ContentNodeFullTextSearch.objects.filter(nodes_no_longer_in_main_tree, channel_id=channel_id).delete() + nodes_no_longer_in_main_tree = ~Exists( + ccmodels.ContentNode.objects.filter( + id=OuterRef("contentnode_id"), tree_id=channel["main_tree__tree_id"] + ) + ) + ContentNodeFullTextSearch.objects.filter( + nodes_no_longer_in_main_tree, channel_id=channel_id + ).delete() # Now, all remaining nodes are in main_tree, so let's update them. # Update only changed nodes. - node_tsv_subquery = get_fts_annotated_contentnode_qs(channel_id).filter(id=OuterRef("contentnode_id")).order_by() - ContentNodeFullTextSearch.objects.filter(channel_id=channel_id, contentnode__complete=True, contentnode__changed=True).update( - keywords_tsvector=Subquery(node_tsv_subquery.values("keywords_tsvector")[:1]), - author_tsvector=Subquery(node_tsv_subquery.values("author_tsvector")[:1]) + node_tsv_subquery = ( + get_fts_annotated_contentnode_qs(channel_id) + .filter(id=OuterRef("contentnode_id")) + .order_by() + ) + ContentNodeFullTextSearch.objects.filter( + channel_id=channel_id, contentnode__complete=True, contentnode__changed=True + ).update( + keywords_tsvector=Subquery( + node_tsv_subquery.values("keywords_tsvector")[:1] + ), + author_tsvector=Subquery(node_tsv_subquery.values("author_tsvector")[:1]), ) # Insert newly created nodes. @@ -983,10 +1270,10 @@ def sync_contentnode_and_channel_tsvectors(channel_id): @delay_user_storage_calculation -def publish_channel( +def publish_channel( # noqa: C901 user_id, channel_id, - version_notes='', + version_notes="", force=False, force_exercises=False, send_email=False, @@ -1041,7 +1328,11 @@ def publish_channel( if progress_tracker: progress_tracker.track(100) except NoNodesChangedError: - logging.warning("No nodes have changed for channel {} so no publish will happen".format(channel_id)) + logging.warning( + "No nodes have changed for channel {} so no publish will happen".format( + channel_id + ) + ) # No matter what, make sure publishing is set to False once the run is done finally: if kolibri_temp_db and os.path.exists(kolibri_temp_db): diff --git a/contentcuration/contentcuration/utils/recommendations.py b/contentcuration/contentcuration/utils/recommendations.py index 9e88cd8dc2..13679d9f41 100644 --- a/contentcuration/contentcuration/utils/recommendations.py +++ b/contentcuration/contentcuration/utils/recommendations.py @@ -58,13 +58,13 @@ def __init__(self, results: List[Any], **kwargs): class EmbedTopicsRequest(EmbeddingsRequest): - path = '/embed-topics' - method = 'POST' + path = "/embed-topics" + method = "POST" class EmbedContentRequest(EmbeddingsRequest): - path = '/embed-content' - method = 'POST' + path = "/embed-content" + method = "POST" class EmbeddingsResponse(RecommendationsBackendResponse): @@ -73,7 +73,6 @@ def __init__(self, **kwargs): class RecommendationsBackendFactory(BackendFactory): - def _ensure_url_has_scheme(self, url): """ Checks whether the URL has a scheme. Default to http:// if no scheme exists. 
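(The body of _ensure_url_has_scheme is elided by the diff context above; only its docstring, which fixes the contract, is visible. A minimal standalone sketch under that contract — the urllib.parse approach and the module-level function name are assumptions, not the project's actual implementation:

    from urllib.parse import urlparse

    def ensure_url_has_scheme(url: str) -> str:
        # Return the URL unchanged if it already names a scheme;
        # otherwise fall back to http://, as the docstring describes.
        if urlparse(url).scheme:
            return url
        return "http://{}".format(url)

    assert ensure_url_has_scheme("example.com") == "http://example.com"
    assert ensure_url_has_scheme("https://api.example.com") == "https://api.example.com"
)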
@@ -89,13 +88,14 @@ def _ensure_url_has_scheme(self, url): def create_backend(self) -> Backend: backend = Recommendations() - backend.base_url = self._ensure_url_has_scheme(settings.CURRICULUM_AUTOMATION_API_URL) + backend.base_url = self._ensure_url_has_scheme( + settings.CURRICULUM_AUTOMATION_API_URL + ) backend.connect_endpoint = "/connect" return backend class RecommendationsAdapter(Adapter): - def generate_embeddings(self, request: EmbeddingsRequest) -> EmbeddingsResponse: """ Generates embeddings for the given request. @@ -129,10 +129,13 @@ def response_exists(self, request) -> Union[EmbeddingsResponse, None]: try: request_hash = self._generate_request_hash(request) override_threshold = self._extract_override_threshold(request) - data = list(RecommendationsCache.objects - .filter(request_hash=request_hash, override_threshold=override_threshold) - .order_by('override_threshold', 'rank') - .values('topic_id', 'rank', 'channel_id', node_id=F('contentnode_id'))) + data = list( + RecommendationsCache.objects.filter( + request_hash=request_hash, override_threshold=override_threshold + ) + .order_by("override_threshold", "rank") + .values("topic_id", "rank", "channel_id", node_id=F("contentnode_id")) + ) if len(data) > 0: return EmbeddingsResponse(data=self._unflatten_response(data)) else: @@ -154,16 +157,21 @@ def _generate_request_hash(self, request) -> str: """ params_copy = request.params.copy() if request.params else {} - params_copy.pop('override_threshold', None) + params_copy.pop("override_threshold", None) - unique_attributes = json.dumps({ - 'params': params_copy, - 'json': request.json, - }, sort_keys=True).encode('utf-8') + unique_attributes = json.dumps( + { + "params": params_copy, + "json": request.json, + }, + sort_keys=True, + ).encode("utf-8") return hashlib.md5(unique_attributes).hexdigest() - def cache_embeddings_request(self, request: BackendRequest, response: BackendResponse) -> bool: + def cache_embeddings_request( + self, request: BackendRequest, response: BackendResponse + ) -> bool: """ Caches the recommendations request and response. It performs a bulk insert of the recommendations into the RecommendationsCache table, ignoring any conflicts. @@ -182,12 +190,13 @@ def cache_embeddings_request(self, request: BackendRequest, response: BackendRes new_cache = [ RecommendationsCache( request_hash=request_hash, - topic_id=node['topic_id'], - contentnode_id=node['node_id'], - channel_id=node['channel_id'], - rank=node['rank'], + topic_id=node["topic_id"], + contentnode_id=node["node_id"], + channel_id=node["channel_id"], + rank=node["rank"], override_threshold=override_threshold, - ) for node in valid_nodes + ) + for node in valid_nodes ] RecommendationsCache.objects.bulk_create(new_cache, ignore_conflicts=True) return True @@ -203,10 +212,13 @@ def _extract_override_threshold(self, request) -> bool: :return: The value of the override_threshold parameter, or False if not present. :rtype: bool """ - return request.params.get('override_threshold', False) if request.params else False + return ( + request.params.get("override_threshold", False) if request.params else False + ) - def get_recommendations(self, request_data: Dict[str, Any], - override_threshold=False) -> RecommendationsResponse: + def get_recommendations( + self, request_data: Dict[str, Any], override_threshold=False + ) -> RecommendationsResponse: """ Get recommendations for the given topic(s). 
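(_generate_request_hash above derives the RecommendationsCache key from a canonical JSON dump of the request, popping override_threshold first so that flag never perturbs the hash — it is stored as a separate cache column instead. A standalone sketch of the same idea, using plain dicts in place of the BackendRequest object for brevity:

    import hashlib
    import json

    def request_hash(params, body):
        # Drop the cache-irrelevant flag, then serialize with sorted keys so
        # logically equal requests produce byte-identical JSON, hence equal hashes.
        params = {k: v for k, v in (params or {}).items() if k != "override_threshold"}
        canonical = json.dumps({"params": params, "json": body}, sort_keys=True)
        return hashlib.md5(canonical.encode("utf-8")).hexdigest()

    a = request_hash({"override_threshold": True}, {"topics": [{"id": "abc"}]})
    b = request_hash({"override_threshold": False}, {"topics": [{"id": "abc"}]})
    assert a == b  # requests differing only in the flag share a cache key
)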
@@ -218,9 +230,9 @@ def get_recommendations(self, request_data: Dict[str, Any], recommendations = [] request = EmbedTopicsRequest( - method='POST', - path='/recommend', - params={'override_threshold': override_threshold}, + method="POST", + path="/recommend", + params={"override_threshold": override_threshold}, json=request_data, ) @@ -229,10 +241,10 @@ def get_recommendations(self, request_data: Dict[str, Any], response = cached_response else: response = self.generate_embeddings(request=request) - if not getattr(response, 'error', None): + if not getattr(response, "error", None): self.cache_embeddings_request(request, response) else: - exception = getattr(response, 'error') + exception = getattr(response, "error") if isinstance(exception, Exception): raise exception else: @@ -243,37 +255,44 @@ def get_recommendations(self, request_data: Dict[str, Any], node_ids = self._extract_node_ids(recommended_nodes) cast_node_ids = [uuid.UUID(node_id) for node_id in node_ids] channel_cte = With( - Channel.objects.annotate( - channel_id=self._cast_to_uuid(F('id')) - ).filter( + Channel.objects.annotate(channel_id=self._cast_to_uuid(F("id"))) + .filter( Exists( PublicContentNode.objects.filter( - id__in=cast_node_ids, - channel_id=OuterRef('channel_id') + id__in=cast_node_ids, channel_id=OuterRef("channel_id") ) ) - ).values( - 'main_tree_id', - tree_id=F('main_tree__tree_id'), - ).distinct() + ) + .values( + "main_tree_id", + tree_id=F("main_tree__tree_id"), + ) + .distinct() ) - recommendations = channel_cte.join( - ContentNode.objects.filter(node_id__in=node_ids), - tree_id=channel_cte.col.tree_id - ).with_cte(channel_cte).annotate( - main_tree_id=channel_cte.col.main_tree_id - ).values( - 'id', - 'node_id', - 'main_tree_id', - 'parent_id', + recommendations = ( + channel_cte.join( + ContentNode.objects.filter(node_id__in=node_ids), + tree_id=channel_cte.col.tree_id, + ) + .with_cte(channel_cte) + .annotate(main_tree_id=channel_cte.col.main_tree_id) + .values( + "id", + "node_id", + "main_tree_id", + "parent_id", + ) ) # Add the corresponding channel_id to the recommendations - node_to_channel = {node['node_id']: node['channel_id'] for node in recommended_nodes} + node_to_channel = { + node["node_id"]: node["channel_id"] for node in recommended_nodes + } for recommendation in recommendations: - recommendation['channel_id'] = node_to_channel.get(recommendation['node_id']) + recommendation["channel_id"] = node_to_channel.get( + recommendation["node_id"] + ) return RecommendationsResponse(results=list(recommendations)) @@ -312,22 +331,26 @@ def _flatten_response(self, response: BackendResponse) -> List[Dict[str, Any]]: :rtype: List[Dict[str, Any]] """ flattened_response = [] - if hasattr(response, 'data') and isinstance(response.data, dict): - topics = response.data.get('topics', []) + if hasattr(response, "data") and isinstance(response.data, dict): + topics = response.data.get("topics", []) for topic in topics: - topic_id = topic.get('id') - recommendations = topic.get('recommendations', []) + topic_id = topic.get("id") + recommendations = topic.get("recommendations", []) for recommendation in recommendations: - flattened_response.append({ - 'topic_id': topic_id, - 'node_id': recommendation.get('id'), - 'channel_id': recommendation.get('channel_id'), - 'rank': recommendation.get('rank'), - }) + flattened_response.append( + { + "topic_id": topic_id, + "node_id": recommendation.get("id"), + "channel_id": recommendation.get("channel_id"), + "rank": recommendation.get("rank"), + } + ) return 
flattened_response - def _unflatten_response(self, flattened_data: List[Dict[str, Any]]) -> Dict[str, Any]: + def _unflatten_response( + self, flattened_data: List[Dict[str, Any]] + ) -> Dict[str, Any]: """ Transforms a flat list of recommendations back into the nested structure. @@ -364,24 +387,26 @@ def _unflatten_response(self, flattened_data: List[Dict[str, Any]]) -> Dict[str, topics_dict = {} for item in flattened_data: - topic_id = item.get('topic_id') + topic_id = item.get("topic_id") if topic_id not in topics_dict: topics_dict[topic_id] = { "id": topic_id.hex if isinstance(topic_id, uuid.UUID) else topic_id, - "recommendations": [] + "recommendations": [], } - node_id = item.get('node_id') - channel_id = item.get('channel_id') - topics_dict[topic_id]["recommendations"].append({ - "id": node_id.hex if isinstance(node_id, uuid.UUID) else node_id, - "channel_id": channel_id.hex if isinstance(channel_id, uuid.UUID) else channel_id, - "rank": item.get('rank') - }) - return { - "topics": list(topics_dict.values()) - } + node_id = item.get("node_id") + channel_id = item.get("channel_id") + topics_dict[topic_id]["recommendations"].append( + { + "id": node_id.hex if isinstance(node_id, uuid.UUID) else node_id, + "channel_id": channel_id.hex + if isinstance(channel_id, uuid.UUID) + else channel_id, + "rank": item.get("rank"), + } + ) + return {"topics": list(topics_dict.values())} def _validate_nodes(self, nodes: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ @@ -393,8 +418,11 @@ def _validate_nodes(self, nodes: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ node_ids = self._extract_node_ids(nodes) existing_node_ids = set( - PublicContentNode.objects.filter(id__in=node_ids).values_list('id', flat=True)) - return [node for node in nodes if node.get('node_id') in existing_node_ids] + PublicContentNode.objects.filter(id__in=node_ids).values_list( + "id", flat=True + ) + ) + return [node for node in nodes if node.get("node_id") in existing_node_ids] def _extract_node_ids(self, nodes: List[Dict[str, Any]]) -> List[str]: """ @@ -404,7 +432,7 @@ def _extract_node_ids(self, nodes: List[Dict[str, Any]]) -> List[str]: :return: A list of node IDs. :rtype: List[str] """ - return [node.get('node_id') for node in nodes] + return [node.get("node_id") for node in nodes] def _cast_to_uuid(self, field): """ @@ -415,8 +443,9 @@ def _cast_to_uuid(self, field): """ return Cast(field, output_field=UUIDField()) - def embed_content(self, channel_id: str, - nodes: List[Union[ContentNode, PublicContentNode]]) -> bool: + def embed_content( + self, channel_id: str, nodes: List[Union[ContentNode, PublicContentNode]] + ) -> bool: """ Embeds the content for the given nodes. This is an asynchronous process and could take a while to complete. This process is handled by our curriculum automation service. 
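(_flatten_response and _unflatten_response above are inverse transforms between the recommendation service's nested payload and the flat rows stored in RecommendationsCache. A data-only illustration of the two shapes being converted between; all IDs are made up:

    flat_rows = [
        {"topic_id": "t1", "node_id": "n1", "channel_id": "c1", "rank": 1},
        {"topic_id": "t1", "node_id": "n2", "channel_id": "c1", "rank": 2},
    ]

    nested = {
        "topics": [
            {
                "id": "t1",
                "recommendations": [
                    {"id": "n1", "channel_id": "c1", "rank": 1},
                    {"id": "n2", "channel_id": "c1", "rank": 2},
                ],
            },
        ],
    }
)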
@@ -434,13 +463,13 @@ def embed_content(self, channel_id: str, for i in range(0, len(nodes), 20): try: - batch = nodes[i:i + 20] + batch = nodes[i : i + 20] content = [self.extract_content(node) for node in batch] content_body = { - 'resources': content, - 'metadata': { - 'channel_id': channel_id, - } + "resources": content, + "metadata": { + "channel_id": channel_id, + }, } request = EmbedContentRequest(json=content_body) self.backend.make_request(request) @@ -529,9 +558,9 @@ def _format_file_data(self, file) -> Dict[str, Any]: :rtype: Dict[str, Any] """ return { - 'url': file.file_on_disk, - 'preset': file.preset_id, - 'language': file.language.lang_code if file.language else None, + "url": file.file_on_disk, + "preset": file.preset_id, + "language": file.language.lang_code if file.language else None, } @@ -545,9 +574,11 @@ def connect(self) -> bool: self._connected = super().connect() return self._connected - def make_request(self, request) -> Union[EmbeddingsResponse, RecommendationsResponse]: + def make_request( + self, request + ) -> Union[EmbeddingsResponse, RecommendationsResponse]: return super().make_request(request) @classmethod - def _create_instance(cls) -> 'Recommendations': + def _create_instance(cls) -> "Recommendations": return cls() diff --git a/contentcuration/contentcuration/utils/secretmanagement.py b/contentcuration/contentcuration/utils/secretmanagement.py index c17b8c00b1..ef0f82ea22 100644 --- a/contentcuration/contentcuration/utils/secretmanagement.py +++ b/contentcuration/contentcuration/utils/secretmanagement.py @@ -50,7 +50,10 @@ def get_secret(secret_name, secret_storage=None): if secret_storage in [None, "", ENV_VARS]: return os.getenv(secret_name) if secret_storage == "KMS_GCS": - env = os.getenv("SECRET_STORAGE_ENVIRONMENT") or SECRET_STORAGE_DEFAULT_ENVIRONMENT + env = ( + os.getenv("SECRET_STORAGE_ENVIRONMENT") + or SECRET_STORAGE_DEFAULT_ENVIRONMENT + ) project_id = os.getenv("SECRET_STORAGE_GCP_PROJECT_ID") kms_location = ( os.getenv("SECRET_STORAGE_GCP_KMS_LOCATION") @@ -58,7 +61,9 @@ def get_secret(secret_name, secret_storage=None): ) if not project_id: - raise KeyError("The env variable SECRET_STORAGE_GCP_PROJECT_ID was not defined!") + raise KeyError( + "The env variable SECRET_STORAGE_GCP_PROJECT_ID was not defined!" + ) ciphertext = get_encrypted_secret(secret_name, project_id, env) @@ -81,13 +86,20 @@ def decrypt_secret(ciphertext, project_id, loc, env, secret_name): ciphertext_crc32c = crc32c(ciphertext) response = kms_client.decrypt( - request={'name': key_path, 'ciphertext': ciphertext, 'ciphertext_crc32c': ciphertext_crc32c}) + request={ + "name": key_path, + "ciphertext": ciphertext, + "ciphertext_crc32c": ciphertext_crc32c, + } + ) # Optional, but recommended: perform integrity verification on decrypt_response. # For more details on ensuring E2E in-transit integrity to and from Cloud KMS visit: # https://cloud.google.com/kms/docs/data-integrity-guidelines if not response.plaintext_crc32c == crc32c(response.plaintext): - raise Exception('The response received from the server was corrupted in-transit.') + raise Exception( + "The response received from the server was corrupted in-transit." 
+ ) return response.plaintext diff --git a/contentcuration/contentcuration/utils/sentry.py b/contentcuration/contentcuration/utils/sentry.py index c9566de78a..6e9b5eae24 100644 --- a/contentcuration/contentcuration/utils/sentry.py +++ b/contentcuration/contentcuration/utils/sentry.py @@ -5,9 +5,7 @@ def report_exception(exception=None, user=None, contexts=None): if getattr(settings, "SENTRY_ACTIVE", False): from sentry_sdk import capture_exception - scope_args = { - "contexts": contexts - } + scope_args = {"contexts": contexts} if user and not user.is_anonymous: scope_args["user"] = { diff --git a/contentcuration/contentcuration/utils/storage_common.py b/contentcuration/contentcuration/utils/storage_common.py index f2ba6e3188..10d79bd5c5 100644 --- a/contentcuration/contentcuration/utils/storage_common.py +++ b/contentcuration/contentcuration/utils/storage_common.py @@ -36,7 +36,12 @@ def determine_content_type(filename): def get_presigned_upload_url( - filepath, md5sum_b64, lifetime_sec, content_length, storage=default_storage, client=None + filepath, + md5sum_b64, + lifetime_sec, + content_length, + storage=default_storage, + client=None, ): """Return a presigned URL that can modify the given filepath through a PUT request. Performing a PUT request on the returned URL changes the object's @@ -65,23 +70,31 @@ def get_presigned_upload_url( if isinstance(storage, (GoogleCloudStorage, CompositeGCS)): client = client or storage.get_client() bucket = settings.AWS_S3_BUCKET_NAME - upload_url = _get_gcs_presigned_put_url(client, bucket, filepath, md5sum_b64, lifetime_sec, mimetype=mimetype) + upload_url = _get_gcs_presigned_put_url( + client, bucket, filepath, md5sum_b64, lifetime_sec, mimetype=mimetype + ) elif isinstance(storage, S3Storage): bucket = settings.AWS_S3_BUCKET_NAME client = client or storage.s3_connection - upload_url = _get_s3_presigned_put_url(client, bucket, filepath, md5sum_b64, lifetime_sec) + upload_url = _get_s3_presigned_put_url( + client, bucket, filepath, md5sum_b64, lifetime_sec + ) else: raise UnknownStorageBackendError( "Please ensure your storage backend is either Google Cloud Storage or S3 Storage!" 
) - return { - "mimetype": mimetype, - "uploadURL": upload_url - } + return {"mimetype": mimetype, "uploadURL": upload_url} -def _get_gcs_presigned_put_url(gcs_client, bucket, filepath, md5sum, lifetime_sec, mimetype="application/octet-stream"): +def _get_gcs_presigned_put_url( + gcs_client, + bucket, + filepath, + md5sum, + lifetime_sec, + mimetype="application/octet-stream", +): bucket_obj = gcs_client.get_bucket(bucket) blob_obj = bucket_obj.blob(filepath) diff --git a/contentcuration/contentcuration/utils/sync.py b/contentcuration/contentcuration/utils/sync.py index 1ed11a181c..a11ce4aeab 100644 --- a/contentcuration/contentcuration/utils/sync.py +++ b/contentcuration/contentcuration/utils/sync.py @@ -83,9 +83,7 @@ def sync_node( sync_node_tags(node, original_node) if sync_files: sync_node_files(node, original_node) - if ( - sync_assessment_items and node.kind_id == content_kinds.EXERCISE - ): + if sync_assessment_items and node.kind_id == content_kinds.EXERCISE: sync_node_assessment_items(node, original_node) return node @@ -109,7 +107,8 @@ def sync_node_tags(node, original): tag_name__in=node.tags.values_list("tag_name", flat=True) ): new_tag = ContentTag.objects.filter( - tag_name=tag.tag_name, channel_id=None, + tag_name=tag.tag_name, + channel_id=None, ).first() if not new_tag: new_tag = ContentTag.objects.create(tag_name=tag.tag_name, channel_id=None) diff --git a/contentcuration/contentcuration/utils/transcription.py b/contentcuration/contentcuration/utils/transcription.py index 105b1b0608..1d9738bc18 100644 --- a/contentcuration/contentcuration/utils/transcription.py +++ b/contentcuration/contentcuration/utils/transcription.py @@ -9,6 +9,7 @@ class WhisperRequest(BackendRequest): def __init__(self) -> None: super().__init__() + class WhisperResponse(BackendResponse): def __init__(self) -> None: super().__init__() @@ -16,16 +17,19 @@ def __init__(self) -> None: class Whisper(Backend): def connect(self) -> None: - raise NotImplementedError("The 'connect' method is not implemented for the 'Whisper' backend.") + raise NotImplementedError( + "The 'connect' method is not implemented for the 'Whisper' backend." + ) def make_request(self, request: WhisperRequest) -> WhisperResponse: # Implement production backend here. pass @classmethod - def _create_instance(cls) -> 'Whisper': + def _create_instance(cls) -> "Whisper": return cls() + class LocalWhisper(Backend): def make_request(self, request: WhisperRequest) -> WhisperResponse: # Implement your local backend here. 
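(A hedged usage sketch for get_presigned_upload_url from storage_common.py above: the keyword names and the returned keys come from the diff itself, while the object path, lifetime, and the Content-MD5 detail are illustrative assumptions:

    import base64
    import hashlib

    from contentcuration.utils.storage_common import get_presigned_upload_url

    file_bytes = b"example contents"
    # The helper expects the base64-encoded md5 digest, per the md5sum_b64 name.
    md5sum_b64 = base64.b64encode(hashlib.md5(file_bytes).digest()).decode("ascii")

    result = get_presigned_upload_url(
        filepath="storage/ex/am/example.pdf",  # illustrative object path
        md5sum_b64=md5sum_b64,
        lifetime_sec=300,
        content_length=len(file_bytes),
    )
    # result["uploadURL"] accepts a PUT of file_bytes (typically sent with a
    # matching Content-MD5 header); result["mimetype"] is derived from the
    # file extension by determine_content_type.
)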
diff --git a/contentcuration/contentcuration/utils/user.py b/contentcuration/contentcuration/utils/user.py index aeeffbf5b5..0c8d918937 100644 --- a/contentcuration/contentcuration/utils/user.py +++ b/contentcuration/contentcuration/utils/user.py @@ -19,4 +19,8 @@ def calculate_user_storage(user_id): if not user.is_admin: calculate_user_storage_task.fetch_or_enqueue(user, user_id=user_id) except User.DoesNotExist: - logging.error("Tried to calculate user storage for user with id {} but they do not exist".format(user_id)) + logging.error( + "Tried to calculate user storage for user with id {} but they do not exist".format( + user_id + ) + ) diff --git a/contentcuration/contentcuration/views/admin.py b/contentcuration/contentcuration/views/admin.py index 7a55052cbb..8e674c3ac4 100644 --- a/contentcuration/contentcuration/views/admin.py +++ b/contentcuration/contentcuration/views/admin.py @@ -20,11 +20,13 @@ @is_admin -@api_view(['POST']) +@api_view(["POST"]) def send_custom_email(request): data = json.loads(request.body) try: - sendcustomemails_task.enqueue(request.user, data["subject"], data["message"], data['query']) + sendcustomemails_task.enqueue( + request.user, data["subject"], data["message"], data["query"] + ) except KeyError: raise ObjectDoesNotExist("Missing attribute from data: {}".format(data)) @@ -33,10 +35,16 @@ def send_custom_email(request): @login_required @browser_is_supported -@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication)) +@authentication_classes( + (SessionAuthentication, BasicAuthentication, TokenAuthentication) +) def administration(request): - return render(request, 'administration.html', { - "current_user": current_user_for_context(request.user), - "default_sender": settings.DEFAULT_FROM_EMAIL, - "messages": json_for_parse_from_data(get_messages()), - }) + return render( + request, + "administration.html", + { + "current_user": current_user_for_context(request.user), + "default_sender": settings.DEFAULT_FROM_EMAIL, + "messages": json_for_parse_from_data(get_messages()), + }, + ) diff --git a/contentcuration/contentcuration/views/base.py b/contentcuration/contentcuration/views/base.py index 0c4b8cb815..082d96376d 100644 --- a/contentcuration/contentcuration/views/base.py +++ b/contentcuration/contentcuration/views/base.py @@ -123,28 +123,36 @@ def get_prober_channel(request): channel = Channel.objects.filter(editors=request.user).first() if not channel: - channel = Channel.objects.create(actor_id=request.user.id, name="Prober channel", editors=[request.user]) + channel = Channel.objects.create( + actor_id=request.user.id, name="Prober channel", editors=[request.user] + ) return Response(SimplifiedChannelProbeCheckSerializer(channel).data) + @api_view(["GET"]) @authentication_classes((TokenAuthentication, SessionAuthentication)) @permission_classes((IsAuthenticated,)) def publishing_status(request): if not request.user.is_admin: return HttpResponseForbidden() - associated_custom_task_metadata_ids = CustomTaskMetadata.objects.filter(channel_id=Cast(OuterRef(OuterRef("channel_id")), UUIDField())).values_list("task_id",flat=True) + associated_custom_task_metadata_ids = CustomTaskMetadata.objects.filter( + channel_id=Cast(OuterRef(OuterRef("channel_id")), UUIDField()) + ).values_list("task_id", flat=True) associated_tasks = TaskResult.objects.filter( task_name="export-channel", task_id__in=Subquery(associated_custom_task_metadata_ids), ) channel_publish_status = ( - ChannelHistory.objects - .filter( + 
ChannelHistory.objects.filter( action=channel_history.PUBLICATION, - channel_id__in=Channel.objects.filter(main_tree__publishing=True).values("id"), + channel_id__in=Channel.objects.filter(main_tree__publishing=True).values( + "id" + ), + ) + .annotate( + task_id=associated_tasks.order_by("-date_created").values("task_id")[:1] ) - .annotate(task_id=associated_tasks.order_by("-date_created").values("task_id")[:1]) .distinct("channel_id") .order_by("channel_id", "-performed") .values("channel_id", "performed", "task_id") @@ -176,9 +184,11 @@ def task_queue_status(request): from contentcuration.celery import app - return Response({ - 'queued_task_count': app.count_queued_tasks(), - }) + return Response( + { + "queued_task_count": app.count_queued_tasks(), + } + ) @api_view(["GET"]) @@ -194,10 +204,14 @@ def unapplied_changes_status(request): for _ in app.get_active_and_reserved_tasks(): active_task_count += 1 - return Response({ - 'active_task_count': active_task_count, - 'unapplied_changes_count': Change.objects.filter(applied=False, errored=False).count(), - }) + return Response( + { + "active_task_count": active_task_count, + "unapplied_changes_count": Change.objects.filter( + applied=False, errored=False + ).count(), + } + ) """ END HEALTH CHECKS """ @@ -213,7 +227,9 @@ def channel_list(request): public_channel_list = cache.get(PUBLIC_CHANNELS_CACHE_KEYS["list"]) if public_channel_list is None: public_channel_list = Channel.objects.filter( - public=True, main_tree__published=True, deleted=False, + public=True, + main_tree__published=True, + deleted=False, ).values_list("main_tree__tree_id", flat=True) cache.set(PUBLIC_CHANNELS_CACHE_KEYS["list"], public_channel_list, None) @@ -231,7 +247,11 @@ def channel_list(request): .order_by("lang_code") ) languages = {lang["lang_code"]: lang["count"] for lang in public_lang_query} - cache.set(PUBLIC_CHANNELS_CACHE_KEYS["languages"], json_for_parse_from_data(languages), None) + cache.set( + PUBLIC_CHANNELS_CACHE_KEYS["languages"], + json_for_parse_from_data(languages), + None, + ) # Get public channel licenses licenses = cache.get(PUBLIC_CHANNELS_CACHE_KEYS["licenses"]) @@ -243,7 +263,11 @@ def channel_list(request): .distinct() ) licenses = list(public_license_query) - cache.set(PUBLIC_CHANNELS_CACHE_KEYS["licenses"], json_for_parse_from_data(licenses), None) + cache.set( + PUBLIC_CHANNELS_CACHE_KEYS["licenses"], + json_for_parse_from_data(licenses), + None, + ) # Get public channel kinds kinds = cache.get(PUBLIC_CHANNELS_CACHE_KEYS["kinds"]) @@ -255,7 +279,9 @@ def channel_list(request): .distinct() ) kinds = list(public_kind_query) - cache.set(PUBLIC_CHANNELS_CACHE_KEYS["kinds"], json_for_parse_from_data(kinds), None) + cache.set( + PUBLIC_CHANNELS_CACHE_KEYS["kinds"], json_for_parse_from_data(kinds), None + ) return render( request, @@ -299,7 +325,9 @@ def channel(request, channel_id): # Check if channel exists try: - channel = Channel.filter_view_queryset(Channel.objects.all(), request.user).get(id=channel_id) + channel = Channel.filter_view_queryset(Channel.objects.all(), request.user).get( + id=channel_id + ) except Channel.DoesNotExist: channel_error = "CHANNEL_EDIT_ERROR_CHANNEL_NOT_FOUND" channel = None @@ -308,7 +336,7 @@ def channel(request, channel_id): # If user can view channel, but it's deleted, then we show # an option to restore the channel in the Administration page if channel.deleted: - channel_error = 'CHANNEL_EDIT_ERROR_CHANNEL_DELETED' + channel_error = "CHANNEL_EDIT_ERROR_CHANNEL_DELETED" else: channel_rev = 
channel.get_server_rev() @@ -317,7 +345,11 @@ def channel(request, channel_id): "channel_edit.html", { CHANNEL_EDIT_GLOBAL: json_for_parse_from_data( - {"channel_id": channel_id, "channel_error": channel_error, "channel_rev": channel_rev} + { + "channel_id": channel_id, + "channel_error": channel_error, + "channel_rev": channel_rev, + } ), CURRENT_USER: current_user_for_context(request.user), PREFERENCES: json_for_parse_from_data(request.user.content_defaults), @@ -353,36 +385,36 @@ def set_language(request): next_url = payload.get("next") if ( - (next_url or request.accepts('text/html')) and - not url_has_allowed_host_and_scheme( - url=next_url, - allowed_hosts={request.get_host()}, - require_https=request.is_secure(), - ) + next_url or request.accepts("text/html") + ) and not url_has_allowed_host_and_scheme( + url=next_url, + allowed_hosts={request.get_host()}, + require_https=request.is_secure(), ): - next_url = request.META.get('HTTP_REFERER') + next_url = request.META.get("HTTP_REFERER") if not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ): - next_url = translate_url(reverse('base'), lang_code) + next_url = translate_url(reverse("base"), lang_code) next_url_split = urlsplit(next_url) if next_url else None if next_url and not is_valid_path(next_url_split.path): - next_url = translate_url(reverse('base'), lang_code) + next_url = translate_url(reverse("base"), lang_code) response = HttpResponse(next_url) if next_url else HttpResponse(status=204) - if request.method == 'POST': + if request.method == "POST": if lang_code and check_for_language(lang_code): if next_url: next_trans = translate_url(next_url, lang_code) if next_trans != next_url: response = HttpResponse(next_trans) - if hasattr(request, 'session'): + if hasattr(request, "session"): # Storing the language in the session is deprecated. 
# (RemovedInDjango40Warning) request.session[LANGUAGE_SESSION_KEY] = lang_code response.set_cookie( - settings.LANGUAGE_COOKIE_NAME, lang_code, + settings.LANGUAGE_COOKIE_NAME, + lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, domain=settings.LANGUAGE_COOKIE_DOMAIN, diff --git a/contentcuration/contentcuration/views/internal.py b/contentcuration/contentcuration/views/internal.py index 677e062c80..7404506cbe 100644 --- a/contentcuration/contentcuration/views/internal.py +++ b/contentcuration/contentcuration/views/internal.py @@ -15,7 +15,9 @@ from django.http import JsonResponse from le_utils.constants import content_kinds from le_utils.constants import roles -from le_utils.constants.labels.accessibility_categories import ACCESSIBILITYCATEGORIESLIST +from le_utils.constants.labels.accessibility_categories import ( + ACCESSIBILITYCATEGORIESLIST, +) from le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST from le_utils.constants.labels.levels import LEVELSLIST from le_utils.constants.labels.needs import NEEDSLIST @@ -81,7 +83,12 @@ def handle_server_error(e, request): @api_view(["POST", "GET"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def authenticate_user_internal(request): """ Verify user is valid """ @@ -99,7 +106,12 @@ def authenticate_user_internal(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def check_version(request): """ Get version of Ricecooker with which CC is compatible """ @@ -129,7 +141,12 @@ def check_version(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def file_diff(request): """ Determine which files don't exist on server """ @@ -179,7 +196,12 @@ def api_file_upload(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def api_create_channel_endpoint(request): """ Create the channel node """ @@ -189,20 +211,29 @@ def api_create_channel_endpoint(request): obj = create_channel(channel_data, request.user) - return Response({ - "success": True, - "root": obj.chef_tree.pk, - "channel_id": obj.pk, - }) + return Response( + { + "success": True, + "root": obj.chef_tree.pk, + "channel_id": obj.pk, + } + ) except KeyError: - return HttpResponseBadRequest("Required attribute missing from data: {}".format(data)) + return HttpResponseBadRequest( + "Required attribute missing from data: {}".format(data) + ) except Exception as e: handle_server_error(e, request) return HttpResponseServerError(content=str(e), reason=str(e)) @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def api_commit_channel(request): """ @@ -246,25 +277,36 @@ def api_commit_channel(request): old_staging.title = "Old staging tree for channel {}".format(obj.pk) old_staging.save() - async_result = 
generatenodediff_task.enqueue(request.user, updated_id=obj.staging_tree.id, original_id=obj.main_tree.id) + async_result = generatenodediff_task.enqueue( + request.user, updated_id=obj.staging_tree.id, original_id=obj.main_tree.id + ) # Send response back to the content integration script - return Response({ - "success": True, - "new_channel": obj.pk, - "diff_task_id": async_result.task_id, - }) + return Response( + { + "success": True, + "new_channel": obj.pk, + "diff_task_id": async_result.task_id, + } + ) except (Channel.DoesNotExist, PermissionDenied): return HttpResponseNotFound("No channel matching: {}".format(channel_id)) except KeyError: - return HttpResponseBadRequest("Required attribute missing from data: {}".format(data)) + return HttpResponseBadRequest( + "Required attribute missing from data: {}".format(data) + ) except Exception as e: handle_server_error(e, request) return HttpResponseServerError(content=str(e), reason=str(e)) @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def api_add_nodes_to_tree(request): """ @@ -289,19 +331,27 @@ def api_add_nodes_to_tree(request): """ data = json.loads(request.body) try: - content_data = data['content_data'] - parent_id = data['root_id'] - ContentNode.filter_edit_queryset(ContentNode.objects.all(), request.user).get(id=parent_id) - return Response({ - "success": True, - "root_ids": convert_data_to_nodes(request.user, content_data, parent_id) - }) + content_data = data["content_data"] + parent_id = data["root_id"] + ContentNode.filter_edit_queryset(ContentNode.objects.all(), request.user).get( + id=parent_id + ) + return Response( + { + "success": True, + "root_ids": convert_data_to_nodes( + request.user, content_data, parent_id + ), + } + ) except ContentNode.DoesNotExist: return HttpResponseNotFound("No content matching: {}".format(parent_id)) except ValidationError as e: return HttpResponseBadRequest(content=str(e)) except KeyError: - return HttpResponseBadRequest("Required attribute missing from data: {}".format(data)) + return HttpResponseBadRequest( + "Required attribute missing from data: {}".format(data) + ) except NodeValidationError as e: return HttpResponseBadRequest(str(e)) except Exception as e: @@ -310,7 +360,12 @@ def api_add_nodes_to_tree(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def api_publish_channel(request): logging.debug("Entering the publish_channel endpoint") @@ -321,16 +376,20 @@ def api_publish_channel(request): # Ensure that the user has permission to edit this channel. 
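        # Channel.get_editable raises Channel.DoesNotExist both when the channel
        # is missing and when the user lacks edit rights (presumably by querying
        # through the user's editable channels); the except clause below maps
        # either case to a 404 response.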
Channel.get_editable(request.user, channel_id) - event = generate_publish_event(channel_id, version_notes=data.get('version_notes')) + event = generate_publish_event( + channel_id, version_notes=data.get("version_notes") + ) Change.create_change(event, created_by_id=request.user.pk) apply_channel_changes_task.fetch_or_enqueue(request.user, channel_id=channel_id) - return Response({ - "success": True, - "channel": channel_id, - }) + return Response( + { + "success": True, + "channel": channel_id, + } + ) except (KeyError, Channel.DoesNotExist): return HttpResponseNotFound("No channel matching: {}".format(data)) except Exception as e: @@ -339,7 +398,12 @@ def api_publish_channel(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def check_user_is_editor(request): """ Create the channel node """ @@ -357,7 +421,12 @@ def check_user_is_editor(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def get_tree_data(request): """ @@ -369,9 +438,9 @@ def get_tree_data(request): if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) try: - channel_id = serializer.validated_data['channel_id'] + channel_id = serializer.validated_data["channel_id"] channel = Channel.get_editable(request.user, channel_id) - tree_name = "{}_tree".format(serializer.validated_data['tree']) + tree_name = "{}_tree".format(serializer.validated_data["tree"]) tree_root = getattr(channel, tree_name, None) if tree_root is None: raise ValueError("Invalid tree name") @@ -388,7 +457,12 @@ def get_tree_data(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def get_node_tree_data(request): """ @@ -400,9 +474,9 @@ def get_node_tree_data(request): if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) try: - channel_id = serializer.validated_data['channel_id'] + channel_id = serializer.validated_data["channel_id"] channel = Channel.get_editable(request.user, channel_id) - tree_name = "{}_tree".format(serializer.validated_data['tree']) + tree_name = "{}_tree".format(serializer.validated_data["tree"]) tree_root = getattr(channel, tree_name, None) if "node_id" in serializer.validated_data: node = ( @@ -428,17 +502,26 @@ def get_node_tree_data(request): @api_view(["POST"]) -@authentication_classes((TokenAuthentication, SessionAuthentication,)) +@authentication_classes( + ( + TokenAuthentication, + SessionAuthentication, + ) +) @permission_classes((IsAuthenticated,)) def get_channel_status_bulk(request): """ Create the channel node """ data = json.loads(request.body) try: - channel_ids = data['channel_ids'] - permissioned_ids = set(Channel.filter_edit_queryset(Channel.objects.all(), request.user).filter(id__in=channel_ids).values_list("id", flat=True)) + channel_ids = data["channel_ids"] + permissioned_ids = set( + Channel.filter_edit_queryset(Channel.objects.all(), request.user) + .filter(id__in=channel_ids) + .values_list("id", flat=True) + ) if permissioned_ids != set(channel_ids): raise PermissionDenied() - statuses = {cid: 
get_status(cid) for cid in data['channel_ids']} + statuses = {cid: get_status(cid) for cid in data["channel_ids"]} return Response({"success": True, "statuses": statuses}) except (Channel.DoesNotExist, PermissionDenied): @@ -469,7 +552,9 @@ def get_status(channel_id): def create_channel(channel_data, user): """ Set up channel """ # Set up initial channel - channel, isNew = Channel.objects.get_or_create(id=channel_data["id"], actor_id=user.id) + channel, isNew = Channel.objects.get_or_create( + id=channel_data["id"], actor_id=user.id + ) # Add user as editor if channel is new if isNew: @@ -480,14 +565,14 @@ def create_channel(channel_data, user): except Channel.DoesNotExist: raise SuspiciousOperation("User is not authorized to edit this channel") - extra_fields = channel_data.get('extra_fields') or {} + extra_fields = channel_data.get("extra_fields") or {} if isinstance(extra_fields, str): extra_fields = json.loads(extra_fields) - extra_fields.update({'ricecooker_version': channel.ricecooker_version}) + extra_fields.update({"ricecooker_version": channel.ricecooker_version}) - channel.name = channel_data['name'] - channel.description = channel_data['description'] - channel.thumbnail = channel_data['thumbnail'] + channel.name = channel_data["name"] + channel.description = channel_data["description"] + channel.thumbnail = channel_data["thumbnail"] channel.deleted = False channel.source_id = channel_data.get("source_id") channel.source_domain = channel_data.get("source_domain") @@ -542,8 +627,8 @@ class IncompleteNodeError(Exception): """ def __init__(self, node, errors): - self.message = ( - "Node {} had the following errors: {}".format(node, ",".join(errors)) + self.message = "Node {} had the following errors: {}".format( + node, ",".join(errors) ) super(IncompleteNodeError, self).__init__(self.message) @@ -560,7 +645,9 @@ def add_tags(node, node_data): raise ValidationError("tag is greater than 30 characters") else: tags.append( - ContentTag.objects.get_or_create(tag_name=tag, channel=channel)[0] + ContentTag.objects.get_or_create(tag_name=tag, channel=channel)[ + 0 + ] ) if len(tags) > 0: @@ -577,7 +664,9 @@ def validate_metadata_labels(node_data): metadata_labels[label] = {} for value in node_data[label]: if value not in valid_values: - raise NodeValidationError("{} is not a valid value for {}".format(value, label)) + raise NodeValidationError( + "{} is not a valid value for {}".format(value, label) + ) metadata_labels[label][value] = True return metadata_labels @@ -596,13 +685,17 @@ def handle_remote_node(user, node_data, parent_node): raise NodeValidationError("Both source_node_id and source_content_id are None") try: - channel = Channel.filter_view_queryset(Channel.objects.all(), user).get(id=source_channel_id) + channel = Channel.filter_view_queryset(Channel.objects.all(), user).get( + id=source_channel_id + ) except Channel.DoesNotExist: raise NodeValidationError("source_channel_id does not exist") contentnode = None - channel_resource_nodes = channel.main_tree.get_descendants().exclude(kind=content_kinds.TOPIC) + channel_resource_nodes = channel.main_tree.get_descendants().exclude( + kind=content_kinds.TOPIC + ) if source_node_id: try: @@ -611,7 +704,9 @@ def handle_remote_node(user, node_data, parent_node): pass if contentnode is None and source_content_id: - contentnode = channel_resource_nodes.filter(content_id=source_content_id).first() + contentnode = channel_resource_nodes.filter( + content_id=source_content_id + ).first() if contentnode is None: raise NodeValidationError( @@ 
-630,7 +725,11 @@ def handle_remote_node(user, node_data, parent_node): ContentNode.filter_by_pk(pk=contentnode.id), user=user ).exists() - return contentnode.copy_to(target=parent_node, mods=node_data, can_edit_source_channel=can_edit_source_channel) + return contentnode.copy_to( + target=parent_node, + mods=node_data, + can_edit_source_channel=can_edit_source_channel, + ) @delay_user_storage_calculation @@ -640,7 +739,11 @@ def convert_data_to_nodes(user, content_data, parent_node): # noqa: C901 root_mapping = {} parent_node = ContentNode.objects.get(pk=parent_node) if parent_node.kind_id != content_kinds.TOPIC: - raise NodeValidationError("Parent node must be a topic/folder | actual={}".format(parent_node.kind_id)) + raise NodeValidationError( + "Parent node must be a topic/folder | actual={}".format( + parent_node.kind_id + ) + ) sort_order = parent_node.children.count() + 1 existing_node_ids = ContentNode.objects.filter( @@ -732,14 +835,18 @@ def create_node(node_data, parent_node, sort_order): # noqa: C901 # validate completion criteria if "options" in extra_fields and "completion_criteria" in extra_fields["options"]: try: - completion_criteria.validate(extra_fields["options"]["completion_criteria"], kind=node_data['kind']) + completion_criteria.validate( + extra_fields["options"]["completion_criteria"], kind=node_data["kind"] + ) except completion_criteria.ValidationError: - raise NodeValidationError("Node {} has invalid completion criteria".format(node_data["node_id"])) + raise NodeValidationError( + "Node {} has invalid completion criteria".format(node_data["node_id"]) + ) # Validate title and license fields - title = node_data.get('title', "") - license_description = node_data.get('license_description', "") - copyright_holder = node_data.get('copyright_holder', "") + title = node_data.get("title", "") + license_description = node_data.get("license_description", "") + copyright_holder = node_data.get("copyright_holder", "") metadata_labels = validate_metadata_labels(node_data) @@ -763,7 +870,7 @@ def create_node(node_data, parent_node, sort_order): # noqa: C901 source_domain=node_data.get("source_domain"), language_id=node_data.get("language"), freeze_authoring_data=True, - role_visibility=node_data.get('role') or roles.LEARNER, + role_visibility=node_data.get("role") or roles.LEARNER, # Assume it is complete to start with, we will do validation # later when we have all data available to determine if it is # complete or not. 
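As a point of reference for the completion-criteria hunk above, here is a minimal standalone sketch of the same validate call. The payload shape, the completion_criteria module, and the ValidationError handling are taken from this diff; the sample kind, node_id, and threshold values are illustrative assumptions, not values from the patch.

from le_utils.constants import content_kinds
from contentcuration.constants import completion_criteria

# Assumed sample payload; mirrors the extra_fields shape checked in create_node.
node_data = {
    "kind": content_kinds.VIDEO,
    "node_id": "0" * 32,
    "extra_fields": {
        "options": {
            # Illustrative criterion (assumed values): complete after 300 seconds.
            "completion_criteria": {"model": "time", "threshold": 300},
        }
    },
}

extra_fields = node_data["extra_fields"]
if "options" in extra_fields and "completion_criteria" in extra_fields["options"]:
    try:
        completion_criteria.validate(
            extra_fields["options"]["completion_criteria"], kind=node_data["kind"]
        )
    except completion_criteria.ValidationError:
        # create_node wraps this in a NodeValidationError naming the node_id.
        raise

If the criterion fails validation, create_node surfaces the offending node_id in the NodeValidationError, as shown in the hunk above.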
@@ -782,7 +889,9 @@ def create_exercises(user, node, data): # First check that all assessment_ids are unique within the node assessment_ids = [question.get("assessment_id") for question in data] if len(assessment_ids) != len(set(assessment_ids)): - raise NodeValidationError("Duplicate assessment_ids found in node {}".format(node.node_id)) + raise NodeValidationError( + "Duplicate assessment_ids found in node {}".format(node.node_id) + ) with transaction.atomic(): order = 0 diff --git a/contentcuration/contentcuration/views/json_dump.py b/contentcuration/contentcuration/views/json_dump.py index b3502e7a9e..0678420ecd 100644 --- a/contentcuration/contentcuration/views/json_dump.py +++ b/contentcuration/contentcuration/views/json_dump.py @@ -1,4 +1,5 @@ import json + from rest_framework.renderers import JSONRenderer """ diff --git a/contentcuration/contentcuration/views/nodes.py b/contentcuration/contentcuration/views/nodes.py index 635c7b00c1..bb5f153ae1 100644 --- a/contentcuration/contentcuration/views/nodes.py +++ b/contentcuration/contentcuration/views/nodes.py @@ -29,12 +29,14 @@ @api_view(["GET"]) @permission_classes((AllowAny,)) def get_channel_details(request, channel_id): - """ Generates data for channel contents. Used for look-inside previews - Keyword arguments: - channel_id (str): id of channel to get details from + """Generates data for channel contents. Used for look-inside previews + Keyword arguments: + channel_id (str): id of channel to get details from """ # Get nodes and channel - channel = get_object_or_404(Channel.filter_view_queryset(Channel.objects.all(), request.user), id=channel_id) + channel = get_object_or_404( + Channel.filter_view_queryset(Channel.objects.all(), request.user), id=channel_id + ) if not channel.main_tree: raise Http404 data = get_node_details_cached(request.user, channel.main_tree, channel) @@ -66,7 +68,9 @@ def get_node_details_cached(user, node, channel): # for the CTE select query. 
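        # The CTE gathers just the columns needed to find nodes flagged as
        # changed anywhere in this tree; its Max("modified") below is compared
        # against the timestamp stored with the cached details to decide
        # whether to refresh the stats asynchronously.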
cte = With( ContentNode.objects.filter(tree_id=node.tree_id) - .values("id", "modified", "changed", "tree_id", "parent_id", "lft", "rght") + .values( + "id", "modified", "changed", "tree_id", "parent_id", "lft", "rght" + ) .order_by() ) last_update_qs = cte.queryset().with_cte(cte).filter(changed=True) @@ -82,13 +86,18 @@ def get_node_details_cached(user, node, channel): # Maintain that query should not 'include_self' last_update_qs = last_update_qs.filter(parent_id__isnull=False) - last_update = last_update_qs.aggregate(latest_update=Max("modified")).get("latest_update") + last_update = last_update_qs.aggregate(latest_update=Max("modified")).get( + "latest_update" + ) if last_update: last_cache_update = datetime.strptime( json.loads(cached_data)["last_update"], settings.DATE_TIME_FORMAT ) - if not user.is_anonymous and last_update.replace(tzinfo=None) > last_cache_update: + if ( + not user.is_anonymous + and last_update.replace(tzinfo=None) > last_cache_update + ): # update the stats async, then return the cached value getnodedetails_task.enqueue(user, node_id=node.pk) return json.loads(cached_data) @@ -102,7 +111,9 @@ def get_node_details_cached(user, node, channel): def get_node_diff(request, updated_id, original_id): try: # Get queryset to test permissions - nodes = ContentNode.filter_view_queryset(ContentNode.objects.all(), request.user) + nodes = ContentNode.filter_view_queryset( + ContentNode.objects.all(), request.user + ) updated = nodes.get(pk=updated_id) original = nodes.get(pk=original_id) @@ -111,14 +122,16 @@ def get_node_diff(request, updated_id, original_id): if data: return Response(data) - signature = generatenodediff_task.generate_signature(dict(updated_id=updated_id, original_id=original_id)) + signature = generatenodediff_task.generate_signature( + dict(updated_id=updated_id, original_id=original_id) + ) # See if there's already a staging task in progress if generatenodediff_task.find_incomplete_ids(signature).exists(): - return Response('Diff is being generated', status=status.HTTP_302_FOUND) + return Response("Diff is being generated", status=status.HTTP_302_FOUND) except ContentNode.DoesNotExist: pass - return Response('Diff is not available', status=status.HTTP_404_NOT_FOUND) + return Response("Diff is not available", status=status.HTTP_404_NOT_FOUND) @api_view(["POST"]) @@ -127,13 +140,17 @@ def get_node_diff(request, updated_id, original_id): def generate_node_diff(request, updated_id, original_id): try: # Get queryset to test permissions - nodes = ContentNode.filter_view_queryset(ContentNode.objects.all(), request.user).values("id") + nodes = ContentNode.filter_view_queryset( + ContentNode.objects.all(), request.user + ).values("id") nodes.get(pk=updated_id) nodes.get(pk=original_id) except ContentNode.DoesNotExist: - return Response('Diff is not available', status=status.HTTP_403_FORBIDDEN) + return Response("Diff is not available", status=status.HTTP_403_FORBIDDEN) # See if there's already a staging task in progress - generatenodediff_task.fetch_or_enqueue(request.user, updated_id=updated_id, original_id=original_id) - return Response('Diff is being generated') + generatenodediff_task.fetch_or_enqueue( + request.user, updated_id=updated_id, original_id=original_id + ) + return Response("Diff is being generated") diff --git a/contentcuration/contentcuration/views/pwa.py b/contentcuration/contentcuration/views/pwa.py index a150527d1b..ff7af3ce97 100644 --- a/contentcuration/contentcuration/views/pwa.py +++ b/contentcuration/contentcuration/views/pwa.py @@ -17,6 
+17,7 @@ def get_context_data(self, **kwargs): content = None if getattr(settings, "DEBUG", False): import requests + try: request = requests.get("http://127.0.0.1:4000/dist/serviceWorker.js") content = request.content.decode("utf-8") diff --git a/contentcuration/contentcuration/views/settings.py b/contentcuration/contentcuration/views/settings.py index 9900e9e794..8f2444b158 100644 --- a/contentcuration/contentcuration/views/settings.py +++ b/contentcuration/contentcuration/views/settings.py @@ -47,7 +47,9 @@ @browser_is_supported def settings(request): current_user = current_user_for_context(request.user) - channel_query = request.user.editable_channels.filter(deleted=False).annotate(editor_count=Count("editors")) + channel_query = request.user.editable_channels.filter(deleted=False).annotate( + editor_count=Count("editors") + ) return render( request, @@ -65,7 +67,9 @@ def settings(request): @login_required @api_view(["GET"]) def export_user_data(request): - generateusercsv_task.enqueue(request.user, user_id=request.user.pk, language=get_language()) + generateusercsv_task.enqueue( + request.user, user_id=request.user.pk, language=get_language() + ) return HttpResponse({"success": True}) diff --git a/contentcuration/contentcuration/views/users.py b/contentcuration/contentcuration/views/users.py index ecf5d85ac3..66a6652d0b 100644 --- a/contentcuration/contentcuration/views/users.py +++ b/contentcuration/contentcuration/views/users.py @@ -71,7 +71,13 @@ def send_invitation_email(request): } # Need to break into two steps to avoid MultipleObjectsReturned error - invitation = Invitation.objects.filter(channel_id=channel_id, email=user_email, revoked=False, accepted=False, declined=False).first() + invitation = Invitation.objects.filter( + channel_id=channel_id, + email=user_email, + revoked=False, + accepted=False, + declined=False, + ).first() if not invitation: invitation = Invitation.objects.create(**fields) @@ -81,19 +87,23 @@ def send_invitation_email(request): invitation.sender = invitation.sender or request.user invitation.save() - ctx_dict = {'sender': request.user, - 'site': get_current_site(request), - 'user': recipient, - 'email': user_email, - 'first_name': recipient.first_name if recipient else user_email, - 'share_mode': share_mode, - 'channel_id': channel_id, - 'invitation_key': invitation.id, - 'channel': channel.name, - 'domain': "https://{}".format(Site.objects.get_current().domain), - } - subject = render_to_string('permissions/permissions_email_subject.txt', ctx_dict) - message = render_to_string('permissions/permissions_email.txt', ctx_dict) + ctx_dict = { + "sender": request.user, + "site": get_current_site(request), + "user": recipient, + "email": user_email, + "first_name": recipient.first_name if recipient else user_email, + "share_mode": share_mode, + "channel_id": channel_id, + "invitation_key": invitation.id, + "channel": channel.name, + "domain": "https://{}".format(Site.objects.get_current().domain), + } + subject = render_to_string( + "permissions/permissions_email_subject.txt", ctx_dict + ) + subject = "".join(subject.splitlines()) + message = render_to_string("permissions/permissions_email.txt", ctx_dict) send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user_email]) except KeyError: return HttpResponseBadRequest( @@ -138,7 +148,9 @@ def login(request): return HttpResponseForbidden() # User is not activated if not user.is_active and user.check_password(password): - return HttpResponseBadRequest(status=405, reason="Account hasn't been activated") + 
return HttpResponseBadRequest( + status=405, reason="Account hasn't been activated" + ) user = authenticate(username=user.email, password=password) if user is not None: @@ -220,7 +232,7 @@ def get_user(self, username): return User.get_for_email(username, is_active=False) def activate(self, *args, **kwargs): - username = self.validate_key(kwargs.get('activation_key')) + username = self.validate_key(kwargs.get("activation_key")) if not username: return False @@ -228,7 +240,9 @@ def activate(self, *args, **kwargs): user = User.get_for_email(username) if user and user.is_active: if username != user.email: - logger.warning("Attempted to activate alternate-cased username with already active user") + logger.warning( + "Attempted to activate alternate-cased username with already active user" + ) return False return user @@ -240,6 +254,7 @@ def activate(self, *args, **kwargs): "registration/custom_email_subject.txt", {"subject": "New Kolibri Studio Registration"}, ) + subject = "".join(subject.splitlines()) message = render_to_string( "registration/registration_information_email.txt", {"user": user, "information": dict(user.information)}, @@ -251,16 +266,16 @@ def activate(self, *args, **kwargs): [settings.REGISTRATION_INFORMATION_EMAIL], ) # Send email to welcome new user - subject = render_to_string("registration/welcome_new_user_email_subject.txt") + subject = render_to_string( + "registration/welcome_new_user_email_subject.txt" + ) + subject = "".join(subject.splitlines()) message = render_to_string( "registration/welcome_new_user_email.html", - {"domain": "https://{}".format(Site.objects.get_current().domain)} + {"domain": "https://{}".format(Site.objects.get_current().domain)}, ) user.email_user( - subject, - message, - settings.DEFAULT_FROM_EMAIL, - html_message=message + subject, message, settings.DEFAULT_FROM_EMAIL, html_message=message ) return user @@ -329,7 +344,7 @@ def request_activation_link(request): ) data = json.loads(request.body) try: - user = User.get_for_email(data['email']) + user = User.get_for_email(data["email"]) if user and not user.is_active: registration_view = UserRegistrationView() registration_view.request = request diff --git a/contentcuration/contentcuration/views/zip.py b/contentcuration/contentcuration/views/zip.py index 7cac41ce35..48480bdff5 100644 --- a/contentcuration/contentcuration/views/zip.py +++ b/contentcuration/contentcuration/views/zip.py @@ -57,7 +57,7 @@ def parse_html(content): # always create head and body tags if they are missing. head = document.find("head") for file in get_files("htmlScreenshot", "js"): - SubElement(head, "script", attrib={"src": file['url']}) + SubElement(head, "script", attrib={"src": file["url"]}) # Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original # content for the doctype and, if found, prepend it to the content serialized by html5lib doctype = None @@ -97,11 +97,11 @@ def parse_html(content): except html5lib.html5parser.ParseError: return content + # DISK PATHS class ZipContentView(View): - @xframe_options_exempt def options(self, request, *args, **kwargs): """ @@ -117,7 +117,9 @@ def get(self, request, zipped_filename, embedded_filepath): # noqa: C901 Handles GET requests and serves a static file from within the zip file. 
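        Filenames are validated against VALID_STORAGE_FILENAME before any
        storage access, and successful responses are cached aggressively
        because content files are MD5-named and therefore immutable.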
""" if not VALID_STORAGE_FILENAME.match(zipped_filename): - return HttpResponseNotFound("'{}' is not a valid URL for this zip file".format(zipped_filename)) + return HttpResponseNotFound( + "'{}' is not a valid URL for this zip file".format(zipped_filename) + ) storage = default_storage @@ -130,10 +132,12 @@ def get(self, request, zipped_filename, embedded_filepath): # noqa: C901 # if the zipfile does not exist on disk, return a 404 if not storage.exists(zipped_path): - return HttpResponseNotFound('"%(filename)s" does not exist in storage' % {'filename': zipped_path}) + return HttpResponseNotFound( + '"%(filename)s" does not exist in storage' % {"filename": zipped_path} + ) # if client has a cached version, use that (we can safely assume nothing has changed, due to MD5) - if request.META.get('HTTP_IF_MODIFIED_SINCE'): + if request.META.get("HTTP_IF_MODIFIED_SINCE"): return HttpResponseNotModified() zf_obj = storage.open(zipped_path) @@ -148,41 +152,63 @@ def get(self, request, zipped_filename, embedded_filepath): # noqa: C901 try: info = zf.getinfo(embedded_filepath) except KeyError: - return HttpResponseNotFound('"{}" does not exist inside "{}"'.format(embedded_filepath, zipped_filename)) + return HttpResponseNotFound( + '"{}" does not exist inside "{}"'.format( + embedded_filepath, zipped_filename + ) + ) # try to guess the MIME type of the embedded file being referenced - content_type = mimetypes.guess_type(embedded_filepath)[0] or 'application/octet-stream' + content_type = ( + mimetypes.guess_type(embedded_filepath)[0] + or "application/octet-stream" + ) - if embedded_filepath.endswith(".html") and request.GET.get("screenshot"): - content_type = 'text/html' + if embedded_filepath.endswith(".html") and request.GET.get( + "screenshot" + ): + content_type = "text/html" content = zf.open(info).read() - response = HttpResponse(parse_html(content), content_type=content_type) + response = HttpResponse( + parse_html(content), content_type=content_type + ) file_size = info.file_size - elif not os.path.splitext(embedded_filepath)[1] == '.json': + elif not os.path.splitext(embedded_filepath)[1] == ".json": # generate a streaming response object, pulling data from within the zip file response = FileResponse(zf.open(info), content_type=content_type) file_size = info.file_size else: # load the stream from json file into memory, replace the path_place_holder. content = zf.open(info).read() - str_to_be_replaced = ('$' + exercises.IMG_PLACEHOLDER).encode() - zipcontent = ('/' + request.resolver_match.url_name + "/" + zipped_filename).encode() + str_to_be_replaced = ("$" + exercises.IMG_PLACEHOLDER).encode() + zipcontent = ( + "/" + request.resolver_match.url_name + "/" + zipped_filename + ).encode() content_with_path = content.replace(str_to_be_replaced, zipcontent) - response = HttpResponse(content_with_path, content_type=content_type) + response = HttpResponse( + content_with_path, content_type=content_type + ) file_size = len(content_with_path) except zipfile.BadZipfile: - just_downloaded = getattr(zf_obj, 'just_downloaded', "Unknown (Most likely local file)") - capture_message("Unable to open zip file. File info: name={}, size={}, mode={}, just_downloaded={}".format( - zf_obj.name, zf_obj.size, zf_obj.mode, just_downloaded)) + just_downloaded = getattr( + zf_obj, "just_downloaded", "Unknown (Most likely local file)" + ) + capture_message( + "Unable to open zip file. 
File info: name={}, size={}, mode={}, just_downloaded={}".format( + zf_obj.name, zf_obj.size, zf_obj.mode, just_downloaded + ) + ) return HttpResponseServerError( "Attempt to open zip file failed. Please try again, and if you continue to receive this message, please check that the zip file is valid." ) # set the last-modified header to the date marked on the embedded file if info.date_time: - response["Last-Modified"] = http_date(time.mktime(datetime.datetime(*info.date_time).timetuple())) + response["Last-Modified"] = http_date( + time.mktime(datetime.datetime(*info.date_time).timetuple()) + ) # cache these resources forever; this is safe due to the MD5-naming used on content files response["Expires"] = "Sun, 17-Jan-2038 19:14:07 GMT" @@ -198,10 +224,14 @@ def get(self, request, zipped_filename, embedded_filepath): # noqa: C901 # restrict CSP to only allow resources to be loaded from the Studio host, to prevent info leakage # (e.g. via passing user info out as GET parameters to an attacker's server), or inadvertent data usage - host = request.build_absolute_uri('/').strip("/") - response["Content-Security-Policy"] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: " + host + host = request.build_absolute_uri("/").strip("/") + response["Content-Security-Policy"] = ( + "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: " + host + ) if getattr(settings, "DEBUG", False): - response["Content-Security-Policy"] += " http://127.0.0.1:4000 ws://127.0.0.1:4000" + response[ + "Content-Security-Policy" + ] += " http://127.0.0.1:4000 ws://127.0.0.1:4000" return response diff --git a/contentcuration/contentcuration/viewsets/assessmentitem.py b/contentcuration/contentcuration/viewsets/assessmentitem.py index 06996edd90..86a724a1d2 100644 --- a/contentcuration/contentcuration/viewsets/assessmentitem.py +++ b/contentcuration/contentcuration/viewsets/assessmentitem.py @@ -107,18 +107,18 @@ def validate_answers(self, value): answers = json.loads(value) for answer in answers: if not type(answer) is dict: - raise ValidationError('JSON Data Invalid for answers') - if not all(k in answer for k in ('answer', 'correct', 'order')): - raise ValidationError('Incorrect field in answers') + raise ValidationError("JSON Data Invalid for answers") + if not all(k in answer for k in ("answer", "correct", "order")): + raise ValidationError("Incorrect field in answers") return value def validate_hints(self, value): hints = json.loads(value) for hint in hints: if not type(hint) is dict: - raise ValidationError('JSON Data Invalid for hints') - if not all(k in hint for k in ('hint', 'order')): - raise ValidationError('Incorrect field in hints') + raise ValidationError("JSON Data Invalid for hints") + if not all(k in hint for k in ("hint", "order")): + raise ValidationError("Incorrect field in hints") return value def set_files(self, all_objects, all_validated_data=None): # noqa C901 @@ -131,15 +131,21 @@ def set_files(self, all_objects, all_validated_data=None): # noqa C901 # If this is an update operation, check the validated data for which items # have had these fields modified. md_fields_modified = { - self.id_value_lookup(ai) for ai in all_validated_data if "question" in ai or "hints" in ai or "answers" in ai + self.id_value_lookup(ai) + for ai in all_validated_data + if "question" in ai or "hints" in ai or "answers" in ai } else: # If this is a create operation, just check if these fields are not null. 
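            # Either way, md_fields_modified narrows the work below to items
            # whose question/hints/answers markdown could reference files, so
            # unrelated assessment items skip the file-diffing entirely.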
md_fields_modified = { - self.id_value_lookup(ai) for ai in all_objects if ai.question or ai.hints or ai.answers + self.id_value_lookup(ai) + for ai in all_objects + if ai.question or ai.hints or ai.answers } - all_objects = [ai for ai in all_objects if self.id_value_lookup(ai) in md_fields_modified] + all_objects = [ + ai for ai in all_objects if self.id_value_lookup(ai) in md_fields_modified + ] for file in File.objects.filter(assessment_item__in=all_objects): if file.assessment_item_id not in current_files_by_aitem: @@ -192,7 +198,7 @@ def set_files(self, all_objects, all_validated_data=None): # noqa C901 raise ValidationError( "Attempted to set files to an assessment item that do not have a file on the server" ) - File.objects.bulk_update(source_files, ['assessment_item']) + File.objects.bulk_update(source_files, ["assessment_item"]) def create(self, validated_data): with transaction.atomic(): diff --git a/contentcuration/contentcuration/viewsets/base.py b/contentcuration/contentcuration/viewsets/base.py index 161588a675..1627148fe4 100644 --- a/contentcuration/contentcuration/viewsets/base.py +++ b/contentcuration/contentcuration/viewsets/base.py @@ -89,7 +89,7 @@ def id_value_lookup(self, data): id_values = (self.get_value(data, attr) for attr in id_attr) # For the combined index, use any related objects' primary key - combined_index = (idx.pk if hasattr(idx, 'pk') else idx for idx in id_values) + combined_index = (idx.pk if hasattr(idx, "pk") else idx for idx in id_values) return tuple(combined_index) def set_id_values(self, data, obj): @@ -317,11 +317,14 @@ def update(self, queryset, all_validated_data): self.changes.extend(self.child.changes) if len(all_validated_data_by_id) != len(updated_keys): - self.missing_keys = set(all_validated_data_by_id.keys())\ - .difference(updated_keys) + self.missing_keys = set(all_validated_data_by_id.keys()).difference( + updated_keys + ) if len(properties_to_update) > 0: - self.child.Meta.model.objects.bulk_update(updated_objects, list(properties_to_update)) + self.child.Meta.model.objects.bulk_update( + updated_objects, list(properties_to_update) + ) return updated_objects @@ -366,7 +369,6 @@ def save(self, **kwargs): class ValuesViewsetOrderingFilter(OrderingFilter): - def get_default_valid_fields(self, queryset, view, context=None): """ The original implementation of this makes the assumption that the DRF serializer for the class @@ -397,7 +399,11 @@ def get_default_valid_fields(self, queryset, view, context=None): fk_ref = field.split("__")[0] # Check either if the field is a model field, a currently annotated annotation, or # is a foreign key lookup on an FK on this model. - if field in model_fields or field in queryset.query.annotations or fk_ref in model_fields: + if ( + field in model_fields + or field in queryset.query.annotations + or fk_ref in model_fields + ): # If the field is a mapped field, we store the field name as returned to the client # not the actual internal field - this will later be mapped when we come to do the ordering. if field in mapped_fields: @@ -414,7 +420,10 @@ def remove_invalid_fields(self, queryset, fields, view, request): """ # We filter the mapped fields to ones that do simple string mappings here, any functional maps are excluded. 
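        # Only plain string mappings can be reversed into real ORM field names
        # for order_by; a callable in field_map has no database counterpart,
        # so ordering on it is not offered as a valid option.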
mapped_fields = {k: v for k, v in view.field_map.items() if isinstance(v, str)} - valid_fields = [item[0] for item in self.get_valid_fields(queryset, view, {'request': request})] + valid_fields = [ + item[0] + for item in self.get_valid_fields(queryset, view, {"request": request}) + ] ordering = [] for term in fields: if term.lstrip("-") in valid_fields: @@ -432,7 +441,6 @@ def remove_invalid_fields(self, queryset, fields, view, request): class RequiredFilterSet(FilterSet): - def __init__(self, required=False, **kwargs): self._required = required super().__init__(**kwargs) @@ -449,7 +457,9 @@ def qs(self): has_filtering_queries = True break if not has_filtering_queries and self.request.method == "GET": - raise MissingRequiredParamsException("No valid filter parameters supplied") + raise MissingRequiredParamsException( + "No valid filter parameters supplied" + ) return super(FilterSet, self).qs @@ -458,6 +468,7 @@ class RequiredFiltersFilterBackend(DjangoFilterBackend): Override the default filter backend to conditionalize initialization if we are using a RequiredFilterSet """ + def get_filterset(self, request, queryset, view): filterset_class = self.get_filterset_class(view, queryset) if filterset_class is None: @@ -481,6 +492,7 @@ class BaseValuesViewset(SimpleReprMixin, GenericViewSet): a single database query, rather than delegating serialization to a DRF ModelSerializer. """ + filter_backends = (RequiredFiltersFilterBackend, ValuesViewsetOrderingFilter) # A tuple of values to get from the queryset @@ -679,10 +691,7 @@ class ReadOnlyValuesViewset(BaseValuesViewset, RetrieveModelMixin, ListModelMixi class CreateModelMixin(object): def _map_create_change(self, change): - return dict( - list(change["obj"].items()) - + self.values_from_key(change["key"]) - ) + return dict(list(change["obj"].items()) + self.values_from_key(change["key"])) def perform_create(self, serializer, change=None): serializer.save() @@ -759,10 +768,7 @@ def destroy(self, request, *args, **kwargs): class UpdateModelMixin(object): def _map_update_change(self, change): - return dict( - list(change["mods"].items()) - + self.values_from_key(change["key"]) - ) + return dict(list(change["mods"].items()) + self.values_from_key(change["key"])) def perform_update(self, serializer): serializer.save() @@ -935,14 +941,20 @@ def delete_from_changes(self, changes): @contextmanager def create_change_tracker(pk, table, channel_id, user, task_name): - task_kwargs = json.dumps({'pk': pk, 'table': table}) + task_kwargs = json.dumps({"pk": pk, "table": table}) # Clean up any previous tasks specific to this in case there were failures. 
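    # The signature is deterministic for a given task name, kwargs, and
    # channel, so stale TaskResult and CustomTaskMetadata rows left behind by
    # an earlier failed run can be located and deleted before a fresh task is
    # recorded below.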
- signature = generate_task_signature(task_name, task_kwargs=task_kwargs, channel_id=channel_id) + signature = generate_task_signature( + task_name, task_kwargs=task_kwargs, channel_id=channel_id + ) - custom_task_metadata_qs = CustomTaskMetadata.objects.filter(channel_id=channel_id, signature=signature) + custom_task_metadata_qs = CustomTaskMetadata.objects.filter( + channel_id=channel_id, signature=signature + ) if custom_task_metadata_qs.exists(): - task_result_qs = TaskResult.objects.filter(task_id=custom_task_metadata_qs[0].task_id, task_name=task_name) + task_result_qs = TaskResult.objects.filter( + task_id=custom_task_metadata_qs[0].task_id, task_name=task_name + ) if task_result_qs.exists(): task_result_qs[0].delete() custom_task_metadata_qs[0].delete() @@ -955,10 +967,7 @@ def create_change_tracker(pk, table, channel_id, user, task_name): task_name=task_name, ) custom_task_metadata_object = CustomTaskMetadata.objects.create( - task_id=task_id, - channel_id=channel_id, - user=user, - signature=signature + task_id=task_id, channel_id=channel_id, user=user, signature=signature ) def update_progress(progress=None): @@ -968,7 +977,11 @@ def update_progress(progress=None): Change.create_change( # These changes are purely for ephemeral progress updating, and do not constitute a publishable change. - generate_update_event(pk, table, {TASK_ID: task_object.task_id}, channel_id=channel_id), applied=True, unpublishable=True + generate_update_event( + pk, table, {TASK_ID: task_object.task_id}, channel_id=channel_id + ), + applied=True, + unpublishable=True, ) tracker = ProgressTracker(task_id, update_progress) @@ -985,7 +998,11 @@ def update_progress(progress=None): # No error reported, cleanup. # Mark as unpublishable, as this is a continuation of the progress updating, and not a publishable change. 
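        # Record the creation as an already-applied Change event so the new
        # channel is visible through the same Change-based sync that other
        # edits in this codebase use.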
Change.create_change( - generate_update_event(pk, table, {TASK_ID: None}, channel_id=channel_id), applied=True, unpublishable=True + generate_update_event( + pk, table, {TASK_ID: None}, channel_id=channel_id + ), + applied=True, + unpublishable=True, ) task_object.delete() custom_task_metadata_object.delete() diff --git a/contentcuration/contentcuration/viewsets/bookmark.py b/contentcuration/contentcuration/viewsets/bookmark.py index afa4fade9d..590eb9af97 100644 --- a/contentcuration/contentcuration/viewsets/bookmark.py +++ b/contentcuration/contentcuration/viewsets/bookmark.py @@ -21,9 +21,7 @@ class BookmarkSerializer(BulkModelSerializer): class Meta: model = Channel.bookmarked_by.through - fields = ( - "channel", - ) + fields = ("channel",) list_serializer_class = BulkListSerializer update_lookup_field = "channel" @@ -37,9 +35,7 @@ class BookmarkViewSet(ReadOnlyValuesViewset, CreateModelMixin, DestroyModelMixin permission_classes = [IsAuthenticated] serializer_class = BookmarkSerializer - values = ( - "channel_id", - ) + values = ("channel_id",) field_map = { "channel": "channel_id", diff --git a/contentcuration/contentcuration/viewsets/channel.py b/contentcuration/contentcuration/viewsets/channel.py index f4f51a48f4..c2462836d1 100644 --- a/contentcuration/contentcuration/viewsets/channel.py +++ b/contentcuration/contentcuration/viewsets/channel.py @@ -140,12 +140,19 @@ def filter_keywords(self, queryset, name, value): search_query = get_fts_search_query(value) dash_replaced_search_query = get_fts_search_query(value.replace("-", "")) - channel_keywords_query = (Exists(ChannelFullTextSearch.objects.filter( - Q(keywords_tsvector=search_query) | Q(keywords_tsvector=dash_replaced_search_query), - channel_id=OuterRef("id")))) - contentnode_search_query = (Exists(ContentNodeFullTextSearch.objects.filter( - Q(keywords_tsvector=search_query) | Q(author_tsvector=search_query), - channel_id=OuterRef("id")))) + channel_keywords_query = Exists( + ChannelFullTextSearch.objects.filter( + Q(keywords_tsvector=search_query) + | Q(keywords_tsvector=dash_replaced_search_query), + channel_id=OuterRef("id"), + ) + ) + contentnode_search_query = Exists( + ContentNodeFullTextSearch.objects.filter( + Q(keywords_tsvector=search_query) | Q(author_tsvector=search_query), + channel_id=OuterRef("id"), + ) + ) return queryset.filter(Q(channel_keywords_query) | Q(contentnode_search_query)) @@ -242,7 +249,11 @@ def filter_bookmark(self, queryset, name, value): class Meta: model = Channel - fields = base_channel_filter_fields + ("bookmark", "edit", "view",) + fields = base_channel_filter_fields + ( + "bookmark", + "edit", + "view", + ) class ThumbnailEncodingFieldsSerializer(JSONFieldDictSerializer): @@ -400,11 +411,14 @@ def _unpublished_changes_query(channel): channel_ref = OuterRef(channel) if isinstance(channel, OuterRef) else channel return Change.objects.filter( - server_rev__gt=Coalesce(Change.objects.filter( - channel=channel_ref, - change_type=PUBLISHED, - errored=False - ).values("server_rev").order_by("-server_rev")[:1], Value(0)), + server_rev__gt=Coalesce( + Change.objects.filter( + channel=channel_ref, change_type=PUBLISHED, errored=False + ) + .values("server_rev") + .order_by("-server_rev")[:1], + Value(0), + ), channel=channel, # Going forwards, these changes will be marked as unpublishable, # but leave these filters here for now for backwards compatibility @@ -436,8 +450,12 @@ def create(self, request, *args, **kwargs): return Response({"error": str(e)}, status=409) instance = serializer.instance 
Change.create_change( - generate_create_event(instance.id, CHANNEL, request.data, channel_id=instance.id), - applied=True, created_by_id=request.user.id) + generate_create_event( + instance.id, CHANNEL, request.data, channel_id=instance.id + ), + applied=True, + created_by_id=request.user.id, + ) return Response(self.serialize_object(pk=instance.pk), status=HTTP_201_CREATED) def destroy(self, request, *args, **kwargs): @@ -446,7 +464,9 @@ def destroy(self, request, *args, **kwargs): Change.create_change( generate_update_event( instance.id, CHANNEL, {"deleted": True}, channel_id=instance.id - ), applied=True, created_by_id=request.user.id + ), + applied=True, + created_by_id=request.user.id, ) return Response(status=HTTP_204_NO_CONTENT) @@ -491,7 +511,8 @@ def annotate_queryset(self, queryset): ) queryset = queryset.annotate( - unpublished_changes=Exists(_unpublished_changes_query(OuterRef("id")))) + unpublished_changes=Exists(_unpublished_changes_query(OuterRef("id"))) + ) return queryset @@ -501,8 +522,9 @@ def publish_from_changes(self, changes): # Publish change will have key, version_notes, and language. try: self.publish( - publish["key"], version_notes=publish.get("version_notes"), - language=publish.get("language") + publish["key"], + version_notes=publish.get("version_notes"), + language=publish.get("language"), ) except Exception as e: log_sync_exception(e, user=self.request.user, change=publish) @@ -522,8 +544,9 @@ def publish(self, pk, version_notes="", language=None): channel.mark_publishing(self.request.user) - with create_change_tracker(pk, CHANNEL, channel.id, self.request.user, - "export-channel") as progress_tracker: + with create_change_tracker( + pk, CHANNEL, channel.id, self.request.user, "export-channel" + ) as progress_tracker: try: channel = publish_channel( self.request.user.pk, @@ -531,33 +554,55 @@ def publish(self, pk, version_notes="", language=None): version_notes=version_notes, send_email=True, progress_tracker=progress_tracker, - language=language + language=language, + ) + Change.create_changes( + [ + generate_update_event( + channel.id, + CHANNEL, + { + "published": True, + "publishing": False, + "primary_token": channel.get_human_token().token, + "last_published": channel.last_published, + "unpublished_changes": _unpublished_changes_query( + channel + ).exists(), + }, + channel_id=channel.id, + ), + ], + applied=True, + unpublishable=True, ) - Change.create_changes([ - generate_update_event( - channel.id, CHANNEL, { - "published": True, - "publishing": False, - "primary_token": channel.get_human_token().token, - "last_published": channel.last_published, - "unpublished_changes": _unpublished_changes_query(channel).exists() - }, channel_id=channel.id - ), - ], applied=True, unpublishable=True) except ChannelIncompleteError: - Change.create_changes([ - generate_update_event( - channel.id, CHANNEL, {"publishing": False}, channel_id=channel.id - ), - ], applied=True, unpublishable=True) + Change.create_changes( + [ + generate_update_event( + channel.id, + CHANNEL, + {"publishing": False}, + channel_id=channel.id, + ), + ], + applied=True, + unpublishable=True, + ) raise ValidationError("Channel is not ready to be published") except Exception: - Change.create_changes([ - generate_update_event( - channel.id, CHANNEL, {"publishing": False, "unpublished_changes": True}, - channel_id=channel.id - ), - ], applied=True, unpublishable=True) + Change.create_changes( + [ + generate_update_event( + channel.id, + CHANNEL, + {"publishing": False, "unpublished_changes": 
True}, + channel_id=channel.id, + ), + ], + applied=True, + unpublishable=True, + ) raise def publish_next_from_changes(self, changes): @@ -584,8 +629,9 @@ def publish_next(self, pk): channel.staging_tree.publishing = True channel.staging_tree.save() - with create_change_tracker(pk, CHANNEL, channel.id, self.request.user, - "export-channel-staging-tree") as progress_tracker: + with create_change_tracker( + pk, CHANNEL, channel.id, self.request.user, "export-channel-staging-tree" + ) as progress_tracker: try: channel = publish_channel( self.request.user.pk, @@ -593,13 +639,19 @@ def publish_next(self, pk): progress_tracker=progress_tracker, use_staging_tree=True, ) - Change.create_changes([ - generate_update_event( - channel.id, CHANNEL, { - "primary_token": channel.get_human_token().token, - }, channel_id=channel.id - ), - ], applied=True) + Change.create_changes( + [ + generate_update_event( + channel.id, + CHANNEL, + { + "primary_token": channel.get_human_token().token, + }, + channel_id=channel.id, + ), + ], + applied=True, + ) except ChannelIncompleteError: channel.staging_tree.publishing = False channel.staging_tree.save() @@ -619,7 +671,7 @@ def sync_from_changes(self, changes): titles_and_descriptions=sync.get("titles_and_descriptions"), resource_details=sync.get("resource_details"), files=sync.get("files"), - assessment_items=sync.get("assessment_items") + assessment_items=sync.get("assessment_items"), ) except Exception as e: log_sync_exception(e, user=self.request.user, change=sync) @@ -627,8 +679,14 @@ def sync_from_changes(self, changes): errors.append(sync) return errors - def sync(self, pk, titles_and_descriptions=False, resource_details=False, files=False, - assessment_items=False): + def sync( + self, + pk, + titles_and_descriptions=False, + resource_details=False, + files=False, + assessment_items=False, + ): logging.debug("Entering the sync channel endpoint") channel = self.get_edit_queryset().get(pk=pk) @@ -638,19 +696,20 @@ def sync(self, pk, titles_and_descriptions=False, resource_details=False, files= if ( not channel.main_tree.get_descendants() - .filter( + .filter( Q(original_node__isnull=False) | Q( original_channel_id__isnull=False, original_source_node_id__isnull=False, ) ) - .exists() + .exists() ): raise ValidationError("Cannot sync a channel with no imported content") - with create_change_tracker(pk, CHANNEL, channel.id, self.request.user, - "sync-channel") as progress_tracker: + with create_change_tracker( + pk, CHANNEL, channel.id, self.request.user, "sync-channel" + ) as progress_tracker: sync_channel( channel, titles_and_descriptions, @@ -685,7 +744,9 @@ def deploy(self, user, pk): with models.ContentNode.objects.disable_mptt_updates(): garbage_node = get_deleted_chefs_root() channel.previous_tree.parent = garbage_node - channel.previous_tree.title = "Previous tree for channel {}".format(channel.pk) + channel.previous_tree.title = "Previous tree for channel {}".format( + channel.pk + ) channel.previous_tree.save() channel.previous_tree = channel.main_tree @@ -696,18 +757,26 @@ def deploy(self, user, pk): user.staged_files.all().delete() user.set_space_used() - models.Change.create_change(generate_update_event( - channel.id, - CHANNEL, - { - "root_id": channel.main_tree.id, - "staging_root_id": None - }, - channel_id=channel.id, - ), applied=True, created_by_id=user.id) - - @action(detail=True, methods=["get"], url_path='language_exists', url_name='language-exists') - def channel_language_exists(self, request, pk=None) -> Union[JsonResponse, HttpResponse]: 
+ models.Change.create_change( + generate_update_event( + channel.id, + CHANNEL, + {"root_id": channel.main_tree.id, "staging_root_id": None}, + channel_id=channel.id, + ), + applied=True, + created_by_id=user.id, + ) + + @action( + detail=True, + methods=["get"], + url_path="language_exists", + url_name="language-exists", + ) + def channel_language_exists( + self, request, pk=None + ) -> Union[JsonResponse, HttpResponse]: """ Verify that the language set for a channel is present in at least one of its resources. @@ -725,8 +794,10 @@ def channel_language_exists(self, request, pk=None) -> Union[JsonResponse, HttpR return JsonResponse({"exists": lang_exists}) - @action(detail=True, methods=["get"], url_path='languages', url_name='languages') - def get_languages_in_channel(self, request, pk=None) -> Union[JsonResponse, HttpResponse]: + @action(detail=True, methods=["get"], url_path="languages", url_name="languages") + def get_languages_in_channel( + self, request, pk=None + ) -> Union[JsonResponse, HttpResponse]: """ Get all the languages present in a channel's resources. @@ -766,8 +837,11 @@ def _get_channel_details(self, channel_id) -> Dict[str, any]: :rtype: str """ try: - channel_details = (Channel.objects.filter(pk=channel_id) - .values("language_id", "main_tree_id").first()) + channel_details = ( + Channel.objects.filter(pk=channel_id) + .values("language_id", "main_tree_id") + .first() + ) except Channel.DoesNotExist as e: logging.error(str(e)) channel_details = None @@ -777,7 +851,9 @@ def _get_channel_details(self, channel_id) -> Dict[str, any]: return channel_details - def _get_channel_content_languages(self, channel_id, main_tree_id=None) -> List[str]: + def _get_channel_content_languages( + self, channel_id, main_tree_id=None + ) -> List[str]: """ Get all the languages used in a channel's resources. 
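Before the next hunk, a brief sketch of what _get_channel_content_languages computes: the distinct language_ids across a channel's resource nodes. This version leans on the MPTT get_descendants helper used elsewhere in this patch rather than the CTE; the CTE form in the hunk below presumably scales better on large trees. The channel argument is an assumed Channel instance, not a name from the patch.

# Hedged sketch, not the implementation in this patch: collect distinct
# language_ids from a channel's tree using MPTT descendants instead of a CTE.
def channel_content_language_ids(channel):
    lang_ids = (
        channel.main_tree.get_descendants()  # excludes the root node itself
        .filter(language_id__isnull=False)
        .values_list("language_id", flat=True)
        .distinct()
    )
    return list(set(lang_ids))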
@@ -815,12 +891,10 @@ def _get_channel_content_languages(self, channel_id, main_tree_id=None) -> List[ .values("id", "language_id") .order_by() ) - qs = cte.queryset().with_cte(cte).filter( - language_id__isnull=False - ) + qs = cte.queryset().with_cte(cte).filter(language_id__isnull=False) if main_tree_id: qs = qs.exclude(id=main_tree_id) - lang_ids = qs.values_list('language_id', flat=True).distinct() + lang_ids = qs.values_list("language_id", flat=True).distinct() unique_lang_ids = list(set(lang_ids)) except Exception as e: logging.error(str(e)) @@ -893,17 +967,18 @@ def annotate_queryset(self, queryset): class AdminChannelFilter(BaseChannelFilter): def filter_keywords(self, queryset, name, value): keywords = value.split(" ") - editors_first_name = reduce(or_, (Q(editors__first_name__icontains=k) for k in keywords)) - editors_last_name = reduce(or_, (Q(editors__last_name__icontains=k) for k in keywords)) + editors_first_name = reduce( + or_, (Q(editors__first_name__icontains=k) for k in keywords) + ) + editors_last_name = reduce( + or_, (Q(editors__last_name__icontains=k) for k in keywords) + ) editors_email = reduce(or_, (Q(editors__email__icontains=k) for k in keywords)) - return queryset.annotate(primary_token=primary_token_subquery, ).filter( + return queryset.annotate(primary_token=primary_token_subquery,).filter( Q(name__icontains=value) | Q(pk__istartswith=value) | Q(primary_token=value.replace("-", "")) - | ( - editors_first_name - & editors_last_name - ) + | (editors_first_name & editors_last_name) | editors_email ) @@ -973,8 +1048,12 @@ def update(self, request, *args, **kwargs): self.perform_update(serializer) Change.create_change( - generate_update_event(instance.id, CHANNEL, request.data, channel_id=instance.id), - applied=True, created_by_id=request.user.id) + generate_update_event( + instance.id, CHANNEL, request.data, channel_id=instance.id + ), + applied=True, + created_by_id=request.user.id, + ) return Response(self.serialize_object()) diff --git a/contentcuration/contentcuration/viewsets/channelset.py b/contentcuration/contentcuration/viewsets/channelset.py index 90afb1ed05..8cca678a0d 100644 --- a/contentcuration/contentcuration/viewsets/channelset.py +++ b/contentcuration/contentcuration/viewsets/channelset.py @@ -21,7 +21,10 @@ class ChannelSetSerializer(BulkModelSerializer): channels = UserFilteredPrimaryKeyRelatedField( - many=True, queryset=Channel.objects.all(), edit=False, required=False, + many=True, + queryset=Channel.objects.all(), + edit=False, + required=False, ) def create(self, validated_data): @@ -35,7 +38,10 @@ def create(self, validated_data): instance.editors.add(user) self.changes.append( generate_update_event( - instance.id, CHANNELSET, {"secret_token": instance.secret_token.token}, user_id=user.id + instance.id, + CHANNELSET, + {"secret_token": instance.secret_token.token}, + user_id=user.id, ) ) return instance @@ -78,7 +84,11 @@ class ChannelSetViewSet(ValuesViewset, RESTCreateModelMixin): def get_queryset(self): queryset = super(ChannelSetViewSet, self).get_queryset() user_id = not self.request.user.is_anonymous and self.request.user.id - edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id"))) + edit = Exists( + User.channel_sets.through.objects.filter( + user_id=user_id, channelset_id=OuterRef("id") + ) + ) queryset = queryset.annotate(edit=edit).filter(edit=True) return queryset diff --git a/contentcuration/contentcuration/viewsets/common.py b/contentcuration/contentcuration/viewsets/common.py 
index 7298c99d70..cfb4925d19 100644 --- a/contentcuration/contentcuration/viewsets/common.py +++ b/contentcuration/contentcuration/viewsets/common.py @@ -92,7 +92,9 @@ class SQArrayAgg(AggregateSubquery): class SQRelatedArrayAgg(SQArrayAgg): # For cases where fields are in a related table, for example language__native_name - template = "(SELECT ARRAY_AGG(%(fieldname)s::text) FROM (%(subquery)s) AS %(field)s__sum)" + template = ( + "(SELECT ARRAY_AGG(%(fieldname)s::text) FROM (%(subquery)s) AS %(field)s__sum)" + ) class SQJSONBKeyArrayAgg(AggregateSubquery): @@ -100,10 +102,9 @@ class SQJSONBKeyArrayAgg(AggregateSubquery): An aggregate subquery to get all the distinct keys of a JSON field that contains maps to store e.g. metadata labels. """ + # Include ALIAS at the end to support Postgres - template = ( - "(SELECT ARRAY_AGG(f) FROM (SELECT DISTINCT jsonb_object_keys(%(field)s) AS f FROM (%(subquery)s) AS x) AS %(field)s__sum)" - ) + template = "(SELECT ARRAY_AGG(f) FROM (SELECT DISTINCT jsonb_object_keys(%(field)s) AS f FROM (%(subquery)s) AS x) AS %(field)s__sum)" output_field = ArrayField(CharField()) @@ -159,7 +160,9 @@ def get_value(self, dictionary): # with the value of the child field. # N.B. the get_value method expects a dictionary that references the field's name # not just the value. - nested_value = fields[keys[0]].get_value({keys[0]: {keys[1]: html_value[key]}}) + nested_value = fields[keys[0]].get_value( + {keys[0]: {keys[1]: html_value[key]}} + ) if keys[0] not in value: value[keys[0]] = {} value[keys[0]].update(nested_value) @@ -189,7 +192,9 @@ def update(self, instance, validated_data): elif hasattr(self.fields[key], "update"): # If the nested field has an update method (e.g. a nested serializer), # call the update value so that we can do any recursive updates - instance[key] = self.fields[key].update(instance.get(key, {}), validated_data[key]) + instance[key] = self.fields[key].update( + instance.get(key, {}), validated_data[key] + ) else: # Otherwise, just update the value instance[key] = validated_data[key] diff --git a/contentcuration/contentcuration/viewsets/contentnode.py b/contentcuration/contentcuration/viewsets/contentnode.py index 49f2cf3e35..02aea2ffd3 100644 --- a/contentcuration/contentcuration/viewsets/contentnode.py +++ b/contentcuration/contentcuration/viewsets/contentnode.py @@ -40,7 +40,9 @@ from rest_framework.serializers import Field from rest_framework.serializers import ValidationError -from contentcuration.constants import completion_criteria as completion_criteria_validator +from contentcuration.constants import ( + completion_criteria as completion_criteria_validator, +) from contentcuration.db.models.expressions import IsNull from contentcuration.db.models.query import RIGHT_JOIN from contentcuration.db.models.query import With @@ -139,10 +141,7 @@ def filter__node_id_channel_id(self, queryset, name, value): return queryset.filter(query) -tags_values_cte_fields = { - 'tag': models.CharField(), - 'node_id': UUIDField() -} +tags_values_cte_fields = {"tag": models.CharField(), "node_id": UUIDField()} def set_tags(tags_by_id): @@ -155,7 +154,7 @@ def set_tags(tags_by_id): tag_tuples.append((tag_name, target_node_id)) # create CTE that holds the tag_tuples data - values_cte = WithValues(tags_values_cte_fields, tag_tuples, name='values_cte') + values_cte = WithValues(tags_values_cte_fields, tag_tuples, name="values_cte") # create another CTE which will RIGHT join against the tag table, so we get all of our # tag_tuple data back, plus the tag_id if it 
exists. Ideally we wouldn't normally use a RIGHT @@ -166,11 +165,11 @@ def set_tags(tags_by_id): .annotate( tag=values_cte.col.tag, node_id=values_cte.col.node_id, - tag_id=F('id'), + tag_id=F("id"), ) - .values('tag', 'node_id', 'tag_id') + .values("tag", "node_id", "tag_id") ) - tags_cte = With(tags_qs, name='tags_cte') + tags_cte = With(tags_qs, name="tags_cte") # the final query, we RIGHT join against the tag relation table so we get the tag_tuple back # again, plus the tag_id from the previous CTE, plus annotate a boolean of whether @@ -180,7 +179,7 @@ def set_tags(tags_by_id): CTEQuerySet(model=ContentNode.tags.through), contenttag_id=tags_cte.col.tag_id, contentnode_id=tags_cte.col.node_id, - _join_type=RIGHT_JOIN + _join_type=RIGHT_JOIN, ) .with_cte(values_cte) .with_cte(tags_cte) @@ -188,9 +187,9 @@ def set_tags(tags_by_id): tag_name=tags_cte.col.tag, node_id=tags_cte.col.node_id, tag_id=tags_cte.col.tag_id, - has_relation=IsNull('contentnode_id', negate=True) + has_relation=IsNull("contentnode_id", negate=True), ) - .values('tag_name', 'node_id', 'tag_id', 'has_relation') + .values("tag_name", "node_id", "tag_id", "has_relation") ) created_tags = {} @@ -209,7 +208,9 @@ def set_tags(tags_by_id): if tag_name in created_tags: tag_id = created_tags[tag_name] else: - tag, _ = ContentTag.objects.get_or_create(tag_name=tag_name, channel_id=None) + tag, _ = ContentTag.objects.get_or_create( + tag_name=tag_name, channel_id=None + ) tag_id = tag.pk created_tags.update({tag_name: tag_id}) @@ -284,11 +285,17 @@ class CompletionCriteriaSerializer(JSONFieldDictSerializer): def update(self, instance, validated_data): validated_data = validate_and_conform_to_schema_threshold_none(validated_data) - return super(CompletionCriteriaSerializer, self).update(instance, validated_data) + return super(CompletionCriteriaSerializer, self).update( + instance, validated_data + ) class ExtraFieldsOptionsSerializer(JSONFieldDictSerializer): - modality = ChoiceField(choices=(("QUIZ", "Quiz"), ("SURVEY", "Survey")), allow_null=True, required=False) + modality = ChoiceField( + choices=(("QUIZ", "Quiz"), ("SURVEY", "Survey")), + allow_null=True, + required=False, + ) completion_criteria = CompletionCriteriaSerializer(required=False) @@ -302,7 +309,11 @@ class InheritedMetadataSerializer(JSONFieldDictSerializer): class ExtraFieldsSerializer(JSONFieldDictSerializer): randomize = BooleanField() options = ExtraFieldsOptionsSerializer(required=False) - suggested_duration_type = ChoiceField(choices=[completion_criteria.TIME, completion_criteria.APPROX_TIME], allow_null=True, required=False) + suggested_duration_type = ChoiceField( + choices=[completion_criteria.TIME, completion_criteria.APPROX_TIME], + allow_null=True, + required=False, + ) inherited_metadata = InheritedMetadataSerializer(required=False) def update(self, instance, validated_data): @@ -334,7 +345,9 @@ def __init__(self, choices, *args, **kwargs): def get_fields(self): fields = {} for label_id, label_name in self.choices: - field = MetadataLabelBooleanField(required=False, label=label_name, allow_null=True) + field = MetadataLabelBooleanField( + required=False, label=label_name, allow_null=True + ) fields[label_id] = field return fields @@ -356,8 +369,12 @@ class ContentNodeSerializer(BulkModelSerializer): # Fields for metadata labels grade_levels = MetadataLabelsField(levels.choices, required=False) resource_types = MetadataLabelsField(resource_type.choices, required=False) - learning_activities = MetadataLabelsField(learning_activities.choices, 
required=False) - accessibility_labels = MetadataLabelsField(accessibility_categories.choices, required=False) + learning_activities = MetadataLabelsField( + learning_activities.choices, required=False + ) + accessibility_labels = MetadataLabelsField( + accessibility_categories.choices, required=False + ) categories = MetadataLabelsField(subjects.choices, required=False) learner_needs = MetadataLabelsField(needs.choices, required=False) @@ -419,12 +436,18 @@ def validate(self, data): return data def _check_completion_criteria(self, kind, complete, validated_data): - completion_criteria = validated_data.get("extra_fields", {}).get("options", {}).get("completion_criteria", {}) + completion_criteria = ( + validated_data.get("extra_fields", {}) + .get("options", {}) + .get("completion_criteria", {}) + ) try: if complete: completion_criteria_validator.validate(completion_criteria, kind) else: - completion_criteria_validator.check_model_for_kind(completion_criteria, kind) + completion_criteria_validator.check_model_for_kind( + completion_criteria, kind + ) except DjangoValidationError as e: raise ValidationError(e) @@ -442,8 +465,13 @@ def _ensure_complete(self, instance): user_id = self.context["request"].user.id Change.create_change( generate_update_event( - instance.id, CONTENTNODE, {"complete": False}, channel_id=instance.get_channel_id() - ), created_by_id=user_id, applied=True + instance.id, + CONTENTNODE, + {"complete": False}, + channel_id=instance.get_channel_id(), + ), + created_by_id=user_id, + applied=True, ) def create(self, validated_data): @@ -451,7 +479,11 @@ def create(self, validated_data): if "tags" in validated_data: tags = validated_data.pop("tags") - self._check_completion_criteria(validated_data.get("kind"), validated_data.get("complete", False), validated_data) + self._check_completion_criteria( + validated_data.get("kind"), + validated_data.get("complete", False), + validated_data, + ) instance = super(ContentNodeSerializer, self).create(validated_data) @@ -473,7 +505,11 @@ def update(self, instance, validated_data): tags = validated_data.pop("tags") set_tags({instance.id: tags}) - self._check_completion_criteria(validated_data.get("kind", instance.kind_id), validated_data.get("complete", instance.complete), validated_data) + self._check_completion_criteria( + validated_data.get("kind", instance.kind_id), + validated_data.get("complete", instance.complete), + validated_data, + ) instance = super(ContentNodeSerializer, self).update(instance, validated_data) @@ -675,6 +711,7 @@ class ContentNodePagination(ValuesViewsetCursorPagination): that will guarantee membership to a specific MPTT tree, such as parent or tree_id, the pagination scheme will not be predictable. """ + cursor_query_param = "lft__gt" ordering = "lft" page_size_query_param = "max_results" @@ -692,7 +729,9 @@ def decode_cursor(self, request): try: value = int(value) except ValueError: - raise ValidationError("lft must be an integer but an invalid value was given.") + raise ValidationError( + "lft must be an integer but an invalid value was given." + ) return Cursor(offset=0, reverse=False, position=value) @@ -700,16 +739,20 @@ def encode_cursor(self, cursor): """ Given a Cursor instance, return an url with query parameter. 
""" - return replace_query_param(self.base_url, self.cursor_query_param, str(cursor.position)) + return replace_query_param( + self.base_url, self.cursor_query_param, str(cursor.position) + ) def get_more(self): position, offset = self._get_more_position_offset() if position is None and offset is None: return None params = self.request.query_params.copy() - params.update({ - self.cursor_query_param: position, - }) + params.update( + { + self.cursor_query_param: position, + } + ) return params @@ -782,7 +825,9 @@ class ContentNodeViewSet(BulkUpdateMixin, ValuesViewset): "grade_levels": partial(dict_if_none, field_name="grade_levels"), "resource_types": partial(dict_if_none, field_name="resource_types"), "learning_activities": partial(dict_if_none, field_name="learning_activities"), - "accessibility_labels": partial(dict_if_none, field_name="accessibility_labels"), + "accessibility_labels": partial( + dict_if_none, field_name="accessibility_labels" + ), "categories": partial(dict_if_none, field_name="categories"), "learner_needs": partial(dict_if_none, field_name="learner_needs"), "extra_fields": consolidate_extra_fields, @@ -852,12 +897,16 @@ def size(self, request, pk=None): # We don't really need more than one queued async calculation task, so we use # fetch_or_enqueue to ensure a task is queued, as well as return info about it task_args = dict(node_id=node.pk, channel_id=node.channel_id) - calculate_resource_size_task.fetch_or_enqueue(self.request.user, **task_args) + calculate_resource_size_task.fetch_or_enqueue( + self.request.user, **task_args + ) - return Response({ - "size": size, - "stale": stale, - }) + return Response( + { + "size": size, + "stale": stale, + } + ) def annotate_queryset(self, queryset): queryset = queryset.annotate(total_count=(F("rght") - F("lft") - 1) / 2) @@ -919,7 +968,8 @@ def annotate_queryset(self, queryset): queryset = queryset.annotate( resource_count=SQCount(descendant_resources, field="id"), coach_count=SQCount( - descendant_resources.filter(role_visibility=roles.COACH), field="id", + descendant_resources.filter(role_visibility=roles.COACH), + field="id", ), assessment_item_count=SQCount(assessment_items, field="assessment_id"), error_count=SQCount(descendant_errors, field="id"), @@ -1037,7 +1087,9 @@ def copy( ContentNode.filter_by_pk(pk=source.id), user=self.request.user ).exists() - with create_change_tracker(pk, CONTENTNODE, channel_id, self.request.user, "copy_nodes") as progress_tracker: + with create_change_tracker( + pk, CONTENTNODE, channel_id, self.request.user, "copy_nodes" + ) as progress_tracker: new_node = source.copy_to( target, position, @@ -1052,8 +1104,11 @@ def copy( generate_update_event( pk, CONTENTNODE, - {COPYING_STATUS: COPYING_STATUS_VALUES.SUCCESS, "node_id": new_node.node_id}, - channel_id=channel_id + { + COPYING_STATUS: COPYING_STATUS_VALUES.SUCCESS, + "node_id": new_node.node_id, + }, + channel_id=channel_id, ), applied=True, created_by_id=self.request.user.id, @@ -1074,7 +1129,7 @@ def perform_create(self, serializer, change=None): channel_id=change["channel_id"], ), created_by_id=change["created_by_id"], - applied=True + applied=True, ) def update_descendants(self, pk, mods): @@ -1084,9 +1139,13 @@ def update_descendants(self, pk, mods): if root.kind_id != content_kinds.TOPIC: raise ValidationError("Only topics can have descendants to update") - descendantsIds = root.get_descendants(include_self=True).values_list("id", flat=True) + descendantsIds = root.get_descendants(include_self=True).values_list( + "id", flat=True + ) 
- changes = [{"key": descendantId, "mods": mods} for descendantId in descendantsIds] + changes = [ + {"key": descendantId, "mods": mods} for descendantId in descendantsIds + ] # Bulk update return self.update_from_changes(changes) @@ -1101,5 +1160,4 @@ def update_descendants_from_changes(self, changes): log_sync_exception(e, user=self.request.user, change=change) change["errors"] = [str(e)] errors.append(change) - print("errorsv", errors) return errors diff --git a/contentcuration/contentcuration/viewsets/feedback.py b/contentcuration/contentcuration/viewsets/feedback.py index 3ee1db6e94..600d6944d1 100644 --- a/contentcuration/contentcuration/viewsets/feedback.py +++ b/contentcuration/contentcuration/viewsets/feedback.py @@ -12,12 +12,12 @@ class IsAdminForListAndDestroy(permissions.BasePermission): def _check_admin_or_feature_flag(self, request, view): # only allow list and destroy of flagged content to admins - if view.action in ['list', 'destroy', 'retrieve']: + if view.action in ["list", "destroy", "retrieve"]: try: return request.user and request.user.is_admin except AttributeError: return False - if request.user.check_feature_flag('test_dev_feature'): + if request.user.check_feature_flag("test_dev_feature"): return True return False @@ -30,50 +30,70 @@ def has_object_permission(self, request, view, obj): class BaseFeedbackSerializer(serializers.ModelSerializer): class Meta: - fields = ['id', 'context', 'contentnode_id', 'content_id'] - read_only_fields = ['id'] + fields = ["id", "context", "contentnode_id", "content_id"] + read_only_fields = ["id"] class BaseFeedbackEventSerializer(serializers.ModelSerializer): class Meta: - fields = ['user', 'target_channel_id'] - read_only_fields = ['user'] + fields = ["user", "target_channel_id"] + read_only_fields = ["user"] class BaseFeedbackInteractionEventSerializer(serializers.ModelSerializer): class Meta: - fields = ['feedback_type', 'feedback_reason'] + fields = ["feedback_type", "feedback_reason"] -class FlagFeedbackEventSerializer(BaseFeedbackSerializer, BaseFeedbackEventSerializer, BaseFeedbackInteractionEventSerializer): +class FlagFeedbackEventSerializer( + BaseFeedbackSerializer, + BaseFeedbackEventSerializer, + BaseFeedbackInteractionEventSerializer, +): class Meta: model = FlagFeedbackEvent - fields = BaseFeedbackSerializer.Meta.fields + BaseFeedbackEventSerializer.Meta.fields + BaseFeedbackInteractionEventSerializer.Meta.fields + fields = ( + BaseFeedbackSerializer.Meta.fields + + BaseFeedbackEventSerializer.Meta.fields + + BaseFeedbackInteractionEventSerializer.Meta.fields + ) -class RecommendationsInteractionEventSerializer(BaseFeedbackSerializer, BaseFeedbackInteractionEventSerializer): +class RecommendationsInteractionEventSerializer( + BaseFeedbackSerializer, BaseFeedbackInteractionEventSerializer +): recommendation_event_id = serializers.UUIDField() class Meta: model = RecommendationsInteractionEvent - fields = BaseFeedbackSerializer.Meta.fields + BaseFeedbackInteractionEventSerializer.Meta.fields + ['recommendation_event_id'] + fields = ( + BaseFeedbackSerializer.Meta.fields + + BaseFeedbackInteractionEventSerializer.Meta.fields + + ["recommendation_event_id"] + ) -class RecommendationsEventSerializer(BaseFeedbackSerializer, BaseFeedbackEventSerializer): +class RecommendationsEventSerializer( + BaseFeedbackSerializer, BaseFeedbackEventSerializer +): content = serializers.JSONField(default=list) time_hidden = serializers.DateTimeField(required=False, read_only=True) class Meta: model = RecommendationsEvent - fields = 
BaseFeedbackSerializer.Meta.fields + BaseFeedbackEventSerializer.Meta.fields + ['content', 'time_hidden'] + fields = ( + BaseFeedbackSerializer.Meta.fields + + BaseFeedbackEventSerializer.Meta.fields + + ["content", "time_hidden"] + ) def create(self, validated_data): - validated_data.pop('time_hidden', None) + validated_data.pop("time_hidden", None) return super().create(validated_data) def update(self, instance, validated_data): - if 'time_hidden' in validated_data: - validated_data['time_hidden'] = timezone.now() + if "time_hidden" in validated_data: + validated_data["time_hidden"] = timezone.now() return super().update(instance, validated_data) @@ -81,14 +101,14 @@ class RecommendationsInteractionEventViewSet(viewsets.ModelViewSet): # TODO: decide export procedure queryset = RecommendationsInteractionEvent.objects.all() serializer_class = RecommendationsInteractionEventSerializer - http_method_names = ['post', 'put', 'patch'] + http_method_names = ["post", "put", "patch"] class RecommendationsEventViewSet(viewsets.ModelViewSet): # TODO: decide export procedure queryset = RecommendationsEvent.objects.all() serializer_class = RecommendationsEventSerializer - http_method_names = ['post', 'put', 'patch'] + http_method_names = ["post", "put", "patch"] class FlagFeedbackEventViewSet(viewsets.ModelViewSet): diff --git a/contentcuration/contentcuration/viewsets/file.py b/contentcuration/contentcuration/viewsets/file.py index f73f0557f2..2ccef64244 100644 --- a/contentcuration/contentcuration/viewsets/file.py +++ b/contentcuration/contentcuration/viewsets/file.py @@ -53,8 +53,9 @@ class FileUploadURLSerializer(serializers.Serializer): Optional: - duration: a number that will be floored to an integer and must be > 0 """ + size = serializers.FloatField(required=True) - checksum = serializers.RegexField(regex=r'^[0-9a-f]{32}$', required=True) + checksum = serializers.RegexField(regex=r"^[0-9a-f]{32}$", required=True) name = serializers.CharField(required=True) file_format = serializers.ChoiceField(choices=file_formats.choices, required=True) preset = serializers.ChoiceField(choices=format_presets.choices, required=True) @@ -65,13 +66,21 @@ def validate_duration(self, value): return None floored = math.floor(value) if floored <= 0: - raise serializers.ValidationError("File duration is equal to or less than 0") + raise serializers.ValidationError( + "File duration is equal to or less than 0" + ) return floored def validate(self, attrs): - if attrs["file_format"] in {file_formats.MP4, file_formats.WEBM, file_formats.MP3}: + if attrs["file_format"] in { + file_formats.MP4, + file_formats.WEBM, + file_formats.MP3, + }: if "duration" not in attrs or attrs["duration"] is None: - raise serializers.ValidationError("Duration is required for audio/video files") + raise serializers.ValidationError( + "Duration is required for audio/video files" + ) return attrs diff --git a/contentcuration/contentcuration/viewsets/invitation.py b/contentcuration/contentcuration/viewsets/invitation.py index 81b1e5c680..7d8ff577f6 100644 --- a/contentcuration/contentcuration/viewsets/invitation.py +++ b/contentcuration/contentcuration/viewsets/invitation.py @@ -145,8 +145,13 @@ def accept(self, request, pk=None): invitation.save() Change.create_change( generate_update_event( - invitation.id, INVITATION, {"accepted": True}, channel_id=invitation.channel_id - ), applied=True, created_by_id=request.user.id + invitation.id, + INVITATION, + {"accepted": True}, + channel_id=invitation.channel_id, + ), + applied=True, + 
created_by_id=request.user.id, ) return Response({"status": "success"}) @@ -157,7 +162,12 @@ def decline(self, request, pk=None): invitation.save() Change.create_change( generate_update_event( - invitation.id, INVITATION, {"declined": True}, channel_id=invitation.channel_id - ), applied=True, created_by_id=request.user.id + invitation.id, + INVITATION, + {"declined": True}, + channel_id=invitation.channel_id, + ), + applied=True, + created_by_id=request.user.id, ) return Response({"status": "success"}) diff --git a/contentcuration/contentcuration/viewsets/recommendation.py b/contentcuration/contentcuration/viewsets/recommendation.py index 8b87294ddf..29483a70e1 100644 --- a/contentcuration/contentcuration/viewsets/recommendation.py +++ b/contentcuration/contentcuration/viewsets/recommendation.py @@ -27,24 +27,34 @@ def post(self, request): try: request_data = request.data # Remove and store override_threshold as it isn't defined in the schema - override_threshold = request_data.pop('override_threshold', False) + override_threshold = request_data.pop("override_threshold", False) embed_topics_request.validate(request_data) except jsonschema.ValidationError as e: logger.error("Schema validation error: %s", str(e)) - return JsonResponse({"error": "Invalid request data. Please check the required fields."}, status=HTTPStatus.BAD_REQUEST) + return JsonResponse( + {"error": "Invalid request data. Please check the required fields."}, + status=HTTPStatus.BAD_REQUEST, + ) try: - recommendations = self.manager.load_recommendations(request_data, override_threshold) + recommendations = self.manager.load_recommendations( + request_data, override_threshold + ) return JsonResponse(data=recommendations, safe=False) except errors.InvalidRequest: - return JsonResponse({"error": "Invalid input provided."}, - status=HTTPStatus.BAD_REQUEST) + return JsonResponse( + {"error": "Invalid input provided."}, status=HTTPStatus.BAD_REQUEST + ) except errors.ConnectionError: - return JsonResponse({"error": "Recommendation service unavailable"}, - status=HTTPStatus.SERVICE_UNAVAILABLE) + return JsonResponse( + {"error": "Recommendation service unavailable"}, + status=HTTPStatus.SERVICE_UNAVAILABLE, + ) except errors.TimeoutError: - return JsonResponse({"error": "Connection to recommendation service timed out"}, - status=HTTPStatus.REQUEST_TIMEOUT) + return JsonResponse( + {"error": "Connection to recommendation service timed out"}, + status=HTTPStatus.REQUEST_TIMEOUT, + ) except errors.HttpError: return HttpResponseServerError("Unable to load recommendations") diff --git a/contentcuration/contentcuration/viewsets/sync/base.py b/contentcuration/contentcuration/viewsets/sync/base.py index 68fa0336e2..1d4be37c6b 100644 --- a/contentcuration/contentcuration/viewsets/sync/base.py +++ b/contentcuration/contentcuration/viewsets/sync/base.py @@ -23,16 +23,16 @@ from contentcuration.viewsets.sync.constants import CREATED from contentcuration.viewsets.sync.constants import DELETED from contentcuration.viewsets.sync.constants import DEPLOYED -from contentcuration.viewsets.sync.constants import UPDATED_DESCENDANTS -from contentcuration.viewsets.sync.constants import PUBLISHED_NEXT from contentcuration.viewsets.sync.constants import EDITOR_M2M from contentcuration.viewsets.sync.constants import FILE from contentcuration.viewsets.sync.constants import INVITATION from contentcuration.viewsets.sync.constants import MOVED from contentcuration.viewsets.sync.constants import PUBLISHED +from contentcuration.viewsets.sync.constants import 
PUBLISHED_NEXT from contentcuration.viewsets.sync.constants import SAVEDSEARCH from contentcuration.viewsets.sync.constants import SYNCED from contentcuration.viewsets.sync.constants import UPDATED +from contentcuration.viewsets.sync.constants import UPDATED_DESCENDANTS from contentcuration.viewsets.sync.constants import USER from contentcuration.viewsets.sync.constants import VIEWER_M2M from contentcuration.viewsets.sync.utils import log_sync_exception @@ -97,7 +97,7 @@ def get_change_type(obj): SYNCED: "sync_from_changes", DEPLOYED: "deploy_from_changes", UPDATED_DESCENDANTS: "update_descendants_from_changes", - PUBLISHED_NEXT: "publish_next_from_changes" + PUBLISHED_NEXT: "publish_next_from_changes", } @@ -127,7 +127,9 @@ def apply_changes(changes_queryset): change.applied = True changed_fields = ("applied",) except Exception as e: - log_sync_exception(e, user=change.created_by, change=change.serialize_to_change_dict()) + log_sync_exception( + e, user=change.created_by, change=change.serialize_to_change_dict() + ) change.errored = True change.kwargs["errors"] = [str(e)] change.save(update_fields=changed_fields) diff --git a/contentcuration/contentcuration/viewsets/sync/constants.py b/contentcuration/contentcuration/viewsets/sync/constants.py index 4733aeeffe..54091c9203 100644 --- a/contentcuration/contentcuration/viewsets/sync/constants.py +++ b/contentcuration/contentcuration/viewsets/sync/constants.py @@ -11,18 +11,20 @@ PUBLISHED_NEXT = 10 -ALL_CHANGES = set([ - CREATED, - UPDATED, - DELETED, - MOVED, - COPIED, - PUBLISHED, - SYNCED, - DEPLOYED, - UPDATED_DESCENDANTS, - PUBLISHED_NEXT, -]) +ALL_CHANGES = set( + [ + CREATED, + UPDATED, + DELETED, + MOVED, + COPIED, + PUBLISHED, + SYNCED, + DEPLOYED, + UPDATED_DESCENDANTS, + PUBLISHED_NEXT, + ] +) # Client-side table constants BOOKMARK = "bookmark" @@ -62,13 +64,15 @@ # but do not affect whether a channel is publishable or not # only edits to these tables are considered publishable changes # although individual changes can still be marked as unpublishable. -PUBLISHABLE_CHANGE_TABLES = set([ - CHANNEL, - CONTENTNODE, - CONTENTNODE_PREREQUISITE, - ASSESSMENTITEM, - FILE, -]) +PUBLISHABLE_CHANGE_TABLES = set( + [ + CHANNEL, + CONTENTNODE, + CONTENTNODE_PREREQUISITE, + ASSESSMENTITEM, + FILE, + ] +) # Enum for copying states diff --git a/contentcuration/contentcuration/viewsets/sync/endpoint.py b/contentcuration/contentcuration/viewsets/sync/endpoint.py index 22a40920c2..6825833823 100644 --- a/contentcuration/contentcuration/viewsets/sync/endpoint.py +++ b/contentcuration/contentcuration/viewsets/sync/endpoint.py @@ -31,20 +31,38 @@ class SyncView(APIView): def handle_changes(self, request): session_key = request.session.session_key - changes = list(filter(lambda x: type(x) is dict, request.data.get("changes", []))) + changes = list( + filter(lambda x: type(x) is dict, request.data.get("changes", [])) + ) if changes: - change_channel_ids = set(x.get("channel_id") for x in changes if x.get("channel_id")) + change_channel_ids = set( + x.get("channel_id") for x in changes if x.get("channel_id") + ) # Channels that have been created on the client side won't exist on the server yet, so we need to add a special exception for them. 
- created_channel_ids = set(x.get("channel_id") for x in changes if x.get("channel_id") and x.get("table") == CHANNEL and x.get("type") == CREATED) + created_channel_ids = set( + x.get("channel_id") + for x in changes + if x.get("channel_id") + and x.get("table") == CHANNEL + and x.get("type") == CREATED + ) # However, this would also give people a mechanism to edit existing channels on the server side by adding a channel create event for an # already existing channel, so we have to filter out the channel ids that are already created on the server side, regardless of whether # the user making the requests has permissions for those channels. created_channel_ids = created_channel_ids.difference( - set(Channel.objects.filter(id__in=created_channel_ids).values_list("id", flat=True).distinct()) + set( + Channel.objects.filter(id__in=created_channel_ids) + .values_list("id", flat=True) + .distinct() + ) ) allowed_ids = set( - Channel.filter_edit_queryset(Channel.objects.filter(id__in=change_channel_ids), request.user).values_list("id", flat=True).distinct() + Channel.filter_edit_queryset( + Channel.objects.filter(id__in=change_channel_ids), request.user + ) + .values_list("id", flat=True) + .distinct() ).union(created_channel_ids) # Allow changes that are either: # Not related to a channel and instead related to the user if the user is the current user. @@ -60,12 +78,22 @@ def handle_changes(self, request): channel_changes.append(c) else: disallowed_changes.append(c) - change_models = Change.create_changes(user_only_changes + channel_changes, created_by_id=request.user.id, session_key=session_key) + change_models = Change.create_changes( + user_only_changes + channel_changes, + created_by_id=request.user.id, + session_key=session_key, + ) if user_only_changes: - apply_user_changes_task.fetch_or_enqueue(request.user, user_id=request.user.id) + apply_user_changes_task.fetch_or_enqueue( + request.user, user_id=request.user.id + ) for channel_id in allowed_ids: - apply_channel_changes_task.fetch_or_enqueue(request.user, channel_id=channel_id) - allowed_changes = [{"rev": c.client_rev, "server_rev": c.server_rev} for c in change_models] + apply_channel_changes_task.fetch_or_enqueue( + request.user, channel_id=channel_id + ) + allowed_changes = [ + {"rev": c.client_rev, "server_rev": c.server_rev} for c in change_models + ] return {"disallowed": disallowed_changes, "allowed": allowed_changes} return {} @@ -74,8 +102,14 @@ def get_channel_revs(self, request): channel_revs = request.data.get("channel_revs", {}) if channel_revs: # Filter to only the channels that the user has permissions to view. 
- channel_ids = Channel.filter_view_queryset(Channel.objects.all(), request.user).filter(id__in=channel_revs.keys()).values_list("id", flat=True) - channel_revs = {channel_id: channel_revs[channel_id] for channel_id in channel_ids} + channel_ids = ( + Channel.filter_view_queryset(Channel.objects.all(), request.user) + .filter(id__in=channel_revs.keys()) + .values_list("id", flat=True) + ) + channel_revs = { + channel_id: channel_revs[channel_id] for channel_id in channel_ids + } return channel_revs def return_changes(self, request, channel_revs): @@ -86,17 +120,26 @@ def return_changes(self, request, channel_revs): unapplied_revs_filter = Q(server_rev__in=unapplied_revs) # Create a filter that returns all applied changes, and any errored changes made by this session - relevant_to_session_filter = (Q(applied=True) | Q(errored=True, session_id=session_key)) + relevant_to_session_filter = Q(applied=True) | Q( + errored=True, session_id=session_key + ) - change_filter = (Q(user=request.user) & (unapplied_revs_filter | Q(server_rev__gt=user_rev)) & relevant_to_session_filter) + change_filter = ( + Q(user=request.user) + & (unapplied_revs_filter | Q(server_rev__gt=user_rev)) + & relevant_to_session_filter + ) for channel_id, rev in channel_revs.items(): - change_filter |= (Q(channel_id=channel_id) & (unapplied_revs_filter | Q(server_rev__gt=rev)) & relevant_to_session_filter) + change_filter |= ( + Q(channel_id=channel_id) + & (unapplied_revs_filter | Q(server_rev__gt=rev)) + & relevant_to_session_filter + ) changes_to_return = list( - Change.objects.filter( - change_filter - ).values( + Change.objects.filter(change_filter) + .values( "server_rev", "session_id", "channel_id", @@ -106,8 +149,9 @@ def return_changes(self, request, channel_revs): "errored", "table", "change_type", - "kwargs" - ).order_by("server_rev")[:CHANGE_RETURN_LIMIT] + "kwargs", + ) + .order_by("server_rev")[:CHANGE_RETURN_LIMIT] ) if not changes_to_return: @@ -129,16 +173,28 @@ def return_changes(self, request, channel_revs): return {"changes": changes, "errors": errors, "successes": successes} def return_tasks(self, request, channel_revs): - custom_task_cte = With(CustomTaskMetadata.objects.filter(channel_id__in=channel_revs.keys())) + custom_task_cte = With( + CustomTaskMetadata.objects.filter(channel_id__in=channel_revs.keys()) + ) task_result_querySet = CTEQuerySet(model=TaskResult) - query = custom_task_cte.join(task_result_querySet, task_id=custom_task_cte.col.task_id)\ - .with_cte(custom_task_cte)\ - .filter(status__in=[states.STARTED, states.FAILURE],)\ + query = ( + custom_task_cte.join( + task_result_querySet, task_id=custom_task_cte.col.task_id + ) + .with_cte(custom_task_cte) + .filter( + status__in=[states.STARTED, states.FAILURE], + ) .exclude( - task_name__in=[apply_channel_changes_task.name, apply_user_changes_task.name] - ).annotate( + task_name__in=[ + apply_channel_changes_task.name, + apply_user_changes_task.name, + ] + ) + .annotate( progress=custom_task_cte.col.progress, channel_id=custom_task_cte.col.channel_id, + ) ) response_payload = { @@ -147,7 +203,14 @@ def return_tasks(self, request, channel_revs): if query.exists(): response_payload = { - "tasks": query.values("task_id", "task_name", "traceback", "progress", "channel_id", "status"), + "tasks": query.values( + "task_id", + "task_name", + "traceback", + "progress", + "channel_id", + "status", + ), } return response_payload diff --git a/contentcuration/contentcuration/viewsets/sync/utils.py b/contentcuration/contentcuration/viewsets/sync/utils.py 
index 43bf280b22..a0073ce731 100644 --- a/contentcuration/contentcuration/viewsets/sync/utils.py +++ b/contentcuration/contentcuration/viewsets/sync/utils.py @@ -12,9 +12,9 @@ from contentcuration.viewsets.sync.constants import DEPLOYED from contentcuration.viewsets.sync.constants import MOVED from contentcuration.viewsets.sync.constants import PUBLISHED +from contentcuration.viewsets.sync.constants import PUBLISHED_NEXT from contentcuration.viewsets.sync.constants import UPDATED from contentcuration.viewsets.sync.constants import UPDATED_DESCENDANTS -from contentcuration.viewsets.sync.constants import PUBLISHED_NEXT def validate_table(table): @@ -60,7 +60,15 @@ def generate_move_event(key, table, target, position, channel_id=None, user_id=N def generate_copy_event( - key, table, from_key, target, position=None, mods=None, excluded_descendants=None, channel_id=None, user_id=None + key, + table, + from_key, + target, + position=None, + mods=None, + excluded_descendants=None, + channel_id=None, + user_id=None, ): event = _generate_event(key, table, COPIED, channel_id, user_id) event["from_key"] = from_key @@ -71,9 +79,7 @@ def generate_copy_event( return event -def generate_publish_event( - key, version_notes="", language=None -): +def generate_publish_event(key, version_notes="", language=None): event = _generate_event(key, CHANNEL, PUBLISHED, key, None) event["version_notes"] = version_notes event["language"] = language @@ -84,17 +90,20 @@ def generate_deploy_event(key, user_id): event = _generate_event(key, CHANNEL, DEPLOYED, channel_id=key, user_id=user_id) return event + def generate_update_descendants_event(key, mods, channel_id=None, user_id=None): event = _generate_event(key, CONTENTNODE, UPDATED_DESCENDANTS, channel_id, user_id) event["mods"] = mods return event + def generate_publish_next_event(key, version_notes="", language=None): event = _generate_event(key, CHANNEL, PUBLISHED_NEXT, key, None) event["version_notes"] = version_notes event["language"] = language return event + def log_sync_exception(e, user=None, change=None, changes=None): # Capture exception and report, but allow sync # to complete properly. 
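The handle_changes() hunks above only re-wrap existing logic, but the permission flow they preserve is worth spelling out: incoming changes are split into user-only changes, channel changes for channels the user may edit (including channels the client just created, minus any id that already exists server-side), and disallowed changes. Below is a minimal sketch of that partitioning, using hypothetical stand-ins (allowed_channel_ids, current_user_id) in place of the real Channel querysets and request.user; the user-ownership check is assumed, since that branch is not shown in full in the hunk above.

def partition_changes(changes, allowed_channel_ids, current_user_id):
    # Buckets mirror user_only_changes / channel_changes /
    # disallowed_changes in the endpoint above.
    user_only, channel_changes, disallowed = [], [], []
    for change in changes:
        channel_id = change.get("channel_id")
        if channel_id is None:
            # Changes not tied to a channel are only allowed when they
            # target the requesting user's own records (assumed check;
            # the real branch is elided in the hunk above).
            if change.get("user_id") == current_user_id:
                user_only.append(change)
            else:
                disallowed.append(change)
        elif channel_id in allowed_channel_ids:
            channel_changes.append(change)
        else:
            disallowed.append(change)
    return user_only, channel_changes, disallowed

In the real endpoint, the first two buckets are persisted with Change.create_changes() and the per-user / per-channel apply tasks are enqueued afterwards, while the disallowed bucket is echoed back to the client.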
diff --git a/contentcuration/contentcuration/viewsets/user.py b/contentcuration/contentcuration/viewsets/user.py index b94f65af19..c779d1be47 100644 --- a/contentcuration/contentcuration/viewsets/user.py +++ b/contentcuration/contentcuration/viewsets/user.py @@ -66,7 +66,7 @@ def _can_user_access_feature(self, request): if request.user.is_admin: return True else: - return request.user.check_feature_flag('ai_feature') + return request.user.check_feature_flag("ai_feature") except AttributeError: return False @@ -102,7 +102,8 @@ def filter_channel(self, queryset, name, value): can_edit=Cast( Cast( SQCount( - channel_queryset.filter(editors=OuterRef("id")), field="id", + channel_queryset.filter(editors=OuterRef("id")), + field="id", ), IntegerField(), ), @@ -111,7 +112,8 @@ def filter_channel(self, queryset, name, value): can_view=Cast( Cast( SQCount( - channel_queryset.filter(viewers=OuterRef("id")), field="id", + channel_queryset.filter(viewers=OuterRef("id")), + field="id", ), IntegerField(), ), @@ -182,7 +184,9 @@ class ChannelUserFilter(RequiredFilterSet): def filter_channel(self, queryset, name, value): # Check permissions if not self.request.user.can_edit(value): - return queryset.none().annotate(can_edit=boolean_val(False), can_view=boolean_val(False)) + return queryset.none().annotate( + can_edit=boolean_val(False), can_view=boolean_val(False) + ) user_queryset = User.objects.filter(id=OuterRef("id")) queryset = queryset.annotate( can_edit=Exists(user_queryset.filter(editable_channels=value)), @@ -292,29 +296,31 @@ def create_from_changes(self, changes): def delete_from_changes(self, changes): return self._handle_relationship_changes(changes) - @action(detail=True, methods=['delete']) + @action(detail=True, methods=["delete"]) def remove_self(self, request, pk=None): """ Allows a user to remove themselves from a channel as a viewer. 
""" user = self.get_object() - channel_id = request.query_params.get('channel_id', None) + channel_id = request.query_params.get("channel_id", None) if not channel_id: - return HttpResponseBadRequest('Channel ID is required.') + return HttpResponseBadRequest("Channel ID is required.") channel = Channel.objects.get(id=channel_id) if not channel: return HttpResponseNotFound("Channel not found {}".format(channel_id)) if request.user != user and not request.user.can_edit(channel_id): - return HttpResponseForbidden("You do not have permission to remove this user {}".format(user.id)) + return HttpResponseForbidden( + "You do not have permission to remove this user {}".format(user.id) + ) if channel.viewers.filter(id=user.id).exists(): channel.viewers.remove(user) return Response(status=HTTP_204_NO_CONTENT) else: - return HttpResponseBadRequest('User is not a viewer of this channel.') + return HttpResponseBadRequest("User is not a viewer of this channel.") class AdminUserFilter(FilterSet): @@ -396,7 +402,9 @@ class Meta: list_serializer_class = BulkListSerializer -class AdminUserViewSet(ReadOnlyValuesViewset, RESTUpdateModelMixin, RESTDestroyModelMixin): +class AdminUserViewSet( + ReadOnlyValuesViewset, RESTUpdateModelMixin, RESTDestroyModelMixin +): pagination_class = UserListPagination permission_classes = [IsAdminUser] serializer_class = AdminUserSerializer @@ -444,12 +452,18 @@ def annotate_queryset(self, queryset): def metadata(self, request, pk=None): user = self._get_object_from_queryset(self.queryset) information = user.information or {} - information.update({ - 'edit_channels': user.editable_channels.filter(deleted=False).values('id', 'name'), - 'viewonly_channels': user.view_only_channels.filter(deleted=False).values('id', 'name'), - 'total_space': user.disk_space, - 'used_space': user.disk_space_used, - 'policies': user.policies, - 'feature_flags': user.feature_flags or {} - }) + information.update( + { + "edit_channels": user.editable_channels.filter(deleted=False).values( + "id", "name" + ), + "viewonly_channels": user.view_only_channels.filter( + deleted=False + ).values("id", "name"), + "total_space": user.disk_space, + "used_space": user.disk_space_used, + "policies": user.policies, + "feature_flags": user.feature_flags or {}, + } + ) return Response(information) diff --git a/contentcuration/kolibri_content/__init__.py b/contentcuration/kolibri_content/__init__.py index 77850d027a..5c5fecb1bf 100644 --- a/contentcuration/kolibri_content/__init__.py +++ b/contentcuration/kolibri_content/__init__.py @@ -1 +1 @@ -default_app_config = 'kolibri_content.apps.KolibriContentConfig' +default_app_config = "kolibri_content.apps.KolibriContentConfig" diff --git a/contentcuration/kolibri_content/apps.py b/contentcuration/kolibri_content/apps.py index f7a8e2adf0..18f118e0ca 100644 --- a/contentcuration/kolibri_content/apps.py +++ b/contentcuration/kolibri_content/apps.py @@ -2,5 +2,5 @@ class KolibriContentConfig(AppConfig): - name = 'kolibri_content' - label = 'content' + name = "kolibri_content" + label = "content" diff --git a/contentcuration/kolibri_content/base_models.py b/contentcuration/kolibri_content/base_models.py index 786a927740..220558a0bb 100644 --- a/contentcuration/kolibri_content/base_models.py +++ b/contentcuration/kolibri_content/base_models.py @@ -40,7 +40,12 @@ class ContentNode(MPTTModel): id = UUIDField(primary_key=True) parent = TreeForeignKey( - "self", null=True, blank=True, related_name="children", db_index=True, on_delete=models.CASCADE + "self", + null=True, + 
blank=True, + related_name="children", + db_index=True, + on_delete=models.CASCADE, ) license_name = models.CharField(max_length=50, null=True, blank=True) license_description = models.TextField(null=True, blank=True) @@ -68,7 +73,9 @@ class ContentNode(MPTTModel): author = models.CharField(max_length=200, blank=True) kind = models.CharField(max_length=200, choices=content_kinds.choices, blank=True) available = models.BooleanField(default=False) - lang = models.ForeignKey("Language", blank=True, null=True, on_delete=models.CASCADE) + lang = models.ForeignKey( + "Language", blank=True, null=True, on_delete=models.CASCADE + ) # A JSON Dictionary of properties to configure loading, rendering, etc. the file options = JSONField(default={}, blank=True, null=True) @@ -110,12 +117,18 @@ class File(models.Model): id = UUIDField(primary_key=True) # The foreign key mapping happens here as many File objects can map onto a single local file - local_file = models.ForeignKey("LocalFile", related_name="files", on_delete=models.CASCADE) - contentnode = models.ForeignKey("ContentNode", related_name="files", on_delete=models.CASCADE) + local_file = models.ForeignKey( + "LocalFile", related_name="files", on_delete=models.CASCADE + ) + contentnode = models.ForeignKey( + "ContentNode", related_name="files", on_delete=models.CASCADE + ) preset = models.CharField( max_length=150, choices=format_presets.choices, blank=True ) - lang = models.ForeignKey("Language", blank=True, null=True, on_delete=models.CASCADE) + lang = models.ForeignKey( + "Language", blank=True, null=True, on_delete=models.CASCADE + ) supplementary = models.BooleanField(default=False) thumbnail = models.BooleanField(default=False) priority = models.IntegerField(blank=True, null=True, db_index=True) @@ -150,7 +163,9 @@ class AssessmentMetaData(models.Model): """ id = UUIDField(primary_key=True) - contentnode = models.ForeignKey("ContentNode", related_name="assessmentmetadata", on_delete=models.CASCADE) + contentnode = models.ForeignKey( + "ContentNode", related_name="assessmentmetadata", on_delete=models.CASCADE + ) # A JSON blob containing a serialized list of ids for questions that the assessment can present. assessment_item_ids = JSONField(default=[]) # Length of the above assessment_item_ids for a convenience lookup. 
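AssessmentMetaData above keeps the question ids as a serialized JSON blob (assessment_item_ids) plus a separate integer count for a cheap lookup; the count field's definition falls outside this hunk, and the name number_of_assessments used below is assumed from Kolibri's schema. A minimal sketch of deriving both values from one source of truth so they cannot drift apart (hypothetical helper, not part of this diff):

import json

def build_assessment_metadata(question_ids):
    # Serialize once and count the same list, so the convenience count
    # always matches the JSON blob it summarizes.
    ids = list(question_ids)
    return {
        "assessment_item_ids": json.dumps(ids),
        "number_of_assessments": len(ids),
    }

For example, build_assessment_metadata(["q1", "q2"]) yields {'assessment_item_ids': '["q1", "q2"]', 'number_of_assessments': 2}.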
diff --git a/contentcuration/kolibri_content/fields.py b/contentcuration/kolibri_content/fields.py index 5220359026..bdfc4366a4 100644 --- a/contentcuration/kolibri_content/fields.py +++ b/contentcuration/kolibri_content/fields.py @@ -108,6 +108,7 @@ class JSONField(JSONFieldBase): implementation here to be sure: https://github.com/learningequality/kolibri/blob/0f6bb6781a4453cd9fdc836d52b65dd69e395b20/kolibri/core/fields.py#L102 """ + def from_db_value(self, value, expression, connection): if isinstance(value, str): try: diff --git a/contentcuration/kolibri_content/migrations/0001_initial.py b/contentcuration/kolibri_content/migrations/0001_initial.py index 6cc24249c8..7d3727d872 100644 --- a/contentcuration/kolibri_content/migrations/0001_initial.py +++ b/contentcuration/kolibri_content/migrations/0001_initial.py @@ -102,7 +102,9 @@ class Migration(migrations.Migration): ), ), ], - options={"ordering": ("lft",),}, + options={ + "ordering": ("lft",), + }, ), migrations.CreateModel( name="ContentTag", @@ -184,7 +186,9 @@ class Migration(migrations.Migration): ), ), ], - options={"ordering": ["priority"],}, + options={ + "ordering": ["priority"], + }, ), migrations.CreateModel( name="Language", diff --git a/contentcuration/kolibri_content/migrations/0003_contentnode_coach_content.py b/contentcuration/kolibri_content/migrations/0003_contentnode_coach_content.py index da83434af1..541c60e3f8 100644 --- a/contentcuration/kolibri_content/migrations/0003_contentnode_coach_content.py +++ b/contentcuration/kolibri_content/migrations/0003_contentnode_coach_content.py @@ -7,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0002_auto_20180327_1414'), + ("content", "0002_auto_20180327_1414"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='coach_content', + model_name="contentnode", + name="coach_content", field=models.BooleanField(db_index=True, default=False), ), ] diff --git a/contentcuration/kolibri_content/migrations/0008_channelmetadata_tagline.py b/contentcuration/kolibri_content/migrations/0008_channelmetadata_tagline.py index fd39f84c8c..d72ff81a42 100644 --- a/contentcuration/kolibri_content/migrations/0008_channelmetadata_tagline.py +++ b/contentcuration/kolibri_content/migrations/0008_channelmetadata_tagline.py @@ -7,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0007_auto_20200613_0050'), + ("content", "0007_auto_20200613_0050"), ] operations = [ migrations.AddField( - model_name='channelmetadata', - name='tagline', + model_name="channelmetadata", + name="tagline", field=models.CharField(blank=True, max_length=150, null=True), ), ] diff --git a/contentcuration/kolibri_content/migrations/0009_contentnode_options.py b/contentcuration/kolibri_content/migrations/0009_contentnode_options.py index 369f56f92c..f4ca4542df 100644 --- a/contentcuration/kolibri_content/migrations/0009_contentnode_options.py +++ b/contentcuration/kolibri_content/migrations/0009_contentnode_options.py @@ -7,13 +7,13 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0008_channelmetadata_tagline'), + ("content", "0008_channelmetadata_tagline"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='options', + model_name="contentnode", + name="options", field=jsonfield.fields.JSONField(default={}), ), ] diff --git a/contentcuration/kolibri_content/migrations/0010_auto_20210202_0604.py b/contentcuration/kolibri_content/migrations/0010_auto_20210202_0604.py index 4572673628..725328c521 
100644 --- a/contentcuration/kolibri_content/migrations/0010_auto_20210202_0604.py +++ b/contentcuration/kolibri_content/migrations/0010_auto_20210202_0604.py @@ -7,18 +7,60 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0009_contentnode_options'), + ("content", "0009_contentnode_options"), ] operations = [ migrations.AlterField( - model_name='file', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40), + model_name="file", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("epub", "ePub Document"), + ], + max_length=40, + ), ), migrations.AlterField( - model_name='localfile', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('epub', 'ePub Document')], max_length=40), + model_name="localfile", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("epub", "ePub Document"), + ], + max_length=40, + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0011_auto_20210504_1744.py b/contentcuration/kolibri_content/migrations/0011_auto_20210504_1744.py index 72a8620702..323e94e9fb 100644 --- a/contentcuration/kolibri_content/migrations/0011_auto_20210504_1744.py +++ b/contentcuration/kolibri_content/migrations/0011_auto_20210504_1744.py @@ -7,13 +7,43 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0010_auto_20210202_0604'), + ("content", "0010_auto_20210202_0604"), ] operations = [ migrations.AlterField( - model_name='file', - name='preset', - field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 
'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150), + model_name="file", + name="preset", + field=models.CharField( + blank=True, + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0012_auto_20210511_1605.py b/contentcuration/kolibri_content/migrations/0012_auto_20210511_1605.py index bb08a8f64a..134e661bdc 100644 --- a/contentcuration/kolibri_content/migrations/0012_auto_20210511_1605.py +++ b/contentcuration/kolibri_content/migrations/0012_auto_20210511_1605.py @@ -7,13 +7,45 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0011_auto_20210504_1744'), + ("content", "0011_auto_20210504_1744"), ] operations = [ migrations.AlterField( - model_name='file', - name='preset', - field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150), + model_name="file", + name="preset", + field=models.CharField( + blank=True, + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub 
Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0013_auto_20210519_1759.py b/contentcuration/kolibri_content/migrations/0013_auto_20210519_1759.py index 1617a522bf..b731f0910d 100644 --- a/contentcuration/kolibri_content/migrations/0013_auto_20210519_1759.py +++ b/contentcuration/kolibri_content/migrations/0013_auto_20210519_1759.py @@ -1,44 +1,59 @@ # Generated by Django 2.2.17 on 2021-05-19 17:59 - -from django.db import migrations, models import django.db.models.deletion +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('content', '0012_auto_20210511_1605'), + ("content", "0012_auto_20210511_1605"), ] operations = [ migrations.AlterField( - model_name='contentnode', - name='lang', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='content.Language'), + model_name="contentnode", + name="lang", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="content.Language", + ), ), migrations.AlterField( - model_name='contentnode', - name='level', + model_name="contentnode", + name="level", field=models.PositiveIntegerField(editable=False), ), migrations.AlterField( - model_name='contentnode', - name='lft', + model_name="contentnode", + name="lft", field=models.PositiveIntegerField(editable=False), ), migrations.AlterField( - model_name='contentnode', - name='license', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='content.License'), + model_name="contentnode", + name="license", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="content.License", + ), ), migrations.AlterField( - model_name='contentnode', - name='rght', + model_name="contentnode", + name="rght", field=models.PositiveIntegerField(editable=False), ), migrations.AlterField( - model_name='file', - name='lang', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='content.Language'), + model_name="file", + name="lang", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="content.Language", + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0014_auto_20210603_1536.py b/contentcuration/kolibri_content/migrations/0014_auto_20210603_1536.py index 84cc50507f..80a32a1646 100644 --- a/contentcuration/kolibri_content/migrations/0014_auto_20210603_1536.py +++ b/contentcuration/kolibri_content/migrations/0014_auto_20210603_1536.py @@ -6,23 +6,23 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0013_auto_20210519_1759'), + ("content", "0013_auto_20210519_1759"), ] operations = [ migrations.AlterField( - 
model_name='assessmentmetadata', - name='assessment_item_ids', - field=models.TextField(default='[]'), + model_name="assessmentmetadata", + name="assessment_item_ids", + field=models.TextField(default="[]"), ), migrations.AlterField( - model_name='assessmentmetadata', - name='mastery_model', - field=models.TextField(default='{}'), + model_name="assessmentmetadata", + name="mastery_model", + field=models.TextField(default="{}"), ), migrations.AlterField( - model_name='contentnode', - name='options', - field=models.TextField(default='{}'), + model_name="contentnode", + name="options", + field=models.TextField(default="{}"), ), ] diff --git a/contentcuration/kolibri_content/migrations/0015_auto_20210707_1606.py b/contentcuration/kolibri_content/migrations/0015_auto_20210707_1606.py index d04e46899b..beaf302d15 100644 --- a/contentcuration/kolibri_content/migrations/0015_auto_20210707_1606.py +++ b/contentcuration/kolibri_content/migrations/0015_auto_20210707_1606.py @@ -6,28 +6,120 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0014_auto_20210603_1536'), + ("content", "0014_auto_20210603_1536"), ] operations = [ migrations.AlterField( - model_name='contentnode', - name='kind', - field=models.CharField(blank=True, choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('html5', 'HTML5 App'), ('slideshow', 'Slideshow'), ('h5p', 'H5P'), ('zim', 'Zim')], max_length=200), + model_name="contentnode", + name="kind", + field=models.CharField( + blank=True, + choices=[ + ("topic", "Topic"), + ("video", "Video"), + ("audio", "Audio"), + ("exercise", "Exercise"), + ("document", "Document"), + ("html5", "HTML5 App"), + ("slideshow", "Slideshow"), + ("h5p", "H5P"), + ("zim", "Zim"), + ], + max_length=200, + ), ), migrations.AlterField( - model_name='file', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document')], max_length=40), + model_name="file", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ], + max_length=40, + ), ), migrations.AlterField( - model_name='file', - name='preset', - field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel 
Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest')], max_length=150), + model_name="file", + name="preset", + field=models.CharField( + blank=True, + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ], + max_length=150, + ), ), migrations.AlterField( - model_name='localfile', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document')], max_length=40), + model_name="localfile", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ], + max_length=40, + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0016_contentnode_duration.py b/contentcuration/kolibri_content/migrations/0016_contentnode_duration.py index 7c442f6e05..15afe3822b 100644 --- a/contentcuration/kolibri_content/migrations/0016_contentnode_duration.py +++ b/contentcuration/kolibri_content/migrations/0016_contentnode_duration.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0015_auto_20210707_1606'), + ("content", "0015_auto_20210707_1606"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='duration', + model_name="contentnode", + name="duration", field=models.IntegerField(blank=True, null=True), ), ]
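The migration hunks in this patch are mechanical formatter output, so the same pattern repeats file after file: reorder-python-imports rewrites combined imports one name per line, and black normalizes string quotes and explodes any call that no longer fits on one line. A minimal sketch of both rewrites, assuming black's default 88-character line length (illustrative code, not taken from the repository):

from django.db import migrations
from django.db import models
# reorder-python-imports produced the two lines above from the combined
# "from django.db import migrations, models" import.

# black rewrites single-quoted strings to double quotes and, once a call no
# longer fits on one line, places one argument or list element per line and
# keeps a trailing comma so the exploded layout stays stable on reformatting:
field = models.CharField(
    blank=True,
    choices=[
        ("mp4", "MP4 Video"),
        ("webm", "WEBM Video"),
    ],
    max_length=40,
)

diff --git a/contentcuration/kolibri_content/migrations/0018_auto_20220224_2031.py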
b/contentcuration/kolibri_content/migrations/0018_auto_20220224_2031.py index ee2e7c0eab..cab6456694 100644 --- a/contentcuration/kolibri_content/migrations/0018_auto_20220224_2031.py +++ b/contentcuration/kolibri_content/migrations/0018_auto_20220224_2031.py @@ -6,38 +6,38 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0017_alter_contentnode_kind'), + ("content", "0017_alter_contentnode_kind"), ] operations = [ migrations.AddField( - model_name='contentnode', - name='accessibility_labels', + model_name="contentnode", + name="accessibility_labels", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='categories', + model_name="contentnode", + name="categories", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='grade_levels', + model_name="contentnode", + name="grade_levels", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='learner_needs', + model_name="contentnode", + name="learner_needs", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='learning_activities', + model_name="contentnode", + name="learning_activities", field=models.TextField(blank=True, null=True), ), migrations.AddField( - model_name='contentnode', - name='resource_types', + model_name="contentnode", + name="resource_types", field=models.TextField(blank=True, null=True), ), ] diff --git a/contentcuration/kolibri_content/migrations/0019_auto_20230207_0116.py b/contentcuration/kolibri_content/migrations/0019_auto_20230207_0116.py index 55cada5291..1cdb71a093 100644 --- a/contentcuration/kolibri_content/migrations/0019_auto_20230207_0116.py +++ b/contentcuration/kolibri_content/migrations/0019_auto_20230207_0116.py @@ -8,65 +8,75 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0018_auto_20220224_2031'), + ("content", "0018_auto_20220224_2031"), ] operations = [ migrations.AlterModelOptions( - name='contentnode', + name="contentnode", options={}, ), migrations.AlterModelOptions( - name='file', + name="file", options={}, ), migrations.AlterField( - model_name='assessmentmetadata', - name='assessment_item_ids', + model_name="assessmentmetadata", + name="assessment_item_ids", field=kolibri_content.fields.JSONField(default=[]), ), migrations.AlterField( - model_name='assessmentmetadata', - name='mastery_model', + model_name="assessmentmetadata", + name="mastery_model", field=kolibri_content.fields.JSONField(default={}), ), migrations.AlterField( - model_name='contentnode', - name='coach_content', + model_name="contentnode", + name="coach_content", field=models.BooleanField(default=False), ), migrations.AlterField( - model_name='contentnode', - name='description', + model_name="contentnode", + name="description", field=models.TextField(blank=True, null=True), ), migrations.AlterField( - model_name='contentnode', - name='duration', + model_name="contentnode", + name="duration", field=models.PositiveIntegerField(blank=True, null=True), ), migrations.AlterField( - model_name='contentnode', - name='lang', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='content.language'), + model_name="contentnode", + name="lang", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="content.language", + ), ), migrations.AlterField( - model_name='contentnode', - 
name='license_description', + model_name="contentnode", + name="license_description", field=models.TextField(blank=True, null=True), ), migrations.AlterField( - model_name='contentnode', - name='options', + model_name="contentnode", + name="options", field=kolibri_content.fields.JSONField(blank=True, default={}, null=True), ), migrations.AlterField( - model_name='file', - name='lang', - field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='content.language'), + model_name="file", + name="lang", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="content.language", + ), ), migrations.AlterIndexTogether( - name='contentnode', + name="contentnode", index_together=set(), ), ] diff --git a/contentcuration/kolibri_content/migrations/0020_alter_file_preset.py b/contentcuration/kolibri_content/migrations/0020_alter_file_preset.py index 08f714d583..10ae0dd26b 100644 --- a/contentcuration/kolibri_content/migrations/0020_alter_file_preset.py +++ b/contentcuration/kolibri_content/migrations/0020_alter_file_preset.py @@ -6,13 +6,48 @@ class Migration(migrations.Migration): dependencies = [ - ('content', '0019_auto_20230207_0116'), + ("content", "0019_auto_20230207_0116"), ] operations = [ migrations.AlterField( - model_name='file', - name='preset', - field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip')], max_length=150), + model_name="file", + name="preset", + field=models.CharField( + blank=True, + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", 
"Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ("imscp_zip", "IMSCP Zip"), + ], + max_length=150, + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0021_auto_20240612_1847.py b/contentcuration/kolibri_content/migrations/0021_auto_20240612_1847.py index 8795b6bbd2..3546278937 100644 --- a/contentcuration/kolibri_content/migrations/0021_auto_20240612_1847.py +++ b/contentcuration/kolibri_content/migrations/0021_auto_20240612_1847.py @@ -1,28 +1,110 @@ # Generated by Django 3.2.24 on 2024-06-12 18:47 - -from django.db import migrations, models +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('content', '0020_alter_file_preset'), + ("content", "0020_alter_file_preset"), ] operations = [ migrations.AlterField( - model_name='file', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'bloom Document')], max_length=40), + model_name="file", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "bloom Document"), + ], + max_length=40, + ), ), migrations.AlterField( - model_name='file', - name='preset', - field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip'), ('bloompub', 'Bloom Document')], max_length=150), + model_name="file", + name="preset", + field=models.CharField( + blank=True, + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", 
"Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ("imscp_zip", "IMSCP Zip"), + ("bloompub", "Bloom Document"), + ], + max_length=150, + ), ), migrations.AlterField( - model_name='localfile', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'bloom Document')], max_length=40), + model_name="localfile", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "bloom Document"), + ], + max_length=40, + ), ), ] diff --git a/contentcuration/kolibri_content/migrations/0022_auto_20240915_1414.py b/contentcuration/kolibri_content/migrations/0022_auto_20240915_1414.py index c090382987..70c810c893 100644 --- a/contentcuration/kolibri_content/migrations/0022_auto_20240915_1414.py +++ b/contentcuration/kolibri_content/migrations/0022_auto_20240915_1414.py @@ -1,23 +1,71 @@ # Generated by Django 3.2.24 on 2024-09-15 14:14 - -from django.db import migrations, models +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('content', '0021_auto_20240612_1847'), + ("content", "0021_auto_20240612_1847"), ] operations = [ migrations.AlterField( - model_name='file', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'Bloom Document'), ('bloomd', 'Bloom Document')], max_length=40), + model_name="file", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG 
Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "Bloom Document"), + ("bloomd", "Bloom Document"), + ], + max_length=40, + ), ), migrations.AlterField( - model_name='localfile', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'Bloom Document'), ('bloomd', 'Bloom Document')], max_length=40), + model_name="localfile", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "Bloom Document"), + ("bloomd", "Bloom Document"), + ], + max_length=40, + ), ), ] diff --git a/contentcuration/kolibri_public/apps.py b/contentcuration/kolibri_public/apps.py index ce4d9066cf..6c2b59cbe6 100644 --- a/contentcuration/kolibri_public/apps.py +++ b/contentcuration/kolibri_public/apps.py @@ -2,5 +2,5 @@ class KolibriPublicConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'kolibri_public' + default_auto_field = "django.db.models.BigAutoField" + name = "kolibri_public" diff --git a/contentcuration/kolibri_public/import_metadata_view.py b/contentcuration/kolibri_public/import_metadata_view.py index 100ac870e3..9cf6e00952 100644 --- a/contentcuration/kolibri_public/import_metadata_view.py +++ b/contentcuration/kolibri_public/import_metadata_view.py @@ -10,8 +10,12 @@ from django.utils.decorators import method_decorator from kolibri_content import base_models from kolibri_content import models as kolibri_content_models -from kolibri_content.constants.schema_versions import CONTENT_SCHEMA_VERSION # Use kolibri_content -from kolibri_content.constants.schema_versions import MIN_CONTENT_SCHEMA_VERSION # Use kolibri_content +from kolibri_content.constants.schema_versions import ( + CONTENT_SCHEMA_VERSION, +) # Use kolibri_content +from kolibri_content.constants.schema_versions import ( + MIN_CONTENT_SCHEMA_VERSION, +) # Use kolibri_content from kolibri_public import models # Use kolibri_public models from kolibri_public.views import metadata_cache from rest_framework import status @@ -28,7 +32,9 @@ def _get_kc_and_base_models(model): except AttributeError: # This will happen if it's a M2M through model, which only exist on ContentNode through_model_name = model.__name__.replace("ContentNode_", "") - kc_model = getattr(kolibri_content_models.ContentNode, through_model_name).through + kc_model = getattr( + kolibri_content_models.ContentNode, through_model_name + ).through # Through models are not defined for the abstract base models, so we just cheat and # use these instead. 
base_model = kc_model @@ -74,8 +80,7 @@ def retrieve(self, request, pk=None): # noqa: C901 UUID(pk) except ValueError: return Response( - {"error": "Invalid UUID format."}, - status=status.HTTP_400_BAD_REQUEST + {"error": "Invalid UUID format."}, status=status.HTTP_400_BAD_REQUEST ) content_schema = request.query_params.get( @@ -159,9 +164,15 @@ def retrieve(self, request, pk=None): # noqa: C901 field_names.add(base_model._mptt_meta.left_attr) field_names.add(base_model._mptt_meta.right_attr) field_names.add(base_model._mptt_meta.level_attr) - raw_fields = [field.column for field in kc_model._meta.fields if field.column in field_names] + raw_fields = [ + field.column + for field in kc_model._meta.fields + if field.column in field_names + ] if qs.model is models.Language: - raw_fields = [rf for rf in raw_fields if rf != "lang_name"] + ["native_name"] + raw_fields = [rf for rf in raw_fields if rf != "lang_name"] + [ + "native_name" + ] qs = qs.values(*raw_fields) # Avoid using the Django queryset directly, as it will coerce the database values # via its field 'from_db_value' transformers, whereas import metadata is read @@ -171,7 +182,16 @@ def retrieve(self, request, pk=None): # noqa: C901 cursor.execute(*qs.query.sql_with_params()) data[table_name] = [ # Coerce any UUIDs to their hex representation, as Postgres raw values will be UUIDs - dict(zip(raw_fields, (value.hex if isinstance(value, UUID) else value for value in row))) for row in cursor + dict( + zip( + raw_fields, + ( + value.hex if isinstance(value, UUID) else value + for value in row + ), + ) + ) + for row in cursor ] if qs.model is models.Language: for lang in data[table_name]: diff --git a/contentcuration/kolibri_public/management/commands/export_channels_to_kolibri_public.py b/contentcuration/kolibri_public/management/commands/export_channels_to_kolibri_public.py index 076053ad90..3a0aee8fcd 100644 --- a/contentcuration/kolibri_public/management/commands/export_channels_to_kolibri_public.py +++ b/contentcuration/kolibri_public/management/commands/export_channels_to_kolibri_public.py @@ -32,7 +32,7 @@ def add_arguments(self, parser): "--channel-id", type=str, dest="channel_id", - help="The channel_id for which generate kolibri_public models [default: all channels]" + help="The channel_id for which generate kolibri_public models [default: all channels]", ) def handle(self, *args, **options): @@ -42,8 +42,14 @@ def handle(self, *args, **options): ids_to_export.append(options["channel_id"]) else: self._republish_problem_channels() - public_channel_ids = set(Channel.objects.filter(public=True, deleted=False, main_tree__published=True).values_list("id", flat=True)) - kolibri_public_channel_ids = set(ChannelMetadata.objects.all().values_list("id", flat=True)) + public_channel_ids = set( + Channel.objects.filter( + public=True, deleted=False, main_tree__published=True + ).values_list("id", flat=True) + ) + kolibri_public_channel_ids = set( + ChannelMetadata.objects.all().values_list("id", flat=True) + ) ids_to_export = public_channel_ids.difference(kolibri_public_channel_ids) count = 0 @@ -52,23 +58,41 @@ def handle(self, *args, **options): self._export_channel(channel_id) count += 1 except FileNotFoundError: - logger.warning("Tried to export channel {} to kolibri_public but its published channel database could not be found".format(channel_id)) + logger.warning( + "Tried to export channel {} to kolibri_public but its published channel database could not be found".format( + channel_id + ) + ) except Exception as e: - 
logger.exception("Failed to export channel {} to kolibri_public because of error: {}".format(channel_id, e)) + logger.exception( + "Failed to export channel {} to kolibri_public because of error: {}".format( + channel_id, e + ) + ) logger.info("Successfully put {} channels into kolibri_public".format(count)) def _export_channel(self, channel_id): logger.info("Putting channel {} into kolibri_public".format(channel_id)) - db_location = os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id)) + db_location = os.path.join( + settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id) + ) with storage.open(db_location) as storage_file: with tempfile.NamedTemporaryFile(suffix=".sqlite3") as db_file: shutil.copyfileobj(storage_file, db_file) db_file.seek(0) with using_content_database(db_file.name): # Run migration to handle old content databases published prior to current fields being added. - call_command("migrate", app_label=KolibriContentConfig.label, database=get_active_content_database()) + call_command( + "migrate", + app_label=KolibriContentConfig.label, + database=get_active_content_database(), + ) channel = ExportedChannelMetadata.objects.get(id=channel_id) - logger.info("Found channel {} for id: {} mapping now".format(channel.name, channel_id)) + logger.info( + "Found channel {} for id: {} mapping now".format( + channel.name, channel_id + ) + ) mapper = ChannelMapper(channel) mapper.run() @@ -81,19 +105,26 @@ def _republish_problem_channels(self): logger.error("Could not find chef user to republish channels") return channel_qs = Channel.objects.filter( - public=True, - deleted=False, - main_tree__published=True + public=True, deleted=False, main_tree__published=True ).filter( - Q(last_published__isnull=True) | - Q(last_published__lt=twenty_19, main_tree__modified__lte=F("last_published") + five_minutes) + Q(last_published__isnull=True) + | Q( + last_published__lt=twenty_19, + main_tree__modified__lte=F("last_published") + five_minutes, + ) ) for channel in channel_qs: try: - kolibri_temp_db = create_content_database(channel, True, chef_user.id, False) + kolibri_temp_db = create_content_database( + channel, True, chef_user.id, False + ) os.remove(kolibri_temp_db) channel.last_published = timezone.now() channel.save() except Exception as e: - logger.exception("Failed to export channel {} to kolibri_public because of error: {}".format(channel.id, e)) + logger.exception( + "Failed to export channel {} to kolibri_public because of error: {}".format( + channel.id, e + ) + ) diff --git a/contentcuration/kolibri_public/management/commands/rectify_incorrect_contentnode_source_fields.py b/contentcuration/kolibri_public/management/commands/rectify_incorrect_contentnode_source_fields.py index c4d40af4c0..ded124c098 100644 --- a/contentcuration/kolibri_public/management/commands/rectify_incorrect_contentnode_source_fields.py +++ b/contentcuration/kolibri_public/management/commands/rectify_incorrect_contentnode_source_fields.py @@ -15,14 +15,11 @@ class Command(BaseCommand): - def handle(self, *args, **options): main_trees_cte = With( ( - Channel.objects.filter( - main_tree__isnull=False - ) + Channel.objects.filter(main_tree__isnull=False) .annotate(channel_id=F("id")) .values("channel_id", "deleted", tree_id=F("main_tree__tree_id")) ), @@ -32,7 +29,9 @@ def handle(self, *args, **options): nodes = main_trees_cte.join( ContentNode.objects.all(), tree_id=main_trees_cte.col.tree_id, - ).annotate(channel_id=main_trees_cte.col.channel_id, deleted=main_trees_cte.col.deleted) + ).annotate( + 
channel_id=main_trees_cte.col.channel_id, deleted=main_trees_cte.col.deleted + ) original_source_nodes = ( nodes.with_cte(main_trees_cte) @@ -43,7 +42,9 @@ def handle(self, *args, **options): tree_id=OuterRef("tree_id"), ) .annotate( - coalesced_license_description=Coalesce("license_description", Value("")), + coalesced_license_description=Coalesce( + "license_description", Value("") + ), ) ) diff = ( @@ -58,7 +59,9 @@ def handle(self, *args, **options): diff_combined = diff.annotate( original_source_node_f_changed=Exists( original_source_nodes.exclude( - coalesced_license_description=OuterRef("coalesced_license_description") + coalesced_license_description=OuterRef( + "coalesced_license_description" + ) ) ) ).filter(original_source_node_f_changed=True) @@ -87,6 +90,11 @@ def handle(self, *args, **options): if original_source_channel_id is not None and original_source_node.exists(): # original source node exists and its license_description doesn't match # update the base node - if base_node.license_description != original_source_node[0].license_description: - base_node.license_description = original_source_node[0].license_description + if ( + base_node.license_description + != original_source_node[0].license_description + ): + base_node.license_description = original_source_node[ + 0 + ].license_description base_node.save() diff --git a/contentcuration/kolibri_public/migrations/0002_mptttreeidmanager.py b/contentcuration/kolibri_public/migrations/0002_mptttreeidmanager.py index fb216e0557..d815b53604 100644 --- a/contentcuration/kolibri_public/migrations/0002_mptttreeidmanager.py +++ b/contentcuration/kolibri_public/migrations/0002_mptttreeidmanager.py @@ -6,14 +6,22 @@ class Migration(migrations.Migration): dependencies = [ - ('kolibri_public', '0001_initial'), + ("kolibri_public", "0001_initial"), ] operations = [ migrations.CreateModel( - name='MPTTTreeIDManager', + name="MPTTTreeIDManager", fields=[ - ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), ], ), ] diff --git a/contentcuration/kolibri_public/migrations/0004_auto_20240612_1847.py b/contentcuration/kolibri_public/migrations/0004_auto_20240612_1847.py index 2c3110c2d8..6047e7ce73 100644 --- a/contentcuration/kolibri_public/migrations/0004_auto_20240612_1847.py +++ b/contentcuration/kolibri_public/migrations/0004_auto_20240612_1847.py @@ -1,23 +1,82 @@ # Generated by Django 3.2.24 on 2024-06-12 18:47 - -from django.db import migrations, models +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('kolibri_public', '0003_alter_file_preset'), + ("kolibri_public", "0003_alter_file_preset"), ] operations = [ migrations.AlterField( - model_name='file', - name='preset', - field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('video_dependency', 'Video (dependency)'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('audio_dependency', 'audio (dependency)'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 
'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_dependency', 'HTML5 Dependency (Zip format)'), ('html5_thumbnail', 'HTML5 Thumbnail'), ('h5p', 'H5P Zip'), ('h5p_thumbnail', 'H5P Thumbnail'), ('zim', 'Zim'), ('zim_thumbnail', 'Zim Thumbnail'), ('qti', 'QTI Zip'), ('qti_thumbnail', 'QTI Thumbnail'), ('slideshow_image', 'Slideshow Image'), ('slideshow_thumbnail', 'Slideshow Thumbnail'), ('slideshow_manifest', 'Slideshow Manifest'), ('imscp_zip', 'IMSCP Zip'), ('bloompub', 'Bloom Document')], max_length=150), + model_name="file", + name="preset", + field=models.CharField( + blank=True, + choices=[ + ("high_res_video", "High Resolution"), + ("low_res_video", "Low Resolution"), + ("video_thumbnail", "Thumbnail"), + ("video_subtitle", "Subtitle"), + ("video_dependency", "Video (dependency)"), + ("audio", "Audio"), + ("audio_thumbnail", "Thumbnail"), + ("audio_dependency", "audio (dependency)"), + ("document", "Document"), + ("epub", "ePub Document"), + ("document_thumbnail", "Thumbnail"), + ("exercise", "Exercise"), + ("exercise_thumbnail", "Thumbnail"), + ("exercise_image", "Exercise Image"), + ("exercise_graphie", "Exercise Graphie"), + ("channel_thumbnail", "Channel Thumbnail"), + ("topic_thumbnail", "Thumbnail"), + ("html5_zip", "HTML5 Zip"), + ("html5_dependency", "HTML5 Dependency (Zip format)"), + ("html5_thumbnail", "HTML5 Thumbnail"), + ("h5p", "H5P Zip"), + ("h5p_thumbnail", "H5P Thumbnail"), + ("zim", "Zim"), + ("zim_thumbnail", "Zim Thumbnail"), + ("qti", "QTI Zip"), + ("qti_thumbnail", "QTI Thumbnail"), + ("slideshow_image", "Slideshow Image"), + ("slideshow_thumbnail", "Slideshow Thumbnail"), + ("slideshow_manifest", "Slideshow Manifest"), + ("imscp_zip", "IMSCP Zip"), + ("bloompub", "Bloom Document"), + ], + max_length=150, + ), ), migrations.AlterField( - model_name='localfile', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'bloom Document')], max_length=40), + model_name="localfile", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "bloom Document"), + ], + max_length=40, + ), ), ] diff --git a/contentcuration/kolibri_public/migrations/0005_alter_localfile_extension.py b/contentcuration/kolibri_public/migrations/0005_alter_localfile_extension.py index cbdc55dce3..0d3fbf7c3d 100644 --- a/contentcuration/kolibri_public/migrations/0005_alter_localfile_extension.py +++ b/contentcuration/kolibri_public/migrations/0005_alter_localfile_extension.py @@ -1,18 +1,42 @@ # Generated by Django 3.2.24 on 2024-09-15 14:14 - -from django.db import migrations, models +from django.db import migrations +from django.db import models class Migration(migrations.Migration): dependencies = [ - ('kolibri_public', 
'0004_auto_20240612_1847'), + ("kolibri_public", "0004_auto_20240612_1847"), ] operations = [ migrations.AlterField( - model_name='localfile', - name='extension', - field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('webm', 'WEBM Video'), ('vtt', 'VTT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('graphie', 'Graphie Exercise'), ('zip', 'HTML5 Zip'), ('h5p', 'H5P'), ('zim', 'ZIM'), ('epub', 'ePub Document'), ('bloompub', 'Bloom Document'), ('bloomd', 'Bloom Document')], max_length=40), + model_name="localfile", + name="extension", + field=models.CharField( + blank=True, + choices=[ + ("mp4", "MP4 Video"), + ("webm", "WEBM Video"), + ("vtt", "VTT Subtitle"), + ("mp3", "MP3 Audio"), + ("pdf", "PDF Document"), + ("jpg", "JPG Image"), + ("jpeg", "JPEG Image"), + ("png", "PNG Image"), + ("gif", "GIF Image"), + ("json", "JSON"), + ("svg", "SVG Image"), + ("perseus", "Perseus Exercise"), + ("graphie", "Graphie Exercise"), + ("zip", "HTML5 Zip"), + ("h5p", "H5P"), + ("zim", "ZIM"), + ("epub", "ePub Document"), + ("bloompub", "Bloom Document"), + ("bloomd", "Bloom Document"), + ], + max_length=40, + ), ), ] diff --git a/contentcuration/kolibri_public/search.py b/contentcuration/kolibri_public/search.py index 6f1e3648ff..fad6590b4f 100644 --- a/contentcuration/kolibri_public/search.py +++ b/contentcuration/kolibri_public/search.py @@ -43,14 +43,14 @@ for key, labels in metadata_lookup.items(): bitmask_lookup = {} i = 0 - while labels[i: i + 64]: + while labels[i : i + 64]: bitmask_field_name = "{}_bitmask_{}".format(key, i) bitmask_fieldnames[bitmask_field_name] = [] for j, label in enumerate(labels): info = { "bitmask_field_name": bitmask_field_name, "field_name": key, - "bits": 2**j, + "bits": 2 ** j, "label": label, } bitmask_lookup[label] = info @@ -64,9 +64,11 @@ def _get_available_languages(base_queryset): from contentcuration.models import Language langs = Language.objects.filter( - id__in=base_queryset.exclude(lang=None).values_list("lang_id", flat=True).distinct() - # Updated to use contentcuration field names - # Convert language objects to dicts mapped to the kolibri field names + id__in=base_queryset.exclude(lang=None) + .values_list("lang_id", flat=True) + .distinct() + # Updated to use contentcuration field names + # Convert language objects to dicts mapped to the kolibri field names ).values("id", lang_name=F("native_name")) return list(langs) diff --git a/contentcuration/kolibri_public/stopwords.py b/contentcuration/kolibri_public/stopwords.py index ce112936e7..f21e4705f8 100644 --- a/contentcuration/kolibri_public/stopwords.py +++ b/contentcuration/kolibri_public/stopwords.py @@ -4,9 +4,7 @@ # load stopwords file stopwords_path = os.path.abspath( - os.path.join( - os.path.dirname(__file__), "constants", "stopwords-all.json" - ) + os.path.join(os.path.dirname(__file__), "constants", "stopwords-all.json") ) with io.open(stopwords_path, mode="r", encoding="utf-8") as f: stopwords = json.load(f) diff --git a/contentcuration/kolibri_public/tests/base.py b/contentcuration/kolibri_public/tests/base.py index 1e5f341ab2..faa7b1b112 100644 --- a/contentcuration/kolibri_public/tests/base.py +++ b/contentcuration/kolibri_public/tests/base.py @@ -60,7 +60,9 @@ class ChannelBuilder(object): "root_node", ) - def __init__(self, levels=3, num_children=5, models=kolibri_public_models, options=None): + def 
__init__( + self, levels=3, num_children=5, models=kolibri_public_models, options=None + ): self.levels = levels self.num_children = num_children self.models = models @@ -135,7 +137,7 @@ def load_data(self): for key in self.tree_keys: setattr(self, key, data[key]) except KeyError: - print( + print( # noqa: T201 "No tree cache found for {} levels and {} children per level".format( self.levels, self.num_children ) @@ -151,7 +153,9 @@ def save_data(self): self.__TREE_CACHE[self.cache_key] = copy.deepcopy(data) def generate_nodes_from_root_node(self): - self._django_nodes = self.models.ContentNode.objects.build_tree_nodes(self.root_node) + self._django_nodes = self.models.ContentNode.objects.build_tree_nodes( + self.root_node + ) self.nodes = {n["id"]: n for n in map(to_dict, self._django_nodes)} @@ -173,7 +177,9 @@ def insert_into_default_db(self): self.models.LocalFile.objects.bulk_create( (self.models.LocalFile(**local) for local in self.localfiles.values()) ) - self.models.File.objects.bulk_create((self.models.File(**f) for f in self.files.values())) + self.models.File.objects.bulk_create( + (self.models.File(**f) for f in self.files.values()) + ) def recurse_tree_until_leaf_container(self, parent): if not parent.get("children"): diff --git a/contentcuration/kolibri_public/tests/test_content_app.py b/contentcuration/kolibri_public/tests/test_content_app.py index 7ee47ef076..30d14df272 100644 --- a/contentcuration/kolibri_public/tests/test_content_app.py +++ b/contentcuration/kolibri_public/tests/test_content_app.py @@ -195,9 +195,13 @@ def test_contentnode_list_labels(self): nodes = self.root.get_descendants(include_self=True).filter(available=True) response = self._get(reverse("publiccontentnode-list"), data={"max_results": 1}) node_languages = Language.objects.filter(contentnode__in=nodes) - self.assertEqual(len(response.data["labels"]["languages"]), node_languages.distinct().count()) + self.assertEqual( + len(response.data["labels"]["languages"]), node_languages.distinct().count() + ) for lang in response.data["labels"]["languages"]: - self.assertTrue(node_languages.filter(native_name=lang["lang_name"]).exists()) + self.assertTrue( + node_languages.filter(native_name=lang["lang_name"]).exists() + ) def test_contentnode_list_headers(self): channel = models.ChannelMetadata.objects.get() @@ -441,22 +445,16 @@ def test_channelmetadata_langfield_none(self): self.assertEqual(response.data["lang_name"], None) def test_channelmetadata_content_available_param_filter_lowercase_true(self): - response = self.client.get( - reverse("publicchannel-list"), {"available": "true"} - ) + response = self.client.get(reverse("publicchannel-list"), {"available": "true"}) self.assertEqual(response.data[0]["id"], self.channel_data["id"]) def test_channelmetadata_content_available_param_filter_uppercase_true(self): - response = self.client.get( - reverse("publicchannel-list"), {"available": True} - ) + response = self.client.get(reverse("publicchannel-list"), {"available": True}) self.assertEqual(response.data[0]["id"], self.channel_data["id"]) def test_channelmetadata_content_unavailable_param_filter_false(self): models.ContentNode.objects.all().update(available=False) - response = self.client.get( - reverse("publicchannel-list"), {"available": False} - ) + response = self.client.get(reverse("publicchannel-list"), {"available": False}) self.assertEqual(response.data[0]["id"], self.channel_data["id"]) def test_channelmetadata_content_available_field_true(self): @@ -498,7 +496,9 @@ def 
test_channelmetadata_has_exercises_filter(self): reverse("publicchannel-list"), {"has_exercise": True} ) self.assertEqual(len(with_filter_response.data), 1) - self.assertEqual(with_filter_response.data[0]["name"], self.channel_data["name"]) + self.assertEqual( + with_filter_response.data[0]["name"], self.channel_data["name"] + ) def test_filtering_coach_content_anon(self): response = self.client.get( diff --git a/contentcuration/kolibri_public/tests/test_importmetadata_api.py b/contentcuration/kolibri_public/tests/test_importmetadata_api.py index 484fa6c382..e56ab28629 100644 --- a/contentcuration/kolibri_public/tests/test_importmetadata_api.py +++ b/contentcuration/kolibri_public/tests/test_importmetadata_api.py @@ -27,9 +27,7 @@ def setUpTestData(cls): cls.assessmentmetadata = public.AssessmentMetaData.objects.filter( contentnode__in=cls.all_nodes ) - cls.localfiles = public.LocalFile.objects.filter( - files__in=cls.files - ).distinct() + cls.localfiles = public.LocalFile.objects.filter(files__in=cls.files).distinct() cls.languages = public.Language.objects.filter( Q(id__in=cls.files.values_list("lang_id", flat=True)) | Q(id__in=cls.all_nodes.values_list("lang_id", flat=True)) @@ -54,7 +52,9 @@ def _assert_data(self, Model, ContentModel, queryset): field_names.add(BaseModel._mptt_meta.left_attr) field_names.add(BaseModel._mptt_meta.right_attr) field_names.add(BaseModel._mptt_meta.level_attr) - for response_data, obj in zip(response.data[ContentModel._meta.db_table], queryset): + for response_data, obj in zip( + response.data[ContentModel._meta.db_table], queryset + ): # Ensure that we are not returning any empty objects self.assertNotEqual(response_data, {}) for field in fields: @@ -71,7 +71,11 @@ def test_import_metadata_files(self): self._assert_data(public.File, content.File, self.files) def test_import_metadata_assessmentmetadata(self): - self._assert_data(public.AssessmentMetaData, content.AssessmentMetaData, self.assessmentmetadata) + self._assert_data( + public.AssessmentMetaData, + content.AssessmentMetaData, + self.assessmentmetadata, + ) def test_import_metadata_localfiles(self): self._assert_data(public.LocalFile, content.LocalFile, self.localfiles) @@ -80,7 +84,11 @@ def test_import_metadata_languages(self): self._assert_data(public.Language, content.Language, self.languages) def test_import_metadata_through_tags(self): - self._assert_data(public.ContentNode.tags.through, content.ContentNode.tags.through, self.through_tags) + self._assert_data( + public.ContentNode.tags.through, + content.ContentNode.tags.through, + self.through_tags, + ) def test_import_metadata_tags(self): self._assert_data(public.ContentTag, content.ContentTag, self.tags) diff --git a/contentcuration/kolibri_public/tests/test_mapper.py b/contentcuration/kolibri_public/tests/test_mapper.py index 918a352c19..d938233e63 100644 --- a/contentcuration/kolibri_public/tests/test_mapper.py +++ b/contentcuration/kolibri_public/tests/test_mapper.py @@ -18,7 +18,6 @@ class ChannelMapperTest(TestCase): - @property def overrides(self): return { @@ -28,7 +27,7 @@ def overrides(self): }, kolibri_public_models.LocalFile: { "available": True, - } + }, } @classmethod @@ -39,15 +38,32 @@ def setUpClass(cls): admin_user = user() with using_content_database(cls.tempdb): - call_command("migrate", "content", database=get_active_content_database(), no_input=True) - builder = ChannelBuilder(models=kolibri_content_models, options={ - "problematic_tags": True, - "problematic_nodes": True, - }) + call_command( + "migrate", + 
"content", + database=get_active_content_database(), + no_input=True, + ) + builder = ChannelBuilder( + models=kolibri_content_models, + options={ + "problematic_tags": True, + "problematic_nodes": True, + }, + ) builder.insert_into_default_db() - cls.source_root = kolibri_content_models.ContentNode.objects.get(id=builder.root_node["id"]) - cls.channel = kolibri_content_models.ChannelMetadata.objects.get(id=builder.channel["id"]) - contentcuration_channel = Channel.objects.create(actor_id=admin_user.id, id=cls.channel.id, name=cls.channel.name, public=True) + cls.source_root = kolibri_content_models.ContentNode.objects.get( + id=builder.root_node["id"] + ) + cls.channel = kolibri_content_models.ChannelMetadata.objects.get( + id=builder.channel["id"] + ) + contentcuration_channel = Channel.objects.create( + actor_id=admin_user.id, + id=cls.channel.id, + name=cls.channel.name, + public=True, + ) contentcuration_channel.main_tree.published = True contentcuration_channel.main_tree.save() cls.mapper = ChannelMapper(cls.channel) @@ -59,7 +75,9 @@ def _assert_model(self, source, mapped, Model): column = field.column if hasattr(source, column): if Model in self.overrides and column in self.overrides[Model]: - self.assertEqual(self.overrides[Model][column], getattr(mapped, column)) + self.assertEqual( + self.overrides[Model][column], getattr(mapped, column) + ) else: self.assertEqual(getattr(source, column), getattr(mapped, column)) @@ -70,18 +88,28 @@ def _assert_node(self, source, mapped): """ self._assert_model(source, mapped, kolibri_public_models.ContentNode) - for src, mpd in zip(source.assessmentmetadata.all(), mapped.assessmentmetadata.all()): + for src, mpd in zip( + source.assessmentmetadata.all(), mapped.assessmentmetadata.all() + ): self._assert_model(src, mpd, kolibri_public_models.AssessmentMetaData) for src, mpd in zip(source.files.all(), mapped.files.all()): self._assert_model(src, mpd, kolibri_public_models.File) - self._assert_model(src.local_file, mpd.local_file, kolibri_public_models.LocalFile) + self._assert_model( + src.local_file, mpd.local_file, kolibri_public_models.LocalFile + ) # should only map OKAY_TAG and not BAD_TAG for mapped_tag in mapped.tags.all(): self.assertEqual(OKAY_TAG, mapped_tag.tag_name) - self.assertEqual(mapped.ancestors, [{"id": ancestor.id, "title": ancestor.title} for ancestor in source.get_ancestors()]) + self.assertEqual( + mapped.ancestors, + [ + {"id": ancestor.id, "title": ancestor.title} + for ancestor in source.get_ancestors() + ], + ) def _recurse_and_assert(self, sources, mappeds, recursion_depth=0): recursion_depths = [] @@ -106,14 +134,22 @@ def _recurse_and_assert(self, sources, mappeds, recursion_depth=0): def test_map(self): with using_content_database(self.tempdb): self._recurse_and_assert([self.source_root], [self.mapped_root]) - self._assert_model(self.channel, self.mapper.mapped_channel, kolibri_public_models.ChannelMetadata) + self._assert_model( + self.channel, + self.mapper.mapped_channel, + kolibri_public_models.ChannelMetadata, + ) def test_map_replace(self): with using_content_database(self.tempdb): mapper = ChannelMapper(self.channel) mapper.run() self._recurse_and_assert([self.source_root], [mapper.mapped_root]) - self._assert_model(self.channel, self.mapper.mapped_channel, kolibri_public_models.ChannelMetadata) + self._assert_model( + self.channel, + self.mapper.mapped_channel, + kolibri_public_models.ChannelMetadata, + ) @classmethod def tearDownClass(cls): diff --git 
a/contentcuration/kolibri_public/tests/test_public_v1_api.py b/contentcuration/kolibri_public/tests/test_public_v1_api.py index 442830471a..75037eba6a 100644 --- a/contentcuration/kolibri_public/tests/test_public_v1_api.py +++ b/contentcuration/kolibri_public/tests/test_public_v1_api.py @@ -28,7 +28,9 @@ def test_info_endpoint(self): response = self.client.get(reverse("info")) self.assertEqual(response.data["application"], "studio") self.assertEqual(response.data["device_name"], "Kolibri Studio") - self.assertEqual(response.data["instance_id"], "ef896e7b7bbf5a359371e6f7afd28742") + self.assertEqual( + response.data["instance_id"], "ef896e7b7bbf5a359371e6f7afd28742" + ) def test_empty_public_channels(self): """ diff --git a/contentcuration/kolibri_public/urls.py b/contentcuration/kolibri_public/urls.py index cc6ff65cb5..f3d499683f 100644 --- a/contentcuration/kolibri_public/urls.py +++ b/contentcuration/kolibri_public/urls.py @@ -26,9 +26,23 @@ ) urlpatterns = [ - re_path(r'^api/public/channel/(?P<channel_id>[^/]+)', views_v1.get_channel_name_by_id, name='get_channel_name_by_id'), - re_path(r'^api/public/(?P<version>[^/]+)/channels$', views_v1.get_public_channel_list, name='get_public_channel_list'), - re_path(r'^api/public/(?P<version>[^/]+)/channels/lookup/(?P<identifier>[^/]+)', views_v1.get_public_channel_lookup, name='get_public_channel_lookup'), - re_path(r'^api/public/info', views_v1.InfoViewSet.as_view({'get': 'list'}), name='info'), + re_path( + r"^api/public/channel/(?P<channel_id>[^/]+)", + views_v1.get_channel_name_by_id, + name="get_channel_name_by_id", + ), + re_path( + r"^api/public/(?P<version>[^/]+)/channels$", + views_v1.get_public_channel_list, + name="get_public_channel_list", + ), + re_path( + r"^api/public/(?P<version>[^/]+)/channels/lookup/(?P<identifier>[^/]+)", + views_v1.get_public_channel_lookup, + name="get_public_channel_lookup", + ), + re_path( + r"^api/public/info", views_v1.InfoViewSet.as_view({"get": "list"}), name="info" + ), path("api/public/v2/", include(public_content_v2_router.urls)), ]
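For the URL patterns reformatted above, each named group in a re_path pattern is passed to the mapped view as a keyword argument. A self-contained sketch with illustrative names (not from the repository):

from django.http import JsonResponse
from django.urls import re_path


def get_thing(request, thing_id):
    # Django passes the text captured by (?P<thing_id>[^/]+) in as thing_id.
    return JsonResponse({"id": thing_id})


urlpatterns = [
    re_path(r"^api/things/(?P<thing_id>[^/]+)$", get_thing, name="get_thing"),
]

diff --git a/contentcuration/kolibri_public/utils/annotation.py b/contentcuration/kolibri_public/utils/annotation.py index 3cb800a3d9..7295c97e39 100644 --- a/contentcuration/kolibri_public/utils/annotation.py +++ b/contentcuration/kolibri_public/utils/annotation.py @@ -71,10 +71,15 @@ def calculate_next_order(channel, public=False): # This has been edited from the source Kolibri, in order # to make the order match given by the public channel API on Studio. if public: - channel_list_order = list(Channel.objects.filter( - # Ensure that this channel is always included in the list. - Q(public=True, deleted=False, main_tree__published=True) | Q(id=channel.id) - ).order_by("-priority").values_list("id", flat=True)) + channel_list_order = list( + Channel.objects.filter( + # Ensure that this channel is always included in the list.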
+ Q(public=True, deleted=False, main_tree__published=True) + | Q(id=channel.id) + ) + .order_by("-priority") + .values_list("id", flat=True) + ) # this shouldn't happen, but if we're exporting a channel database to Kolibri Public # and the channel does not actually exist locally, then this would fail if channel.id in channel_list_order: diff --git a/contentcuration/kolibri_public/utils/mapper.py b/contentcuration/kolibri_public/utils/mapper.py index f7605d7e1b..01dc1f0726 100644 --- a/contentcuration/kolibri_public/utils/mapper.py +++ b/contentcuration/kolibri_public/utils/mapper.py @@ -32,12 +32,14 @@ def overrides(self): }, kolibri_public_models.LocalFile: { "available": True, - } + }, } def _handle_old_tree_if_exists(self): try: - old_channel = kolibri_public_models.ChannelMetadata.objects.get(id=self.channel.id) + old_channel = kolibri_public_models.ChannelMetadata.objects.get( + id=self.channel.id + ) self.tree_id = old_channel.root.tree_id old_channel.root.get_descendants(include_self=True).delete() except kolibri_public_models.ChannelMetadata.DoesNotExist: @@ -47,7 +49,9 @@ def run(self): with transaction.atomic(): self._handle_old_tree_if_exists() self.mapped_root = self.map_root(self.channel.root) - self.mapped_channel = self._map_model(self.channel, kolibri_public_models.ChannelMetadata) + self.mapped_channel = self._map_model( + self.channel, kolibri_public_models.ChannelMetadata + ) self.mapped_channel.public = self.public self.mapped_channel.save_base(raw=True) annotate_label_bitmasks(self.mapped_root.get_descendants(include_self=True)) @@ -77,7 +81,9 @@ def _map_node(self, source, ancestors): return node def _extend_ancestors(self, ancestors, new_ancestor): - return ancestors + [{"id": new_ancestor.id, "title": new_ancestor.title.replace('"', '\\"')}] + return ancestors + [ + {"id": new_ancestor.id, "title": new_ancestor.title.replace('"', '\\"')} + ] def _recurse_to_create_tree( self, @@ -91,19 +97,16 @@ def _recurse_to_create_tree( children = sorted(nodes_by_parent[source.id], key=lambda x: x.lft) ancestors = self._extend_ancestors(ancestors, source) for child in children: - nodes_to_create.extend(self._recurse_to_create_tree( - child, - nodes_by_parent, - ancestors, - )) + nodes_to_create.extend( + self._recurse_to_create_tree( + child, + nodes_by_parent, + ancestors, + ) + ) return nodes_to_create - def map_root( - self, - root, - batch_size=None, - progress_tracker=None - ): + def map_root(self, root, batch_size=None, progress_tracker=None): """ :type progress_tracker: contentcuration.utils.celery.ProgressTracker|None """ @@ -152,41 +155,48 @@ def _map( return [node_copy] def _copy_tags(self, node_ids): - initial_source_tag_mappings = kolibri_content_models.ContentNode.tags.through.objects.filter( - contentnode_id__in=node_ids + initial_source_tag_mappings = ( + kolibri_content_models.ContentNode.tags.through.objects.filter( + contentnode_id__in=node_ids + ) ) - source_tags = ( - kolibri_content_models.ContentTag.objects - .annotate( - tag_name_len=Length("tag_name"), - ) - .filter( - id__in=initial_source_tag_mappings.values_list("contenttag_id", flat=True), - tag_name_len__lte=MAX_TAG_LENGTH, - ) + source_tags = kolibri_content_models.ContentTag.objects.annotate( + tag_name_len=Length("tag_name"), + ).filter( + id__in=initial_source_tag_mappings.values_list("contenttag_id", flat=True), + tag_name_len__lte=MAX_TAG_LENGTH, ) - source_tag_mappings = ( - initial_source_tag_mappings - .filter( - contenttag_id__in=source_tags.values_list("id", flat=True), - ) + 
source_tag_mappings = initial_source_tag_mappings.filter( + contenttag_id__in=source_tags.values_list("id", flat=True), ) self._map_and_bulk_create_model(source_tags, kolibri_public_models.ContentTag) - self._map_and_bulk_create_model(source_tag_mappings, kolibri_public_models.ContentNode.tags.through) + self._map_and_bulk_create_model( + source_tag_mappings, kolibri_public_models.ContentNode.tags.through + ) def _copy_assessment_metadata(self, node_ids): - node_assessmentmetadata = kolibri_content_models.AssessmentMetaData.objects.filter(contentnode_id__in=node_ids) + node_assessmentmetadata = ( + kolibri_content_models.AssessmentMetaData.objects.filter( + contentnode_id__in=node_ids + ) + ) - self._map_and_bulk_create_model(node_assessmentmetadata, kolibri_public_models.AssessmentMetaData) + self._map_and_bulk_create_model( + node_assessmentmetadata, kolibri_public_models.AssessmentMetaData + ) def _copy_files(self, node_ids): - node_files = kolibri_content_models.File.objects.filter(contentnode_id__in=node_ids) + node_files = kolibri_content_models.File.objects.filter( + contentnode_id__in=node_ids + ) - local_files = kolibri_content_models.LocalFile.objects.filter(id__in=node_files.values_list("local_file_id", flat=True)) + local_files = kolibri_content_models.LocalFile.objects.filter( + id__in=node_files.values_list("local_file_id", flat=True) + ) self._map_and_bulk_create_model(local_files, kolibri_public_models.LocalFile) @@ -232,12 +242,16 @@ def _deep_map( ancestors, ) - mapped_nodes = kolibri_public_models.ContentNode.objects.bulk_create(nodes_to_create) + mapped_nodes = kolibri_public_models.ContentNode.objects.bulk_create( + nodes_to_create + ) # filter to only the nodes that were created, since some source nodes could have # been problematic - self._copy_associated_objects(source_nodes.filter( - id__in=[mapped_node.id for mapped_node in mapped_nodes], - )) + self._copy_associated_objects( + source_nodes.filter( + id__in=[mapped_node.id for mapped_node in mapped_nodes], + ) + ) return mapped_nodes diff --git a/contentcuration/kolibri_public/views.py b/contentcuration/kolibri_public/views.py index 8e51e30e00..00817d4e64 100644 --- a/contentcuration/kolibri_public/views.py +++ b/contentcuration/kolibri_public/views.py @@ -51,7 +51,9 @@ def get_last_modified(*args, **kwargs): - return models.ChannelMetadata.objects.all().aggregate(updated=Max("last_updated"))["updated"] + return models.ChannelMetadata.objects.all().aggregate(updated=Max("last_updated"))[ + "updated" + ] def metadata_cache(some_func): @@ -352,7 +354,9 @@ def map_file(file): file["file_size"] = file.pop("local_file__file_size") file["extension"] = file.pop("local_file__extension") # Swap in the contentcuration generate_storage_url function here - file["storage_url"] = generate_storage_url("{}.{}".format(file["checksum"], file["extension"])) + file["storage_url"] = generate_storage_url( + "{}.{}".format(file["checksum"], file["extension"]) + ) return file @@ -454,9 +458,9 @@ def get_related_data_maps(self, items, queryset): lang["id"]: lang # Add an annotation for lang_name to map to native_name to map from content curation model # to how we want to expose it for Kolibri. 
- for lang in models.Language.objects.filter(id__in=lang_ids).annotate(lang_name=F("native_name")).values( - "id", "lang_code", "lang_subcode", "lang_name", "lang_direction" - ) + for lang in models.Language.objects.filter(id__in=lang_ids) + .annotate(lang_name=F("native_name")) + .values("id", "lang_code", "lang_subcode", "lang_name", "lang_direction") } for f in files: @@ -673,9 +677,7 @@ def get_tree_queryset(self, request, pk): @method_decorator(metadata_cache, name="dispatch") -class ContentNodeTreeViewset( - BaseContentNodeMixin, TreeQueryMixin, BaseValuesViewset -): +class ContentNodeTreeViewset(BaseContentNodeMixin, TreeQueryMixin, BaseValuesViewset): def retrieve(self, request, pk=None): """ A nested, paginated representation of the children and grandchildren of a specific node @@ -704,8 +706,7 @@ def retrieve(self, request, pk=None): UUID(pk) except ValueError: return Response( - {"error": "Invalid UUID format."}, - status=status.HTTP_400_BAD_REQUEST + {"error": "Invalid UUID format."}, status=status.HTTP_400_BAD_REQUEST ) queryset = self.get_tree_queryset(request, pk) diff --git a/contentcuration/kolibri_public/views_v1.py b/contentcuration/kolibri_public/views_v1.py index 6b118f67d9..8928c22bcc 100644 --- a/contentcuration/kolibri_public/views_v1.py +++ b/contentcuration/kolibri_public/views_v1.py @@ -28,33 +28,46 @@ def _get_channel_list(version, params, identifier=None): def _get_channel_list_v1(params, identifier=None): - keyword = params.get('keyword', '').strip() - language_id = params.get('language', '').strip() - token_list = params.get('tokens', '').strip().replace('-', '').split(',') + keyword = params.get("keyword", "").strip() + language_id = params.get("language", "").strip() + token_list = params.get("tokens", "").strip().replace("-", "").split(",") channels = None if identifier: - channels = Channel.objects.prefetch_related('secret_tokens').filter(secret_tokens__token=identifier) + channels = Channel.objects.prefetch_related("secret_tokens").filter( + secret_tokens__token=identifier + ) if not channels.exists(): channels = Channel.objects.filter(pk=identifier) else: - channels = Channel.objects.prefetch_related('secret_tokens').filter(Q(public=True) | Q(secret_tokens__token__in=token_list)) - - if keyword != '': - channels = channels.prefetch_related('tags').filter(Q(name__icontains=keyword) | Q( - description__icontains=keyword) | Q(tags__tag_name__icontains=keyword)) - - if language_id != '': + channels = Channel.objects.prefetch_related("secret_tokens").filter( + Q(public=True) | Q(secret_tokens__token__in=token_list) + ) + + if keyword != "": + channels = channels.prefetch_related("tags").filter( + Q(name__icontains=keyword) + | Q(description__icontains=keyword) + | Q(tags__tag_name__icontains=keyword) + ) + + if language_id != "": channels.filter(included_languages__id=language_id) - return channels.annotate(tokens=Value(json.dumps(token_list), output_field=TextField()))\ - .filter(deleted=False, main_tree__published=True)\ - .order_by("-priority")\ + return ( + channels.annotate( + tokens=Value(json.dumps(token_list), output_field=TextField()) + ) + .filter(deleted=False, main_tree__published=True) + .order_by("-priority") .distinct() + ) -@cache_page(settings.PUBLIC_CHANNELS_CACHE_DURATION, key_prefix='get_public_channel_list') -@api_view(['GET']) +@cache_page( + settings.PUBLIC_CHANNELS_CACHE_DURATION, key_prefix="get_public_channel_list" +) +@api_view(["GET"]) @permission_classes((AllowAny,)) @cache_no_user_data def get_public_channel_list(request, 
version): @@ -62,34 +75,44 @@ def get_public_channel_list(request, version): try: channel_list = _get_channel_list(version, request.query_params) except LookupError: - return HttpResponseNotFound(_("Api endpoint {} is not available").format(version)) + return HttpResponseNotFound( + _("Api endpoint {} is not available").format(version) + ) return Response(PublicChannelSerializer(channel_list, many=True).data) -@api_view(['GET']) +@api_view(["GET"]) @permission_classes((AllowAny,)) def get_public_channel_lookup(request, version, identifier): """ Endpoint: /public/<version>/channels/lookup/<identifier> """ try: - channel_list = _get_channel_list(version, request.query_params, identifier=identifier.strip().replace('-', '')) + channel_list = _get_channel_list( + version, + request.query_params, + identifier=identifier.strip().replace("-", ""), + ) except LookupError: - return HttpResponseNotFound(_("Api endpoint {} is not available").format(version)) + return HttpResponseNotFound( + _("Api endpoint {} is not available").format(version) + ) if not channel_list.exists(): - return HttpResponseNotFound(_("No channel matching {} found").format(identifier)) + return HttpResponseNotFound( + _("No channel matching {} found").format(identifier) + ) return Response(PublicChannelSerializer(channel_list, many=True).data) -@api_view(['GET']) +@api_view(["GET"]) @permission_classes((AllowAny,)) def get_channel_name_by_id(request, channel_id): """ Endpoint: /public/channels/<channel_id> """ channel = Channel.objects.filter(pk=channel_id).first() if not channel: - return HttpResponseNotFound('Channel with id {} not found'.format(channel_id)) + return HttpResponseNotFound("Channel with id {} not found".format(channel_id)) channel_info = { "name": channel.name, "description": channel.description, - "version": channel.version + "version": channel.version, } return Response(channel_info) @@ -134,7 +157,9 @@ def get_instance_id(): global INSTANCE_ID if INSTANCE_ID is None: - INSTANCE_ID = generate_ecosystem_namespaced_uuid(Site.objects.get_current().domain).hex + INSTANCE_ID = generate_ecosystem_namespaced_uuid( + Site.objects.get_current().domain + ).hex return INSTANCE_ID @@ -153,7 +178,7 @@ def get_device_info(version=DEVICE_INFO_VERSION): "application": "studio", "kolibri_version": "0.16.0", "instance_id": get_instance_id(), - 'device_name': "Kolibri Studio", + "device_name": "Kolibri Studio", "operating_system": None, "subset_of_users_device": False, "min_content_schema_version": MIN_CONTENT_SCHEMA_VERSION, @@ -175,7 +200,7 @@ class InfoViewSet(viewsets.ViewSet): Ref: https://github.com/learningequality/kolibri/blob/develop/kolibri/core/public/api.py#L53 """ - permission_classes = (AllowAny, ) + permission_classes = (AllowAny,) def list(self, request): """Returns metadata information about the type of device""" diff --git a/contentcuration/search/apps.py b/contentcuration/search/apps.py index 5726231f79..f54009131f 100644 --- a/contentcuration/search/apps.py +++ b/contentcuration/search/apps.py @@ -2,4 +2,4 @@ class SearchConfig(AppConfig): - name = 'search' + name = "search" diff --git a/contentcuration/search/constants.py b/contentcuration/search/constants.py index 1ac316c3ae..313cd10a3c 100644 --- a/contentcuration/search/constants.py +++ b/contentcuration/search/constants.py @@ -5,12 +5,34 @@ POSTGRES_FTS_CONFIG = "simple" # ContentNode vectors and search fields.
-CONTENTNODE_KEYWORDS_TSVECTOR_FIELDS = ("id", "channel_id", "node_id", "content_id", "tree_id", "title", "description", "contentnode_tags") -CONTENTNODE_KEYWORDS_TSVECTOR = SearchVector(*CONTENTNODE_KEYWORDS_TSVECTOR_FIELDS, config=POSTGRES_FTS_CONFIG) +CONTENTNODE_KEYWORDS_TSVECTOR_FIELDS = ( + "id", + "channel_id", + "node_id", + "content_id", + "tree_id", + "title", + "description", + "contentnode_tags", +) +CONTENTNODE_KEYWORDS_TSVECTOR = SearchVector( + *CONTENTNODE_KEYWORDS_TSVECTOR_FIELDS, config=POSTGRES_FTS_CONFIG +) CONTENTNODE_AUTHOR_TSVECTOR_FIELDS = ("author", "aggregator", "provider") -CONTENTNODE_AUTHOR_TSVECTOR = SearchVector(*CONTENTNODE_AUTHOR_TSVECTOR_FIELDS, config=POSTGRES_FTS_CONFIG) +CONTENTNODE_AUTHOR_TSVECTOR = SearchVector( + *CONTENTNODE_AUTHOR_TSVECTOR_FIELDS, config=POSTGRES_FTS_CONFIG +) # Channel vector and search fields. -CHANNEL_KEYWORDS_TSVECTOR_FIELDS = ("id", "main_tree__tree_id", "name", "description", "tagline", "primary_channel_token") -CHANNEL_KEYWORDS_TSVECTOR = SearchVector(*CHANNEL_KEYWORDS_TSVECTOR_FIELDS, config=POSTGRES_FTS_CONFIG) +CHANNEL_KEYWORDS_TSVECTOR_FIELDS = ( + "id", + "main_tree__tree_id", + "name", + "description", + "tagline", + "primary_channel_token", +) +CHANNEL_KEYWORDS_TSVECTOR = SearchVector( + *CHANNEL_KEYWORDS_TSVECTOR_FIELDS, config=POSTGRES_FTS_CONFIG +) diff --git a/contentcuration/search/management/commands/set_channel_tsvectors.py b/contentcuration/search/management/commands/set_channel_tsvectors.py index 68d7e17b51..fd17bb9065 100644 --- a/contentcuration/search/management/commands/set_channel_tsvectors.py +++ b/contentcuration/search/management/commands/set_channel_tsvectors.py @@ -18,15 +18,22 @@ class Command(BaseCommand): - def handle(self, *args, **options): start = time.time() - channel_not_already_inserted_query = ~Exists(ChannelFullTextSearch.objects.filter(channel_id=OuterRef("id"))) + channel_not_already_inserted_query = ~Exists( + ChannelFullTextSearch.objects.filter(channel_id=OuterRef("id")) + ) - channel_query = (get_fts_annotated_channel_qs().filter(channel_not_already_inserted_query, - deleted=False, main_tree__published=True) - .values("id", "keywords_tsvector")) + channel_query = ( + get_fts_annotated_channel_qs() + .filter( + channel_not_already_inserted_query, + deleted=False, + main_tree__published=True, + ) + .values("id", "keywords_tsvector") + ) insertable_channels = list(channel_query[:CHUNKSIZE]) total_channel_tsvectors_inserted = 0 @@ -36,17 +43,25 @@ def handle(self, *args, **options): insert_objs = list() for channel in insertable_channels: - obj = ChannelFullTextSearch(channel_id=channel["id"], keywords_tsvector=channel["keywords_tsvector"]) + obj = ChannelFullTextSearch( + channel_id=channel["id"], + keywords_tsvector=channel["keywords_tsvector"], + ) insert_objs.append(obj) inserted_objs_list = ChannelFullTextSearch.objects.bulk_create(insert_objs) current_inserts_count = len(inserted_objs_list) - total_channel_tsvectors_inserted = total_channel_tsvectors_inserted + current_inserts_count + total_channel_tsvectors_inserted = ( + total_channel_tsvectors_inserted + current_inserts_count + ) logging.info("Inserted {} channel tsvectors.".format(current_inserts_count)) insertable_channels = list(channel_query[:CHUNKSIZE]) - logging.info("Completed! successfully inserted total of {} channel tsvectors in {} seconds.".format( - total_channel_tsvectors_inserted, time.time() - start)) + logging.info( + "Completed! 
successfully inserted total of {} channel tsvectors in {} seconds.".format( + total_channel_tsvectors_inserted, time.time() - start + ) + ) diff --git a/contentcuration/search/management/commands/set_contentnode_tsvectors.py b/contentcuration/search/management/commands/set_contentnode_tsvectors.py index 067a956f62..7593a7b883 100644 --- a/contentcuration/search/management/commands/set_contentnode_tsvectors.py +++ b/contentcuration/search/management/commands/set_contentnode_tsvectors.py @@ -21,19 +21,35 @@ class Command(BaseCommand): def add_arguments(self, parser): - parser.add_argument("--channel-id", type=str, dest="channel_id", - help="The channel_id for which tsvectors need to be generated.\ - If not specified then tsvectors is generated for all published channels.") - parser.add_argument("--published", dest="published", action="store_true", - help="Filters on whether channel's contentnodes are published or not.") + parser.add_argument( + "--channel-id", + type=str, + dest="channel_id", + help="The channel_id for which tsvectors need to be generated.\ + If not specified then tsvectors is generated for all published channels.", + ) + parser.add_argument( + "--published", + dest="published", + action="store_true", + help="Filters on whether channel's contentnodes are published or not.", + ) def handle(self, *args, **options): start = time.time() if options["channel_id"]: - generate_tsv_for_channels = list(Channel.objects.filter(id=options["channel_id"]).values("id", "main_tree__tree_id")) + generate_tsv_for_channels = list( + Channel.objects.filter(id=options["channel_id"]).values( + "id", "main_tree__tree_id" + ) + ) else: - generate_tsv_for_channels = list(Channel.objects.filter(main_tree__published=True, deleted=False).values("id", "main_tree__tree_id")) + generate_tsv_for_channels = list( + Channel.objects.filter(main_tree__published=True, deleted=False).values( + "id", "main_tree__tree_id" + ) + ) if options["published"]: publish_filter_dict = dict(published=True) @@ -43,32 +59,59 @@ def handle(self, *args, **options): total_tsvectors_inserted = 0 for channel in generate_tsv_for_channels: - tsvector_not_already_inserted_query = ~Exists(ContentNodeFullTextSearch.objects.filter(contentnode_id=OuterRef("id"))) - tsvector_nodes_query = (get_fts_annotated_contentnode_qs(channel["id"]) - .filter(tsvector_not_already_inserted_query, tree_id=channel["main_tree__tree_id"], complete=True, **publish_filter_dict) - .values("id", "channel_id", "keywords_tsvector", "author_tsvector") - .order_by()) + tsvector_not_already_inserted_query = ~Exists( + ContentNodeFullTextSearch.objects.filter(contentnode_id=OuterRef("id")) + ) + tsvector_nodes_query = ( + get_fts_annotated_contentnode_qs(channel["id"]) + .filter( + tsvector_not_already_inserted_query, + tree_id=channel["main_tree__tree_id"], + complete=True, + **publish_filter_dict + ) + .values("id", "channel_id", "keywords_tsvector", "author_tsvector") + .order_by() + ) insertable_nodes_tsvector = list(tsvector_nodes_query[:CHUNKSIZE]) - logging.info("Inserting contentnode tsvectors of channel {}.".format(channel["id"])) + logging.info( + "Inserting contentnode tsvectors of channel {}.".format(channel["id"]) + ) while insertable_nodes_tsvector: insert_objs = list() for node in insertable_nodes_tsvector: - obj = ContentNodeFullTextSearch(contentnode_id=node["id"], channel_id=node["channel_id"], - keywords_tsvector=node["keywords_tsvector"], author_tsvector=node["author_tsvector"]) + obj = ContentNodeFullTextSearch( + contentnode_id=node["id"], + 
channel_id=node["channel_id"], + keywords_tsvector=node["keywords_tsvector"], + author_tsvector=node["author_tsvector"], + ) insert_objs.append(obj) - inserted_objs_list = ContentNodeFullTextSearch.objects.bulk_create(insert_objs) + inserted_objs_list = ContentNodeFullTextSearch.objects.bulk_create( + insert_objs + ) current_inserts_count = len(inserted_objs_list) - total_tsvectors_inserted = total_tsvectors_inserted + current_inserts_count + total_tsvectors_inserted = ( + total_tsvectors_inserted + current_inserts_count + ) - logging.info("Inserted {} contentnode tsvectors of channel {}.".format(current_inserts_count, channel["id"])) + logging.info( + "Inserted {} contentnode tsvectors of channel {}.".format( + current_inserts_count, channel["id"] + ) + ) insertable_nodes_tsvector = list(tsvector_nodes_query[:CHUNKSIZE]) logging.info("Insertion complete for channel {}.".format(channel["id"])) - logging.info("Completed! Successfully inserted total of {} contentnode tsvectors in {} seconds.".format(total_tsvectors_inserted, time.time() - start)) + logging.info( + "Completed! Successfully inserted total of {} contentnode tsvectors in {} seconds.".format( + total_tsvectors_inserted, time.time() - start + ) + ) diff --git a/contentcuration/search/migrations/0001_initial.py b/contentcuration/search/migrations/0001_initial.py index 9df128ff11..a42b22db21 100644 --- a/contentcuration/search/migrations/0001_initial.py +++ b/contentcuration/search/migrations/0001_initial.py @@ -19,14 +19,32 @@ class Migration(migrations.Migration): operations = [ migrations.CreateModel( - name='SavedSearch', + name="SavedSearch", fields=[ - ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), - ('name', models.CharField(blank=True, max_length=200)), - ('created', models.DateTimeField(auto_now_add=True)), - ('modified', models.DateTimeField(auto_now=True)), - ('params', django.contrib.postgres.fields.jsonb.JSONField(default=dict)), - ('saved_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='searches', to=settings.AUTH_USER_MODEL)), + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("name", models.CharField(blank=True, max_length=200)), + ("created", models.DateTimeField(auto_now_add=True)), + ("modified", models.DateTimeField(auto_now=True)), + ( + "params", + django.contrib.postgres.fields.jsonb.JSONField(default=dict), + ), + ( + "saved_by", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="searches", + to=settings.AUTH_USER_MODEL, + ), + ), ], ), ] diff --git a/contentcuration/search/migrations/0002_auto_20201215_2110.py b/contentcuration/search/migrations/0002_auto_20201215_2110.py index 43eee63e55..3070ddda73 100644 --- a/contentcuration/search/migrations/0002_auto_20201215_2110.py +++ b/contentcuration/search/migrations/0002_auto_20201215_2110.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('search', '0001_initial'), + ("search", "0001_initial"), ] operations = [ migrations.AlterField( - model_name='savedsearch', - name='params', + model_name="savedsearch", + name="params", field=models.JSONField(default=dict), ), ] diff --git a/contentcuration/search/migrations/0003_fulltextsearch.py b/contentcuration/search/migrations/0003_fulltextsearch.py index 632df6a39e..4cf6b22872 100644 --- a/contentcuration/search/migrations/0003_fulltextsearch.py +++ 
b/contentcuration/search/migrations/0003_fulltextsearch.py @@ -16,39 +16,97 @@ class Migration(migrations.Migration): atomic = False dependencies = [ - ('contentcuration', '0140_delete_task'), - ('search', '0002_auto_20201215_2110'), + ("contentcuration", "0140_delete_task"), + ("search", "0002_auto_20201215_2110"), ] operations = [ migrations.CreateModel( - name='ContentNodeFullTextSearch', + name="ContentNodeFullTextSearch", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('keywords_tsvector', django.contrib.postgres.search.SearchVectorField(blank=True, null=True)), - ('author_tsvector', django.contrib.postgres.search.SearchVectorField(blank=True, null=True)), - ('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='channel_nodes_fts', to='contentcuration.channel')), - ('contentnode', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='node_fts', to='contentcuration.contentnode')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ( + "keywords_tsvector", + django.contrib.postgres.search.SearchVectorField( + blank=True, null=True + ), + ), + ( + "author_tsvector", + django.contrib.postgres.search.SearchVectorField( + blank=True, null=True + ), + ), + ( + "channel", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_nodes_fts", + to="contentcuration.channel", + ), + ), + ( + "contentnode", + models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="node_fts", + to="contentcuration.contentnode", + ), + ), ], ), migrations.CreateModel( - name='ChannelFullTextSearch', + name="ChannelFullTextSearch", fields=[ - ('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)), - ('keywords_tsvector', django.contrib.postgres.search.SearchVectorField(blank=True, null=True)), - ('channel', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='channel_fts', to='contentcuration.channel')), + ( + "id", + contentcuration.models.UUIDField( + default=uuid.uuid4, + max_length=32, + primary_key=True, + serialize=False, + ), + ), + ( + "keywords_tsvector", + django.contrib.postgres.search.SearchVectorField( + blank=True, null=True + ), + ), + ( + "channel", + models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="channel_fts", + to="contentcuration.channel", + ), + ), ], ), AddIndexConcurrently( - model_name='contentnodefulltextsearch', - index=django.contrib.postgres.indexes.GinIndex(fields=['keywords_tsvector'], name='node_keywords_tsv__gin_idx'), + model_name="contentnodefulltextsearch", + index=django.contrib.postgres.indexes.GinIndex( + fields=["keywords_tsvector"], name="node_keywords_tsv__gin_idx" + ), ), AddIndexConcurrently( - model_name='contentnodefulltextsearch', - index=django.contrib.postgres.indexes.GinIndex(fields=['author_tsvector'], name='node_author_tsv__gin_idx'), + model_name="contentnodefulltextsearch", + index=django.contrib.postgres.indexes.GinIndex( + fields=["author_tsvector"], name="node_author_tsv__gin_idx" + ), ), AddIndexConcurrently( - model_name='channelfulltextsearch', - index=django.contrib.postgres.indexes.GinIndex(fields=['keywords_tsvector'], name='channel_keywords_tsv__gin_idx'), + model_name="channelfulltextsearch", + index=django.contrib.postgres.indexes.GinIndex( + 
fields=["keywords_tsvector"], name="channel_keywords_tsv__gin_idx" + ), ), ] diff --git a/contentcuration/search/models.py b/contentcuration/search/models.py index 9e121af509..ea26afe470 100644 --- a/contentcuration/search/models.py +++ b/contentcuration/search/models.py @@ -25,11 +25,15 @@ class ContentNodeFullTextSearch(models.Model): id = StudioUUIDField(primary_key=True, default=uuid.uuid4) # The contentnode that this record points to. - contentnode = models.OneToOneField(ContentNode, on_delete=models.CASCADE, related_name="node_fts") + contentnode = models.OneToOneField( + ContentNode, on_delete=models.CASCADE, related_name="node_fts" + ) # The channel to which the contentnode belongs. Channel cannot be NULL because we only allow # searches to be made inside channels. - channel = models.ForeignKey(Channel, on_delete=models.CASCADE, related_name="channel_nodes_fts") + channel = models.ForeignKey( + Channel, on_delete=models.CASCADE, related_name="channel_nodes_fts" + ) # This stores the keywords as tsvector. keywords_tsvector = SearchVectorField(null=True, blank=True) @@ -38,18 +42,24 @@ class ContentNodeFullTextSearch(models.Model): author_tsvector = SearchVectorField(null=True, blank=True) class Meta: - indexes = [GinIndex(fields=["keywords_tsvector"], name="node_keywords_tsv__gin_idx"), - GinIndex(fields=["author_tsvector"], name="node_author_tsv__gin_idx")] + indexes = [ + GinIndex(fields=["keywords_tsvector"], name="node_keywords_tsv__gin_idx"), + GinIndex(fields=["author_tsvector"], name="node_author_tsv__gin_idx"), + ] class ChannelFullTextSearch(models.Model): id = StudioUUIDField(primary_key=True, default=uuid.uuid4) # The channel to which this record points. - channel = models.OneToOneField(Channel, on_delete=models.CASCADE, related_name="channel_fts") + channel = models.OneToOneField( + Channel, on_delete=models.CASCADE, related_name="channel_fts" + ) # This stores the channel keywords as tsvector for super fast searches. 
keywords_tsvector = SearchVectorField(null=True, blank=True) class Meta: - indexes = [GinIndex(fields=["keywords_tsvector"], name="channel_keywords_tsv__gin_idx")] + indexes = [ + GinIndex(fields=["keywords_tsvector"], name="channel_keywords_tsv__gin_idx") + ] diff --git a/contentcuration/search/tests/test_savesearch.py b/contentcuration/search/tests/test_savesearch.py index 7dc07df230..20ea36b89f 100644 --- a/contentcuration/search/tests/test_savesearch.py +++ b/contentcuration/search/tests/test_savesearch.py @@ -12,7 +12,6 @@ class SavedSearchViewsetTestCase(SyncTestMixin, StudioAPITestCase): - @property def savedsearch_metadata(self): return { @@ -38,7 +37,11 @@ def setUp(self): def test_create_savedsearch(self): savedsearch = self.savedsearch_metadata response = self.sync_changes( - [generate_create_event(savedsearch["id"], SAVEDSEARCH, savedsearch, user_id=self.user.id)], + [ + generate_create_event( + savedsearch["id"], SAVEDSEARCH, savedsearch, user_id=self.user.id + ) + ], ) self.assertEqual(response.status_code, 200, response.content) try: @@ -51,8 +54,12 @@ def test_create_savedsearchs(self): savedsearch2 = self.savedsearch_metadata response = self.sync_changes( [ - generate_create_event(savedsearch1["id"], SAVEDSEARCH, savedsearch1, user_id=self.user.id), - generate_create_event(savedsearch2["id"], SAVEDSEARCH, savedsearch2, user_id=self.user.id), + generate_create_event( + savedsearch1["id"], SAVEDSEARCH, savedsearch1, user_id=self.user.id + ), + generate_create_event( + savedsearch2["id"], SAVEDSEARCH, savedsearch2, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -87,8 +94,12 @@ def test_delete_savedsearchs(self): response = self.sync_changes( [ - generate_delete_event(savedsearch1.id, SAVEDSEARCH, user_id=self.user.id), - generate_delete_event(savedsearch2.id, SAVEDSEARCH, user_id=self.user.id), + generate_delete_event( + savedsearch1.id, SAVEDSEARCH, user_id=self.user.id + ), + generate_delete_event( + savedsearch2.id, SAVEDSEARCH, user_id=self.user.id + ), ], ) self.assertEqual(response.status_code, 200, response.content) @@ -107,7 +118,9 @@ def test_delete_savedsearchs(self): def test_retrieve_savedsearch(self): savedsearch = SavedSearch.objects.create(**self.savedsearch_db_metadata) - response = self.client.get(reverse("savedsearch-detail", kwargs={"pk": savedsearch.id})) + response = self.client.get( + reverse("savedsearch-detail", kwargs={"pk": savedsearch.id}) + ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["id"], savedsearch.id) diff --git a/contentcuration/search/tests/test_search.py b/contentcuration/search/tests/test_search.py index 95d643f504..dff9c2e730 100644 --- a/contentcuration/search/tests/test_search.py +++ b/contentcuration/search/tests/test_search.py @@ -27,7 +27,9 @@ def setUp(self): def test_filter_exclude_channels(self): self.client.force_authenticate(user=self.user) response = self.client.get( - reverse("search-list"), data={"exclude_channel": self.channel.id}, format="json", + reverse("search-list"), + data={"exclude_channel": self.channel.id}, + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertEqual(response.data["results"], []) @@ -35,7 +37,9 @@ def test_filter_exclude_channels(self): def test_filter_channels_by_edit(self): self.client.force_authenticate(user=self.user) response = self.client.get( - reverse("search-list"), data={"channel_list": "edit"}, format="json", + reverse("search-list"), + 
data={"channel_list": "edit"}, + format="json", ) self.assertEqual(response.status_code, 200, response.content) self.assertNotEqual(response.data["results"], []) @@ -49,18 +53,28 @@ def test_search(self): user = testdata.user(email="a{}@a.com".format(i)) users.append(user) - channel = Channel.objects.create(actor_id=user.id, name="user_a{}_channel".format(i)) + channel = Channel.objects.create( + actor_id=user.id, name="user_a{}_channel".format(i) + ) channel.save() channels.append(channel) channel.editors.add(user) - public_channel, editable_channel, viewable_channel, inaccessible_channel = channels + ( + public_channel, + editable_channel, + viewable_channel, + inaccessible_channel, + ) = channels # Create public video node. - public_video_node = testdata.node({ - "title": "Kolibri video", - "kind_id": "video", - }, parent=public_channel.main_tree) + public_video_node = testdata.node( + { + "title": "Kolibri video", + "kind_id": "video", + }, + parent=public_channel.main_tree, + ) public_channel.public = True public_channel.save() @@ -83,7 +97,9 @@ def test_search(self): viewable_channel.main_tree.refresh_from_db() viewable_video_node = viewable_channel.main_tree.get_descendants().first() inaccessible_channel.main_tree.refresh_from_db() - inaccessible_video_node = inaccessible_channel.main_tree.get_descendants().first() + inaccessible_video_node = ( + inaccessible_channel.main_tree.get_descendants().first() + ) # Send request from user_b to the search endpoint. self.client.force_authenticate(user=user_b) @@ -91,10 +107,7 @@ def test_search(self): for channel_list in ("public", "edit", "view"): response = self.client.get( reverse("search-list"), - data={ - "channel_list": channel_list, - "keywords": "video" - }, + data={"channel_list": channel_list, "keywords": "video"}, format="json", ) diff --git a/contentcuration/search/urls.py b/contentcuration/search/urls.py index 891e0cabed..4371f91681 100644 --- a/contentcuration/search/urls.py +++ b/contentcuration/search/urls.py @@ -5,7 +5,7 @@ from search.viewsets.savedsearch import SavedSearchViewSet router = routers.DefaultRouter(trailing_slash=False) -router.register(r'search', SearchContentNodeViewSet, basename='search') -router.register(r'saved-search', SavedSearchViewSet) +router.register(r"search", SearchContentNodeViewSet, basename="search") +router.register(r"saved-search", SavedSearchViewSet) -urlpatterns = [url(r'^', include(router.urls))] +urlpatterns = [url(r"^", include(router.urls))] diff --git a/contentcuration/search/utils.py b/contentcuration/search/utils.py index 8519fde49d..3717c89dc0 100644 --- a/contentcuration/search/utils.py +++ b/contentcuration/search/utils.py @@ -24,7 +24,7 @@ def get_fts_annotated_contentnode_qs(channel_id): channel_id=Value(channel_id), contentnode_tags=StringAgg("tags__tag_name", delimiter=" "), keywords_tsvector=CONTENTNODE_KEYWORDS_TSVECTOR, - author_tsvector=CONTENTNODE_AUTHOR_TSVECTOR + author_tsvector=CONTENTNODE_AUTHOR_TSVECTOR, ) @@ -35,5 +35,7 @@ def get_fts_annotated_channel_qs(): from contentcuration.models import Channel from contentcuration.viewsets.channel import primary_token_subquery - return Channel.objects.annotate(primary_channel_token=primary_token_subquery, - keywords_tsvector=CHANNEL_KEYWORDS_TSVECTOR) + return Channel.objects.annotate( + primary_channel_token=primary_token_subquery, + keywords_tsvector=CHANNEL_KEYWORDS_TSVECTOR, + ) diff --git a/contentcuration/search/viewsets/contentnode.py b/contentcuration/search/viewsets/contentnode.py index 0f88a037f6..27a5057592 100644 
--- a/contentcuration/search/viewsets/contentnode.py +++ b/contentcuration/search/viewsets/contentnode.py @@ -50,11 +50,17 @@ def filter_channel_list(self, queryset, name, value): if value == "public": channel_ids = Channel.get_public_channels().values_list("id", flat=True) elif value == "edit" and user: - channel_ids = user.editable_channels.filter(deleted=False).values_list("id", flat=True) + channel_ids = user.editable_channels.filter(deleted=False).values_list( + "id", flat=True + ) elif value == "bookmark" and user: - channel_ids = user.bookmarked_channels.filter(deleted=False).values_list("id", flat=True) + channel_ids = user.bookmarked_channels.filter(deleted=False).values_list( + "id", flat=True + ) elif value == "view" and user: - channel_ids = user.view_only_channels.filter(deleted=False).values_list("id", flat=True) + channel_ids = user.view_only_channels.filter(deleted=False).values_list( + "id", flat=True + ) return queryset.filter(channel_id__in=list(channel_ids)) @@ -119,7 +125,6 @@ class SearchContentNodeViewSet(ReadOnlyValuesViewset): "channel_id", "resource_count", "original_channel_name", - # TODO: currently loading nodes separately # "thumbnail_checksum", # "thumbnail_extension", @@ -138,18 +143,21 @@ def annotate_queryset(self, queryset): """ Annotates thumbnails, resources count and original channel name. """ - descendant_resources_count = ExpressionWrapper(((F("contentnode__rght") - F("contentnode__lft") - Value(1)) / Value(2)), output_field=IntegerField()) + descendant_resources_count = ExpressionWrapper( + ((F("contentnode__rght") - F("contentnode__lft") - Value(1)) / Value(2)), + output_field=IntegerField(), + ) original_channel_name = Coalesce( Subquery( - Channel.objects.filter(pk=OuterRef("contentnode__original_channel_id")).values( - "name" - )[:1] + Channel.objects.filter( + pk=OuterRef("contentnode__original_channel_id") + ).values("name")[:1] ), Subquery( - Channel.objects.filter(main_tree__tree_id=OuterRef("contentnode__tree_id")).values( - "name" - )[:1] + Channel.objects.filter( + main_tree__tree_id=OuterRef("contentnode__tree_id") + ).values("name")[:1] ), ) diff --git a/deploy/generatejsconstantfiles.py b/deploy/generatejsconstantfiles.py index 3ef554a5e4..391fe613a8 100644 --- a/deploy/generatejsconstantfiles.py +++ b/deploy/generatejsconstantfiles.py @@ -97,7 +97,9 @@ def generate_constants_map_file( generate_names_constants = generate_names_constants and type(constant.id) is str if generate_names_constants: # Replace "-" with "_" to ensure we get keys that don't need to be wrapped in strings - names_output += " {}: '{}',\n".format(constant.id.upper().replace("-", "_"), constant.id) + names_output += " {}: '{}',\n".format( + constant.id.upper().replace("-", "_"), constant.id + ) output += "]);\n\n" output += "export default {}Map\n\n".format(constant_name) @@ -111,7 +113,9 @@ def generate_constants_map_file( with open(os.path.join(constants_path, constant_name + ".js"), "w") as f: f.write(output) - print("{0}: {1} constants saved".format(str(constant_name), len(constant_list))) + print( # noqa: T201 + "{0}: {1} constants saved".format(str(constant_name), len(constant_list)) + ) def generate_constants_set_file( @@ -137,7 +141,9 @@ def generate_constants_set_file( generate_names_constants = generate_names_constants and type(value) is str if generate_names_constants: # Replace "-" with "_" to ensure we get keys that don't need to be wrapped in strings - names_output += " {}: {},\n".format(value.upper().replace("-", "_"), cast_value) + names_output += " 
{}: {},\n".format( + value.upper().replace("-", "_"), cast_value + ) output += "]);\n\nexport default {};\n\n".format(constant_name) output += "export const {}List = Array.from({});\n".format( @@ -151,17 +157,21 @@ def generate_constants_set_file( with open(os.path.join(constants_path, constant_name + ".js"), "w") as f: f.write(output) - print("{0}: {1} constants saved".format(str(constant_name), len(constant_list))) + print( # noqa: T201 + "{0}: {1} constants saved".format(str(constant_name), len(constant_list)) + ) def main(): - print("***** Generating Constants in JS *****") + print("***** Generating Constants in JS *****") # noqa: T201 try: os.mkdir(constants_path) except OSError: pass generate_constants_set_file( - content_kinds.KINDLIST, "ContentKinds", mapper=get_kind_value, + content_kinds.KINDLIST, + "ContentKinds", + mapper=get_kind_value, ) generate_constants_map_file( licenses.LICENSELIST, "Licenses", mapper=get_license_dict @@ -177,14 +187,19 @@ def main(): ) generate_constants_set_file( - [m[0] for m in sorted( - exercises.MASTERY_MODELS, key=lambda x: int(x[0][21:]) if 'num_correct_in_a_row_' in x[0] else 0 - ) if m[0] != exercises.SKILL_CHECK and m[0] != exercises.QUIZ], + [ + m[0] + for m in sorted( + exercises.MASTERY_MODELS, + key=lambda x: int(x[0][21:]) if "num_correct_in_a_row_" in x[0] else 0, + ) + if m[0] != exercises.SKILL_CHECK and m[0] != exercises.QUIZ + ], "MasteryModels", ) generate_constants_set_file([r[0] for r in roles.choices], "Roles") - print("************ DONE. ************") + print("************ DONE. ************") # noqa: T201 if __name__ == "__main__": diff --git a/deploy/mime.types b/deploy/mime.types index 3437f2f474..89be9a4cd6 100644 --- a/deploy/mime.types +++ b/deploy/mime.types @@ -86,4 +86,4 @@ types { video/x-ms-asf asx asf; video/x-ms-wmv wmv; video/x-msvideo avi; -} \ No newline at end of file +} diff --git a/deploy/probers/base.py b/deploy/probers/base.py index 1a8233edbd..7f85a18c16 100644 --- a/deploy/probers/base.py +++ b/deploy/probers/base.py @@ -17,7 +17,9 @@ class BaseProbe(object): def __init__(self): self.session = requests.Session() - self.session.headers.update({"User-Agent": "Studio-Internal-Prober={}".format(self.prober_name)}) + self.session.headers.update( + {"User-Agent": "Studio-Internal-Prober={}".format(self.prober_name)} + ) def do_probe(self): pass @@ -35,7 +37,7 @@ def _login(self): headers = { "referer": url, "X-Studio-Internal-Prober": "LOGIN-PROBER", - 'X-CSRFToken': csrf, + "X-CSRFToken": csrf, } r = self.session.post( @@ -57,22 +59,31 @@ def _construct_studio_url(self, path): url = "{base_url}/{path}".format(base_url=STUDIO_BASE_URL, path=path_stripped) return url - def request(self, path, action="GET", data=None, headers=None, contenttype="application/json"): + def request( + self, + path, + action="GET", + data=None, + headers=None, + contenttype="application/json", + ): data = data or {} headers = headers or {} # Make sure session is logged in - if not self.session.cookies.get('csrftoken'): + if not self.session.cookies.get("csrftoken"): self._login() url = self._construct_studio_url(path) - headers.update({ - 'X-CSRFToken': self.session.cookies.get('csrftoken'), - }) + headers.update( + { + "X-CSRFToken": self.session.cookies.get("csrftoken"), + } + ) - headers.update({'Content-Type': contenttype}) - headers.update({'X-Studio-Internal-Prober': self.prober_name}) + headers.update({"Content-Type": contenttype}) + headers.update({"X-Studio-Internal-Prober": self.prober_name}) response = 
self.session.request(action, url, data=data, headers=headers) response.raise_for_status() @@ -90,9 +101,11 @@ def run(self): end_time = datetime.datetime.now() elapsed = (end_time - start_time).total_seconds() * 1000 - print("{metric_name} {latency_ms}".format( - metric_name=self.metric, - latency_ms=elapsed)) + print( # noqa: T201 + "{metric_name} {latency_ms}".format( + metric_name=self.metric, latency_ms=elapsed + ) + ) class ProberException(Exception): diff --git a/deploy/probers/channel_creation_probe.py b/deploy/probers/channel_creation_probe.py index 83abbdde21..b7ab8d4254 100755 --- a/deploy/probers/channel_creation_probe.py +++ b/deploy/probers/channel_creation_probe.py @@ -11,19 +11,24 @@ class ChannelCreationProbe(BaseProbe): prober_name = "CHANNEL-CREATION-PROBER" def _get_user_id(self): - response = self.request('api/internal/authenticate_user_internal') - return json.loads(response.content)['user_id'] + response = self.request("api/internal/authenticate_user_internal") + return json.loads(response.content)["user_id"] def do_probe(self): payload = { - 'description': "description", - 'language': "en-PT", - 'name': "test", - 'thumbnail': "b3897c3d96bde7f1cff77ce368924098.png", - 'content_defaults': "{}", - 'editors': [self._get_user_id()] + "description": "description", + "language": "en-PT", + "name": "test", + "thumbnail": "b3897c3d96bde7f1cff77ce368924098.png", + "content_defaults": "{}", + "editors": [self._get_user_id()], } - self.request("api/channel", action="POST", data=payload, contenttype="application/x-www-form-urlencoded") + self.request( + "api/channel", + action="POST", + data=payload, + contenttype="application/x-www-form-urlencoded", + ) if __name__ == "__main__": diff --git a/deploy/probers/channel_edit_page_probe.py b/deploy/probers/channel_edit_page_probe.py index 716568f9b6..2b3b80d2a3 100755 --- a/deploy/probers/channel_edit_page_probe.py +++ b/deploy/probers/channel_edit_page_probe.py @@ -10,12 +10,12 @@ class ChannelEditPageProbe(BaseProbe): prober_name = "CHANNEL-EDIT-PAGE-PROBER" def _get_channel(self): - response = self.request('api/probers/get_prober_channel') + response = self.request("api/probers/get_prober_channel") return json.loads(response.content) def do_probe(self): channel = self._get_channel() - path = "channels/{}/edit".format(channel['id']) + path = "channels/{}/edit".format(channel["id"]) self.request(path) diff --git a/deploy/probers/channel_update_probe.py b/deploy/probers/channel_update_probe.py index dd00ef0fc9..1951df9348 100755 --- a/deploy/probers/channel_update_probe.py +++ b/deploy/probers/channel_update_probe.py @@ -11,17 +11,19 @@ class ChannelUpdateProbe(BaseProbe): develop_only = True def _get_channel(self): - response = self.request('api/probers/get_prober_channel') + response = self.request("api/probers/get_prober_channel") return json.loads(response.content) def do_probe(self): channel = self._get_channel() - payload = { - "name": 'New Test Name', - 'id': channel['id'] - } - path = "api/channel/{}".format(channel['id']) - self.request(path, action="PATCH", data=payload, contenttype="application/x-www-form-urlencoded") + payload = {"name": "New Test Name", "id": channel["id"]} + path = "api/channel/{}".format(channel["id"]) + self.request( + path, + action="PATCH", + data=payload, + contenttype="application/x-www-form-urlencoded", + ) if __name__ == "__main__": diff --git a/deploy/probers/postgres_probe.py b/deploy/probers/postgres_probe.py index 49dc03471e..3aa29acc0c 100755 --- a/deploy/probers/postgres_probe.py +++ 
b/deploy/probers/postgres_probe.py @@ -6,11 +6,11 @@ # Use dev options if no env set -DB_HOST = os.getenv('DATA_DB_HOST') or 'localhost' +DB_HOST = os.getenv("DATA_DB_HOST") or "localhost" DB_PORT = 5432 -DB_NAME = os.getenv("DATA_DB_NAME") or 'kolibri-studio' -DB_USER = os.getenv('DATA_DB_USER') or 'learningequality' -DB_PASSWORD = os.getenv('DATA_DB_PASS') or 'kolibri' +DB_NAME = os.getenv("DATA_DB_NAME") or "kolibri-studio" +DB_USER = os.getenv("DATA_DB_USER") or "learningequality" +DB_PASSWORD = os.getenv("DATA_DB_PASS") or "kolibri" TIMEOUT_SECONDS = 2 @@ -18,8 +18,14 @@ class PostgresProbe(BaseProbe): metric = "postgres_latency_msec" def do_probe(self): - conn = psycopg2.connect(host=DB_HOST, port=DB_PORT, dbname=DB_NAME, user=DB_USER, - password=DB_PASSWORD, connect_timeout=TIMEOUT_SECONDS) + conn = psycopg2.connect( + host=DB_HOST, + port=DB_PORT, + dbname=DB_NAME, + user=DB_USER, + password=DB_PASSWORD, + connect_timeout=TIMEOUT_SECONDS, + ) cur = conn.cursor() cur.execute("SELECT datname FROM pg_database;") cur.fetchone() # raises exception if cur.execute() produced no results diff --git a/deploy/probers/postmark_api_probe.py b/deploy/probers/postmark_api_probe.py index 1c2d514493..30cbb1741c 100755 --- a/deploy/probers/postmark_api_probe.py +++ b/deploy/probers/postmark_api_probe.py @@ -5,33 +5,30 @@ POSTMARK_SERVICE_STATUS_URL = "https://status.postmarkapp.com/api/1.0/services" # (See here for API details: https://status.postmarkapp.com/api) -ALL_POSSIBLE_STATUSES = ['UP', 'MAINTENANCE', 'DELAY', 'DEGRADED', 'DOWN'] +ALL_POSSIBLE_STATUSES = ["UP", "MAINTENANCE", "DELAY", "DEGRADED", "DOWN"] PASSING_POSTMARK_STATUSES = { - '/services/smtp': ['UP', 'MAINTENANCE'], - '/services/api': ALL_POSSIBLE_STATUSES, - '/services/inbound': ALL_POSSIBLE_STATUSES, - '/services/web': ALL_POSSIBLE_STATUSES + "/services/smtp": ["UP", "MAINTENANCE"], + "/services/api": ALL_POSSIBLE_STATUSES, + "/services/inbound": ALL_POSSIBLE_STATUSES, + "/services/web": ALL_POSSIBLE_STATUSES, } class PostmarkProbe(BaseProbe): - metric = "postmark_api_latency_msec" def do_probe(self): r = requests.get(url=POSTMARK_SERVICE_STATUS_URL) for service in r.json(): - allowed_statuses = PASSING_POSTMARK_STATUSES.get(service['url']) - passing = service['status'] in allowed_statuses + allowed_statuses = PASSING_POSTMARK_STATUSES.get(service["url"]) + passing = service["status"] in allowed_statuses if passing: continue - raise Exception("Postmark's `%s` service has status %s, but we require one of the following: %s" % ( - service['name'], - service['status'], - allowed_statuses - ) + raise Exception( + "Postmark's `%s` service has status %s, but we require one of the following: %s" + % (service["name"], service["status"], allowed_statuses) ) diff --git a/deploy/probers/publishing_status_probe.py b/deploy/probers/publishing_status_probe.py index f8ce6dc606..fffe67eb92 100755 --- a/deploy/probers/publishing_status_probe.py +++ b/deploy/probers/publishing_status_probe.py @@ -7,7 +7,9 @@ from base import PRODUCTION_MODE_ON -ALERT_THRESHOLD = int(os.getenv("PROBER_PUBLISHING_ALERT_THRESHOLD") or 2 * 3600) # default = 2 hours +ALERT_THRESHOLD = int( + os.getenv("PROBER_PUBLISHING_ALERT_THRESHOLD") or 2 * 3600 +) # default = 2 hours DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" @@ -27,19 +29,24 @@ def run(self): channel_ids = [] for result in results: - duration = (now - datetime.datetime.strptime(result["performed"], DATE_FORMAT)).seconds + duration = ( + now - datetime.datetime.strptime(result["performed"], DATE_FORMAT) + ).seconds 
max_duration = max(max_duration, duration) if duration >= ALERT_THRESHOLD or not result["task_id"]: channel_ids.append(result["channel_id"]) if max_duration > 0: - print("{metric_name} {duration_sec}".format( - metric_name=self.metric, - duration_sec=max_duration - )) + print( # noqa: T201 + "{metric_name} {duration_sec}".format( + metric_name=self.metric, duration_sec=max_duration + ) + ) if channel_ids: - raise ProberException("Publishing alert for channels: {}".format(", ".join(channel_ids))) + raise ProberException( + "Publishing alert for channels: {}".format(", ".join(channel_ids)) + ) if __name__ == "__main__": diff --git a/deploy/probers/task_queue_probe.py b/deploy/probers/task_queue_probe.py index 3a3b02cfed..6148176856 100755 --- a/deploy/probers/task_queue_probe.py +++ b/deploy/probers/task_queue_probe.py @@ -12,9 +12,13 @@ def do_probe(self): r.raise_for_status() results = r.json() - task_count = results.get('queued_task_count', 0) + task_count = results.get("queued_task_count", 0) if task_count >= self.threshold: - raise Exception("Task queue length is over threshold! {} > {}".format(task_count, self.threshold)) + raise Exception( + "Task queue length is over threshold! {} > {}".format( + task_count, self.threshold + ) + ) if __name__ == "__main__": diff --git a/deploy/probers/topic_creation_probe.py b/deploy/probers/topic_creation_probe.py index 60d30e7287..6c7090c598 100755 --- a/deploy/probers/topic_creation_probe.py +++ b/deploy/probers/topic_creation_probe.py @@ -12,24 +12,29 @@ class TopicCreationProbe(BaseProbe): prober_name = "TOPIC-CREATION-PROBER" def _get_channel(self): - response = self.request('api/probers/get_prober_channel') + response = self.request("api/probers/get_prober_channel") return json.loads(response.content) def do_probe(self): channel = self._get_channel() payload = { - 'title': 'Statistics and Probeability', - 'kind': content_kinds.TOPIC, + "title": "Statistics and Probeability", + "kind": content_kinds.TOPIC, } - response = self.request("api/contentnode", action="POST", data=json.dumps(payload)) + response = self.request( + "api/contentnode", action="POST", data=json.dumps(payload) + ) # Test saving to channel works new_topic = json.loads(response.content) - new_topic.update({ - "parent": channel['main_tree'] - }) - path = "api/contentnode/{}".format(new_topic['id']) - self.request(path, action="PUT", data=payload, contenttype="application/x-www-form-urlencoded") + new_topic.update({"parent": channel["main_tree"]}) + path = "api/contentnode/{}".format(new_topic["id"]) + self.request( + path, + action="PUT", + data=payload, + contenttype="application/x-www-form-urlencoded", + ) if __name__ == "__main__": diff --git a/deploy/probers/unapplied_changes_probe.py b/deploy/probers/unapplied_changes_probe.py index a3ceee915e..6065f3df28 100755 --- a/deploy/probers/unapplied_changes_probe.py +++ b/deploy/probers/unapplied_changes_probe.py @@ -11,11 +11,15 @@ def do_probe(self): r.raise_for_status() results = r.json() - active_task_count = results.get('active_task_count', 0) - unapplied_changes_count = results.get('unapplied_changes_count', 0) + active_task_count = results.get("active_task_count", 0) + unapplied_changes_count = results.get("unapplied_changes_count", 0) if active_task_count == 0 and unapplied_changes_count > 0: - raise Exception("There are unapplied changes and no active tasks! {} unapplied changes".format(unapplied_changes_count)) + raise Exception( + "There are unapplied changes and no active tasks! 
{} unapplied changes".format( + unapplied_changes_count + ) + ) if __name__ == "__main__": diff --git a/docker/entrypoint.py b/docker/entrypoint.py index 80f6ccfcd2..1b3deb8c04 100644 --- a/docker/entrypoint.py +++ b/docker/entrypoint.py @@ -22,6 +22,7 @@ def check_postgresql_ready(postgres_checks=CONNECT_TRIES): import psycopg2 + """ Check that postgres is ready to accept connections. """ @@ -48,6 +49,7 @@ def check_postgresql_ready(postgres_checks=CONNECT_TRIES): def check_minio_ready(minio_checks=CONNECT_TRIES): import requests + """ Check that minio is accepting requests. """ @@ -81,7 +83,14 @@ def setup_studio(): - create sample user accounts: user@a.com:a, user@b.com:b, user@c.com:c - create sample channels """ - subprocess.call(["python", "contentcuration/manage.py", "setup", "--settings=contentcuration.dev_settings"]) + subprocess.call( + [ + "python", + "contentcuration/manage.py", + "setup", + "--settings=contentcuration.dev_settings", + ] + ) def run_cmd(): diff --git a/integration_testing/features/manage-resources/bulk-editing/apply-inheritable-metadata.feature b/integration_testing/features/manage-resources/bulk-editing/apply-inheritable-metadata.feature index 8c8e02f134..8a3d12e399 100755 --- a/integration_testing/features/manage-resources/bulk-editing/apply-inheritable-metadata.feature +++ b/integration_testing/features/manage-resources/bulk-editing/apply-inheritable-metadata.feature @@ -5,7 +5,7 @@ Feature: Modal for applying inheritable metadata And I am at the channel editor page And there is a folder named *Folder 1* with language, categories, level, and requirements And there is a second folder named *Folder 2* with different metadata - + Scenario: Upload resources into a folder and see the modal When I upload a resource into *Folder 1* Then I see the *Apply details from the folder 'Folder 1'* modal diff --git a/integration_testing/features/manage-resources/bulk-editing/quick-edit-single-resource.feature b/integration_testing/features/manage-resources/bulk-editing/quick-edit-single-resource.feature index bbd0bb28e0..82af61d0b2 100755 --- a/integration_testing/features/manage-resources/bulk-editing/quick-edit-single-resource.feature +++ b/integration_testing/features/manage-resources/bulk-editing/quick-edit-single-resource.feature @@ -14,7 +14,7 @@ Feature: Quick edit fields of a single resource Then I am back at the page with the resources And I see a message: *Changes saved* And I can see that the title and description are changed - + Scenario: Edit the language of a resource When I click the *⋮* (Options) button for a resource #alternatively one can select a resource and click the corresponding icon in the top bar And I click the *Edit language* option diff --git a/k8s/create-cloudsql-proxy.sh b/k8s/create-cloudsql-proxy.sh index 92b571e7a1..e9adfd0c8b 100755 --- a/k8s/create-cloudsql-proxy.sh +++ b/k8s/create-cloudsql-proxy.sh @@ -18,4 +18,4 @@ helm upgrade $1 stable/gcloud-sqlproxy --namespace sqlproxy \ --set cloudsql.instances[0].instance=$3 \ --set cloudsql.instances[0].project=$4 \ --set cloudsql.instances[0].region=$5 \ - --set cloudsql.instances[0].port=5432 -i \ No newline at end of file + --set cloudsql.instances[0].port=5432 -i From 4aa45821fb4eb3e225a3cd0f49c0ec6a88b363ad Mon Sep 17 00:00:00 2001 From: Samson Akol Date: Mon, 19 May 2025 19:18:37 +0300 Subject: [PATCH 7/7] Adds linting commit to git blame --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 7c8dcb41d3..b9718afad7 
100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -3,3 +3,4 @@ # Linting updates and fixes a52e08e5c2031cecb97a03fbed49997756ebe01b +8ccaaa60efd1c07b220aefce5a307e4791345111
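The hash recorded above is the linting commit named in the subject line, added so that blame-based tooling can skip it when attributing lines. Local git only honors the list if it is pointed at the file; a minimal sketch of that setup, assuming git 2.23 or newer (the file path comes from the patch itself, the flag and config key are standard git, and hosted blame views such as GitHub's pick up a root-level .git-blame-ignore-revs automatically):

    # One-off: skip the listed reformatting commits for a single blame
    git blame --ignore-revs-file .git-blame-ignore-revs contentcuration/kolibri_public/urls.py

    # Persistent: make every git blame in this clone consult the file
    git config blame.ignoreRevsFile .git-blame-ignore-revs

With either form, lines whose only change came from 8ccaaa60efd1c07b220aefce5a307e4791345111 are attributed to the last commit that made a substantive edit rather than to the reformatting pass.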
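A side note on the noqa: T201 markers added to the deploy scripts and probers in the hunks above: T201 is the code flake8-print reports for a print call, so the marker suppresses the check only on lines where console output is intentional (constant-generation progress messages, prober latency metrics). A hypothetical spot-check, assuming flake8 and flake8-print are installed in the local environment:

    # Surface any print calls that are not explicitly suppressed
    flake8 --select=T201 deploy/ contentcuration/

The paths here are illustrative; any file covered by the hook would do.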