From 9697eafb973b597b1a9fc8a0e0bceb5b901d9e9c Mon Sep 17 00:00:00 2001 From: Jordan Yoshihara Date: Fri, 14 Apr 2017 14:56:51 -0700 Subject: [PATCH 01/11] Separated out serializer fields that aren't needed immediately --- contentcuration/contentcuration/models.py | 15 +- .../contentcuration/serializers.py | 19 +- .../hbtemplates/export_modal.handlebars | 2 +- .../static/js/edit_channel/export/views.js | 3 +- .../hbtemplates/import_dialog.handlebars | 5 +- .../hbtemplates/import_list_item.handlebars | 6 +- .../static/js/edit_channel/import/views.js | 9 +- .../static/js/edit_channel/models.js | 50 +++- .../static/js/edit_channel/move/views.js | 22 +- .../hbtemplates/content_list_item.handlebars | 16 +- .../static/js/edit_channel/views.js | 6 +- .../contentcuration/static/less/import.less | 4 +- contentcuration/contentcuration/urls.py | 32 ++- .../contentcuration/view/file_views.py | 117 +++++++++ .../contentcuration/view/node_views.py | 172 +++++++++++++ contentcuration/contentcuration/views.py | 240 +----------------- 16 files changed, 415 insertions(+), 303 deletions(-) create mode 100644 contentcuration/contentcuration/view/file_views.py create mode 100644 contentcuration/contentcuration/view/node_views.py diff --git a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py index 3654e81e45..18668bfdfa 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -393,14 +393,17 @@ def get_channel(self): return channel def save(self, *args, **kwargs): - # Detect if node has been moved to another tree - if self.pk is not None and ContentNode.objects.filter(pk=self.pk).exists(): - original = ContentNode.objects.get(pk=self.pk) - if original.parent and original.parent_id != self.parent_id and not original.parent.changed: - original.parent.changed = True - original.parent.save() + is_new = self.pk is not None and ContentNode.objects.filter(pk=self.pk).exists() + + # Update all current ancestors 
to be changed + if not is_new: + ContentNode.objects.get(pk=self.pk).get_ancestors(include_self=True).update(changed=True) super(ContentNode, self).save(*args, **kwargs) + + if is_new: + self.get_ancestors(include_self=True).update(changed=True) + post_save_changes = False if self.original_node is None: self.original_node = self diff --git a/contentcuration/contentcuration/serializers.py b/contentcuration/contentcuration/serializers.py index affc032ba3..b9e2435f24 100644 --- a/contentcuration/contentcuration/serializers.py +++ b/contentcuration/contentcuration/serializers.py @@ -39,10 +39,7 @@ class FormatPresetSerializer(serializers.ModelSerializer): name = serializers.SerializerMethodField('retrieve_name') def retrieve_mimetypes(self, preset): - mimetypes = [] - for m in preset.allowed_formats.all(): - mimetypes.append(m.mimetype) - return mimetypes + return preset.allowed_formats.values_list('mimetype', flat=True) def retrieve_name(self, preset): return preset.id @@ -292,11 +289,9 @@ class Meta: class ContentNodeSerializer(BulkSerializerMixin, serializers.ModelSerializer): children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) - tags = TagSerializer(many=True) id = serializers.CharField(required=False) ancestors = serializers.SerializerMethodField('get_node_ancestors') - descendants = serializers.SerializerMethodField('get_node_descendants') files = FileSerializer(many=True, read_only=True) assessment_items = AssessmentItemSerializer(many=True, read_only=True) associated_presets = serializers.SerializerMethodField('retrieve_associated_presets') @@ -331,14 +326,12 @@ def retrieve_metadata(self, node): # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ # .filter(Q(contentnode_id__in=descendants.values_list('id', flat=True)) | Q(assessment_item_id__in=descendants.values_list('assessment_items__id', flat=True)))\ # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) - descendants 
= node.get_descendants(include_self=True).annotate(change_count=Case(When(changed=True, then=Value(1)),default=Value(0),output_field=IntegerField())) - aggregated = descendants.aggregate(resource_size=Sum('files__file_size'), is_changed=Sum('change_count'), assessment_size=Sum('assessment_items__files__file_size')) + descendants = node.get_descendants(include_self=True) return { "total_count" : node.get_descendant_count(), "resource_count" : descendants.exclude(kind=content_kinds.TOPIC).count(), "max_sort_order" : node.children.aggregate(max_sort_order=Max('sort_order'))['max_sort_order'] or 1, - "resource_size" : (aggregated.get('resource_size') or 0) + (aggregated.get('assessment_size') or 0), - "has_changed_descendant" : aggregated.get('is_changed') != 0 + "resource_size" : 0, # Make separate request } else: # TODO: Account for files duplicated on node @@ -352,7 +345,6 @@ def retrieve_metadata(self, node): "resource_count" : 1, "max_sort_order" : node.sort_order, "resource_size" : assessment_size + resource_size, - "has_changed_descendant" : node.changed } @staticmethod @@ -465,15 +457,12 @@ def update(self, instance, validated_data): def get_node_ancestors(self,node): return node.get_ancestors().values_list('id', flat=True) - def get_node_descendants(self, node): - return node.get_descendants().values_list('id', flat=True) - class Meta: list_serializer_class = CustomListSerializer model = ContentNode fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'original_node', 'cloned_source', 'original_channel','original_source_node_id', 'source_node_id', 'node_id', 'copyright_holder', 'license', 'license_description', 'kind', 'children', 'parent', 'content_id','associated_presets', 'valid', 'original_channel_id', 'source_channel_id', - 'descendants', 'ancestors', 'tags', 'files', 'metadata', 'created', 'modified', 'published', 'extra_fields', 'assessment_items', 'source_id', 'source_domain') + 'ancestors', 'tags', 'files', 'metadata', 'created', 
'modified', 'published', 'extra_fields', 'assessment_items', 'source_id', 'source_domain') class RootNodeSerializer(serializers.ModelSerializer): children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) diff --git a/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars b/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars index 080c13feaa..2799e15365 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars @@ -27,7 +27,7 @@ {{#format_count "Resource" node.metadata.resource_count}}{{/format_count}} - (Calculating size...) + (Calculating...) CANCEL diff --git a/contentcuration/contentcuration/static/js/edit_channel/export/views.js b/contentcuration/contentcuration/static/js/edit_channel/export/views.js index b7300e6212..d8312e695d 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/export/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/export/views.js @@ -25,8 +25,7 @@ var ExportModalView = BaseViews.BaseModalView.extend({ }); var self = this; - this.retrieve_nodes(this.model.get('children')).then(function(collection){ - var size = collection.reduce(function(size, node){ return size + node.get('metadata').resource_size; }, 0); + this.model.calculate_size().then(function(size){ self.$("#export_size").text("(" + stringHelper.format_size(size) + ")"); }); }, diff --git a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars index 6f81a76d7d..a2b3db9584 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars +++ 
b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars @@ -7,5 +7,8 @@ - No files selected + + 0 Topics, 0 Resources + (0B) + \ No newline at end of file diff --git a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars index bfa63ffc05..e74b072d0d 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars @@ -7,8 +7,12 @@
{{node.title}}
{{#if isfolder}} {{#if node.children}} + {{#if isfolder}} + {{#unless is_channel}}{{/unless}} + {{else}} + + {{/if}} - {{#unless is_channel}}{{/unless}} {{else}} {{/if}} diff --git a/contentcuration/contentcuration/static/js/edit_channel/import/views.js b/contentcuration/contentcuration/static/js/edit_channel/import/views.js index 18300c4fa4..34592d2fc1 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/import/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/import/views.js @@ -83,12 +83,16 @@ var ImportView = BaseViews.BaseListView.extend({ }else{ totalCount += entry.get("metadata").total_count; } - }); var data = this.importList.get_metadata(); totalCount = totalCount - data.count; - this.$("#import_file_count").html(totalCount + " Topic" + ((totalCount == 1)? ", " : "s, ") + data.count + " Resource" + ((data.count == 1)? " " : "s ") + stringHelper.format_size(data.size)); + this.$("#import_file_count").html(totalCount + " Topic" + ((totalCount == 1)? ", " : "s, ") + data.count + " Resource" + ((data.count == 1)? 
"" : "s")); + var self = this; + this.$("#import_file_size").html("Calculating...") + collection.calculate_size().then(function(size){ + self.$("#import_file_size").html(stringHelper.format_size(size)); + }); }, import_content:function(){ var self = this; @@ -172,7 +176,6 @@ var ImportList = BaseViews.BaseListView.extend({ this.metadata = {"count" : 0, "size":0}; this.views.forEach(function(entry){ self.metadata.count += entry.metadata.count; - self.metadata.size += entry.metadata.size; }); return this.metadata; } diff --git a/contentcuration/contentcuration/static/js/edit_channel/models.js b/contentcuration/contentcuration/static/js/edit_channel/models.js index 2804e67e41..c85ae2c02e 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/models.js +++ b/contentcuration/contentcuration/static/js/edit_channel/models.js @@ -199,6 +199,21 @@ var ContentNodeModel = BaseModel.extend({ data['randomize'] = (data['randomize'] !== undefined)? data['randomize'] : window.preferences.auto_randomize_questions; this.set('extra_fields', data); } + }, + calculate_size: function(){ + var self = this; + var promise = new Promise(function(resolve, reject){ + $.ajax({ + method:"POST", + url: window.Urls.get_total_size(), + data: JSON.stringify([self.id]), + error:reject, + success: function(data) { + resolve(JSON.parse(data).size); + } + }); + }); + return promise; } }); @@ -235,6 +250,36 @@ var ContentNodeCollection = BaseCollection.extend({ }); return promise; }, + get_descendant_ids: function(){ + var self = this; + var promise = new Promise(function(resolve, reject){ + $.ajax({ + method:"POST", + url: window.Urls.get_node_descendants(), + data: JSON.stringify(self.pluck('id')), + success: function(data) { + resolve(JSON.parse(data).node_ids.split(" ")); + }, + error:reject + }); + }); + return promise; + }, + calculate_size: function(){ + var self = this; + var promise = new Promise(function(resolve, reject){ + $.ajax({ + method:"POST", + url: 
window.Urls.get_total_size(), + data: JSON.stringify(self.pluck('id')), + success: function(data) { + resolve(JSON.parse(data).size); + }, + error:reject + }); + }); + return promise; + }, get_all_fetch: function(ids, force_fetch){ force_fetch = (force_fetch)? true : false; var self = this; @@ -270,10 +315,7 @@ var ContentNodeCollection = BaseCollection.extend({ url: window.Urls.duplicate_nodes(), data: JSON.stringify(data), success: function(data) { - copied_list = JSON.parse(data).node_ids.split(" "); - self.get_all_fetch(copied_list).then(function(fetched){ - resolve(fetched); - }); + resolve(new ContentNodeCollection(JSON.parse(data))); }, error:reject }); diff --git a/contentcuration/contentcuration/static/js/edit_channel/move/views.js b/contentcuration/contentcuration/static/js/edit_channel/move/views.js index b72ce30889..3d28a02b1a 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/move/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/move/views.js @@ -34,8 +34,6 @@ var MoveView = BaseViews.BaseListView.extend({ this.onmove = options.onmove; this.collection = options.collection; - // Calculate valid moves using node descendants - this.to_move_ids = _.uniq(this.collection.reduce(function(l,n){ return l.concat(n.get('descendants')).concat(n.id);}, [])); this.render(); }, events: { @@ -69,13 +67,19 @@ var MoveView = BaseViews.BaseListView.extend({ clipboard_node.set({'title': 'My Clipboard'}); fetched.add(clipboard_node); - // Render list - this.targetList = new MoveList({ - model: null, - el: $("#target_list_area"), - is_target: true, - collection: fetched, - container: this + // Calculate valid moves using node descendants + var self = this; + this.collection.get_descendant_ids().then(function(ids){ + self.to_move_ids = ids; + + // Render list + self.targetList = new MoveList({ + model: null, + el: $("#target_list_area"), + is_target: true, + collection: fetched, + container: self + }); }); }, diff --git 
a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars index ac20bb5fb2..b6130a02ec 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars @@ -21,13 +21,15 @@ {{#if time}} {{time}}{{/if}} {{#if isfolder}} {{#format_count "Resource" node.metadata.resource_count}}{{/format_count}}{{/if}}   - {{#if isexercise}} - - {{#format_count "Question" num_questions}}{{/format_count}} - {{else}} - - {{#format_file_size node.metadata.resource_size}}{{/format_file_size}} - {{/if}} + {{#unless isfolder}} + {{#if isexercise}} + + {{#format_count "Question" num_questions}}{{/format_count}} + {{else}} + + {{#format_file_size node.metadata.resource_size}}{{/format_file_size}} + {{/if}} + {{/unless}}   {{#if node.changed}} {{#if node.published}}Updated{{else}}New{{/if}} diff --git a/contentcuration/contentcuration/static/js/edit_channel/views.js b/contentcuration/contentcuration/static/js/edit_channel/views.js index 731a702ab6..267b83b1e4 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/views.js @@ -200,9 +200,6 @@ var BaseWorkspaceView = BaseView.extend({ }); }, handle_move:function(target, moved, original_parents){ - // Recalculate counts - this.reload_ancestors(original_parents, true); - // Remove where nodes originally were moved.forEach(function(node){ window.workspace_manager.remove(node.id)}); @@ -210,6 +207,9 @@ var BaseWorkspaceView = BaseView.extend({ var content = window.workspace_manager.get(target.id); if(content && content.list) content.list.add_nodes(moved); + + // Recalculate counts + this.reload_ancestors(original_parents, true); } }); diff --git 
a/contentcuration/contentcuration/static/less/import.less b/contentcuration/contentcuration/static/less/import.less index 0cbaeb925d..6686e3a4cc 100644 --- a/contentcuration/contentcuration/static/less/import.less +++ b/contentcuration/contentcuration/static/less/import.less @@ -129,8 +129,8 @@ #import_content_submit{ margin-right:20px; } - #import_file_count{ - margin-right:20px; + #import_file_metadata{ + padding-right:20px; font-size:12pt; margin-top:2px; } diff --git a/contentcuration/contentcuration/urls.py b/contentcuration/contentcuration/urls.py index 50771f8b4a..f2b33251f9 100644 --- a/contentcuration/contentcuration/urls.py +++ b/contentcuration/contentcuration/urls.py @@ -27,6 +27,8 @@ import contentcuration.view.settings_views as settings_views import contentcuration.view.internal_views as internal_views import contentcuration.view.zip_views as zip_views +import contentcuration.view.file_views as file_views +import contentcuration.view.node_views as node_views from rest_framework.authtoken import views as auth_view from contentcuration import api @@ -108,25 +110,35 @@ class AssessmentItemViewSet(BulkModelViewSet): url(r'^admin/', include(admin.site.urls)), url(r'^api/', include(router.urls)), url(r'^api/', include(bulkrouter.urls)), - url(r'^api/duplicate_nodes/$', views.duplicate_nodes, name='duplicate_nodes'), - url(r'^api/move_nodes/$', views.move_nodes, name='move_nodes'), url(r'^api/publish_channel/$', views.publish_channel, name='publish_channel'), - url(r'^api/generate_thumbnail/$', views.generate_thumbnail, name='generate_thumbnail'), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), - url(r'^file_upload/', views.file_upload, name="file_upload"), - url(r'^file_create/', views.file_create, name="file_create"), url(r'^channels/$', views.channel_list, name='channels'), url(r'^channels/(?P[^/]+)/edit', views.channel, name='channel'), url(r'^channels/(?P[^/]+)/view', views.channel_view_only, name='channel_view_only'), - 
url(r'^thumbnail_upload/', views.thumbnail_upload, name='thumbnail_upload'), - url(r'^exercise_image_upload/', views.exercise_image_upload, name='exercise_image_upload'), - url(r'^image_upload/', views.image_upload, name='image_upload'), - url(r'^zipcontent/(?P[^/]+)/(?P.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"), url(r'^unsupported_browser/$', views.unsupported_browser, name='unsupported_browser'), url(r'^unauthorized/$', views.unauthorized, name='unauthorized'), url(r'^accessible_channels/$', views.accessible_channels, name='accessible_channels'), url(r'^healthz$', views.health, name='health'), - url(r'^get_nodes_by_ids$', views.get_nodes_by_ids, name='get_nodes_by_ids'), +] + +# Add node api endpoints +urlpatterns += [ + url(r'^api/get_nodes_by_ids$', node_views.get_nodes_by_ids, name='get_nodes_by_ids'), + url(r'^api/get_total_size$', node_views.get_total_size, name='get_total_size'), + url(r'^api/duplicate_nodes/$', node_views.duplicate_nodes, name='duplicate_nodes'), + url(r'^api/move_nodes/$', node_views.move_nodes, name='move_nodes'), + url(r'^api/get_node_descendants/$', node_views.get_node_descendants, name='get_node_descendants'), +] + +# Add file api endpoints +urlpatterns += [ + url(r'^api/thumbnail_upload/', file_views.thumbnail_upload, name='thumbnail_upload'), + url(r'^api/exercise_image_upload/', file_views.exercise_image_upload, name='exercise_image_upload'), + url(r'^api/image_upload/', file_views.image_upload, name='image_upload'), + url(r'^zipcontent/(?P[^/]+)/(?P.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"), + url(r'^api/file_upload/', file_views.file_upload, name="file_upload"), + url(r'^api/file_create/', file_views.file_create, name="file_create"), + url(r'^api/generate_thumbnail/$', file_views.generate_thumbnail, name='generate_thumbnail'), ] # Add account/registration endpoints diff --git a/contentcuration/contentcuration/view/file_views.py b/contentcuration/contentcuration/view/file_views.py new file mode
100644 index 0000000000..b1ac9bdd06 --- /dev/null +++ b/contentcuration/contentcuration/view/file_views.py @@ -0,0 +1,117 @@ +import json +import logging +import os +from django.http import HttpResponse, HttpResponseBadRequest +from django.views.decorators.csrf import csrf_exempt +from django.conf import settings +from django.core.urlresolvers import reverse_lazy +from django.core.files import File as DjFile +from rest_framework.renderers import JSONRenderer +from contentcuration.api import write_file_to_storage +from contentcuration.utils.files import generate_thumbnail_from_node +from contentcuration.models import File, FormatPreset, ContentNode, License, generate_file_on_disk_name, generate_storage_url +from contentcuration.serializers import ContentNodeSerializer, FileSerializer +from le_utils.constants import format_presets, content_kinds, file_formats, exercises, licenses +from pressurecooker.videos import guess_video_preset_by_resolution + +def file_upload(request): + if request.method == 'POST': + #Implement logic for switching out files without saving it yet + filename, ext = os.path.splitext(request.FILES.values()[0]._name) + size = request.FILES.values()[0]._size + file_object = File(file_size=size, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, preset_id=request.META.get('HTTP_PRESET')) + file_object.save() + return HttpResponse(json.dumps({ + "success": True, + "filename": str(file_object), + "file": JSONRenderer().render(FileSerializer(file_object).data) + })) + +def file_create(request): + if request.method == 'POST': + original_filename, ext = os.path.splitext(request.FILES.values()[0]._name) + size = request.FILES.values()[0]._size + presets = FormatPreset.objects.filter(allowed_formats__extension__contains=ext[1:]) + kind = presets.first().kind + preferences = json.loads(request.user.preferences) + author = preferences.get('author') if isinstance(preferences.get('author'), 
basestring) else request.user.get_full_name() + license = License.objects.filter(license_name=preferences.get('license')).first() # Use filter/first in case preference hasn't been set + license_id = license.pk if license else settings.DEFAULT_LICENSE + new_node = ContentNode(title=original_filename, kind=kind, license_id=license_id, author=author, copyright_holder=preferences.get('copyright_holder')) + if license.license_name == licenses.SPECIAL_PERMISSIONS: + new_node.license_description = preferences.get('license_description') + new_node.save() + file_object = File(file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, contentnode=new_node, file_size=size) + file_object.save() + if kind.pk == content_kinds.VIDEO: + file_object.preset_id = guess_video_preset_by_resolution(str(file_object.file_on_disk)) + elif presets.filter(supplementary=False).count() == 1: + file_object.preset = presets.filter(supplementary=False).first() + + file_object.save() + + try: + if preferences.get('auto_derive_video_thumbnail') and new_node.kind_id == content_kinds.VIDEO \ + or preferences.get('auto_derive_audio_thumbnail') and new_node.kind_id == content_kinds.AUDIO \ + or preferences.get('auto_derive_html5_thumbnail') and new_node.kind_id == content_kinds.HTML5 \ + or preferences.get('auto_derive_document_thumbnail') and new_node.kind_id == content_kinds.DOCUMENT: + generate_thumbnail_from_node(new_node, set_node=True) + except Exception: + pass + + return HttpResponse(json.dumps({ + "success": True, + "node": JSONRenderer().render(ContentNodeSerializer(new_node).data) + })) + +def generate_thumbnail(request): + logging.debug("Entering the generate_thumbnail endpoint") + + if request.method != 'POST': + raise HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") + else: + data = json.loads(request.body) + node = ContentNode.objects.get(pk=data["node_id"]) + + thumbnail_object = 
generate_thumbnail_from_node(node) + + return HttpResponse(json.dumps({ + "success": True, + "file": JSONRenderer().render(FileSerializer(thumbnail_object).data), + "path": generate_storage_url(str(thumbnail_object)), + })) + +def thumbnail_upload(request): + if request.method == 'POST': + fobj = request.FILES.values()[0] + formatted_filename = write_file_to_storage(fobj) + + return HttpResponse(json.dumps({ + "success": True, + "formatted_filename": formatted_filename, + "file": None, + "path": generate_storage_url(formatted_filename), + })) + +def image_upload(request): + if request.method == 'POST': + name, ext = os.path.splitext(request.FILES.values()[0]._name) # gets file extension without leading period + file_object = File(contentnode_id=request.META.get('HTTP_NODE'),original_filename=name, preset_id=request.META.get('HTTP_PRESET'), file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:]) + file_object.save() + return HttpResponse(json.dumps({ + "success": True, + "file": JSONRenderer().render(FileSerializer(file_object).data), + "path": generate_storage_url(str(file_object)), + })) + +def exercise_image_upload(request): + if request.method == 'POST': + ext = os.path.splitext(request.FILES.values()[0]._name)[1][1:] # gets file extension without leading period + file_object = File(preset_id=format_presets.EXERCISE_IMAGE, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext) + file_object.save() + return HttpResponse(json.dumps({ + "success": True, + "formatted_filename": exercises.CONTENT_STORAGE_FORMAT.format(str(file_object)), + "file_id": file_object.pk, + "path": generate_storage_url(str(file_object)), + })) diff --git a/contentcuration/contentcuration/view/node_views.py b/contentcuration/contentcuration/view/node_views.py new file mode 100644 index 0000000000..47eb9f1cb7 --- /dev/null +++ b/contentcuration/contentcuration/view/node_views.py @@ -0,0 +1,172 @@ +import copy +import json +import logging +import os +from 
django.http import HttpResponse, HttpResponseBadRequest +from django.views.decorators.csrf import csrf_exempt +from django.conf import settings +from django.core.cache import cache +from django.core.exceptions import ObjectDoesNotExist +from django.db import transaction +from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum +from rest_framework.renderers import JSONRenderer +from contentcuration.utils.files import duplicate_file +from contentcuration.models import File, ContentNode, ContentTag +from contentcuration.serializers import ContentNodeSerializer +from le_utils.constants import format_presets, content_kinds, file_formats, licenses + +def get_total_size(request): + if request.method == 'POST': + data = json.loads(request.body) + sizes = ContentNode.objects.prefetch_related('assessment_items').prefetch_related('files').prefetch_related('children')\ + .filter(id__in=data).get_descendants(include_self=True)\ + .aggregate(resource_size=Sum('files__file_size'), assessment_size=Sum('assessment_items__files__file_size')) + + return HttpResponse(json.dumps({'success':True, 'size': (sizes['resource_size'] or 0) + (sizes['assessment_size'] or 0)})) + +def delete_nodes(request): + if request.method == 'POST': + data = json.loads(request.body) + nodes = ContentNode.objects.filter(pk__in=data['nodes']).delete() + return HttpResponse({'success':True}) + +def get_node_descendants(request): + if request.method == 'POST': + data = json.loads(request.body) + nodes = ContentNode.objects.filter(pk__in=data).get_descendants(include_self=True).values_list('id', flat=True) + return HttpResponse(json.dumps({'success':True, "node_ids": " ".join(nodes)})) + + +def get_nodes_by_ids(request): + if request.method == 'POST': + nodes = ContentNode.objects.prefetch_related('files').prefetch_related('assessment_items')\ + .prefetch_related('tags').prefetch_related('children').filter(pk__in=json.loads(request.body)) + return 
HttpResponse(JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data)) + + +def duplicate_nodes(request): + logging.debug("Entering the copy_node endpoint") + + if request.method != 'POST': + raise HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") + else: + data = json.loads(request.body) + + try: + nodes = data["nodes"] + sort_order = data.get("sort_order") or 1 + target_parent = data["target_parent"] + channel_id = data["channel_id"] + new_nodes = [] + + with transaction.atomic(): + for node_data in nodes: + new_node = _duplicate_node(node_data['id'], sort_order=sort_order, parent=target_parent, channel_id=channel_id) + new_nodes.append(new_node.pk) + sort_order+=1 + + except KeyError: + raise ObjectDoesNotExist("Missing attribute from data: {}".format(data)) + + serialized = ContentNodeSerializer(ContentNode.objects.filter(pk__in=new_nodes), many=True).data + return HttpResponse(JSONRenderer().render(serialized)) + +def _duplicate_node(node, sort_order=None, parent=None, channel_id=None): + if isinstance(node, int) or isinstance(node, basestring): + node = ContentNode.objects.get(pk=node) + + original_channel = node.get_original_node().get_channel() if node.get_original_node() else None + + new_node = ContentNode.objects.create( + title=node.title, + description=node.description, + kind=node.kind, + license=node.license, + parent=ContentNode.objects.get(pk=parent) if parent else None, + sort_order=sort_order or node.sort_order, + copyright_holder=node.copyright_holder, + changed=True, + original_node=node.original_node or node, + cloned_source=node, + original_channel_id = node.original_channel_id or original_channel.id if original_channel else None, + source_channel_id = node.get_channel().id if node.get_channel() else None, + original_source_node_id = node.original_source_node_id or node.node_id, + source_node_id = node.node_id, + author=node.author, + content_id=node.content_id, + extra_fields=node.extra_fields, + ) + + # 
add tags now + for tag in node.tags.all(): + new_tag, is_new = ContentTag.objects.get_or_create( + tag_name=tag.tag_name, + channel_id=channel_id, + ) + new_node.tags.add(new_tag) + + # copy file object too + for fobj in node.files.all(): + duplicate_file(fobj, node=new_node) + + # copy assessment item object too + for aiobj in node.assessment_items.all(): + aiobj_copy = copy.copy(aiobj) + aiobj_copy.id = None + aiobj_copy.contentnode = new_node + aiobj_copy.save() + for fobj in aiobj.files.all(): + duplicate_file(fobj, assessment_item=aiobj_copy) + + for c in node.children.all(): + _duplicate_node(c, parent=new_node.id) + + return new_node + +def move_nodes(request): + logging.debug("Entering the move_nodes endpoint") + + if request.method != 'POST': + raise HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") + else: + data = json.loads(request.body) + + try: + nodes = data["nodes"] + target_parent = ContentNode.objects.get(pk=data["target_parent"]) + channel_id = data["channel_id"] + min_order = data.get("min_order") or 0 + max_order = data.get("max_order") or min_order + len(nodes) + + except KeyError: + raise ObjectDoesNotExist("Missing attribute from data: {}".format(data)) + + all_ids = [] + with transaction.atomic(): + for n in nodes: + min_order = min_order + float(max_order - min_order) / 2 + node = ContentNode.objects.get(pk=n['id']) + _move_node(node, parent=target_parent, sort_order=min_order, channel_id=channel_id) + all_ids.append(n['id']) + + serialized = ContentNodeSerializer(ContentNode.objects.filter(pk__in=all_ids), many=True).data + return HttpResponse(JSONRenderer().render(serialized)) + +def _move_node(node, parent=None, sort_order=None, channel_id=None): + node.parent = parent + node.sort_order = sort_order + node.changed = True + descendants = node.get_descendants(include_self=True) + node.save() + + for tag in ContentTag.objects.filter(tagged_content__in=descendants).distinct(): + # If moving from another channel + if 
tag.channel_id != channel_id: + t, is_new = ContentTag.objects.get_or_create(tag_name=tag.tag_name, channel_id=channel_id) + + # Set descendants with this tag to correct tag + for n in descendants.filter(tags=tag): + n.tags.remove(tag) + n.tags.add(t) + + return node diff --git a/contentcuration/contentcuration/views.py b/contentcuration/contentcuration/views.py index 5831d816ef..b0148d3e2e 100644 --- a/contentcuration/contentcuration/views.py +++ b/contentcuration/contentcuration/views.py @@ -18,7 +18,7 @@ from django.core.exceptions import ObjectDoesNotExist from django.core.context_processors import csrf from django.db import transaction -from django.db.models import Q, Case, When, Value, IntegerField, Max +from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum from django.core.urlresolvers import reverse_lazy from django.core.files import File as DjFile from rest_framework.renderers import JSONRenderer @@ -34,12 +34,6 @@ from pressurecooker.images import create_tiled_image from pressurecooker.encodings import write_base64_to_file -def get_nodes_by_ids(request): - if request.method == 'POST': - nodes = ContentNode.objects.prefetch_related('files').prefetch_related('assessment_items')\ - .prefetch_related('tags').prefetch_related('children').filter(pk__in=json.loads(request.body)) - return HttpResponse(JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data)) - def base(request): if not check_supported_browsers(request.META.get('HTTP_USER_AGENT')): return redirect(reverse_lazy('unsupported_browser')) @@ -143,238 +137,6 @@ def channel_view_only(request, channel_id): return channel_page(request, channel) -def file_upload(request): - if request.method == 'POST': - preset = FormatPreset.objects.get(id=request.META.get('HTTP_PRESET')) - #Implement logic for switching out files without saving it yet - filename, ext = os.path.splitext(request.FILES.values()[0]._name) - size = request.FILES.values()[0]._size - file_object = 
File(file_size=size, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, preset=preset) - file_object.save() - return HttpResponse(json.dumps({ - "success": True, - "filename": str(file_object), - "file": JSONRenderer().render(FileSerializer(file_object).data) - })) - -def file_create(request): - if request.method == 'POST': - original_filename, ext = os.path.splitext(request.FILES.values()[0]._name) - size = request.FILES.values()[0]._size - presets = FormatPreset.objects.filter(allowed_formats__extension__contains=ext[1:]) - kind = presets.first().kind - preferences = json.loads(request.user.preferences) - author = preferences.get('author') if isinstance(preferences.get('author'), basestring) else request.user.get_full_name() - license = License.objects.filter(license_name=preferences.get('license')).first() # Use filter/first in case preference hasn't been set - license_id = license.pk if license else settings.DEFAULT_LICENSE - new_node = ContentNode(title=original_filename, kind=kind, license_id=license_id, author=author, copyright_holder=preferences.get('copyright_holder')) - if license.license_name == licenses.SPECIAL_PERMISSIONS: - new_node.license_description = preferences.get('license_description') - new_node.save() - file_object = File(file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, contentnode=new_node, file_size=size) - file_object.save() - if kind.pk == content_kinds.VIDEO: - file_object.preset_id = guess_video_preset_by_resolution(str(file_object.file_on_disk)) - elif presets.filter(supplementary=False).count() == 1: - file_object.preset = presets.filter(supplementary=False).first() - - file_object.save() - - try: - if preferences.get('auto_derive_video_thumbnail') and new_node.kind_id == content_kinds.VIDEO \ - or preferences.get('auto_derive_audio_thumbnail') and new_node.kind_id == content_kinds.AUDIO \ - 
or preferences.get('auto_derive_html5_thumbnail') and new_node.kind_id == content_kinds.HTML5 \ - or preferences.get('auto_derive_document_thumbnail') and new_node.kind_id == content_kinds.DOCUMENT: - generate_thumbnail_from_node(new_node, set_node=True) - except Exception: - pass - - return HttpResponse(json.dumps({ - "success": True, - "node": JSONRenderer().render(ContentNodeSerializer(new_node).data) - })) - -def generate_thumbnail(request): - logging.debug("Entering the generate_thumbnail endpoint") - - if request.method != 'POST': - raise HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") - else: - data = json.loads(request.body) - node = ContentNode.objects.get(pk=data["node_id"]) - - thumbnail_object = generate_thumbnail_from_node(node) - - return HttpResponse(json.dumps({ - "success": True, - "file": JSONRenderer().render(FileSerializer(thumbnail_object).data), - "path": generate_storage_url(str(thumbnail_object)), - })) - -def thumbnail_upload(request): - if request.method == 'POST': - fobj = request.FILES.values()[0] - formatted_filename = write_file_to_storage(fobj) - - return HttpResponse(json.dumps({ - "success": True, - "formatted_filename": formatted_filename, - "file": None, - "path": generate_storage_url(formatted_filename), - })) - -def image_upload(request): - if request.method == 'POST': - name, ext = os.path.splitext(request.FILES.values()[0]._name) # gets file extension without leading period - file_object = File(contentnode_id=request.META.get('HTTP_NODE'),original_filename=name, preset_id=request.META.get('HTTP_PRESET'), file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:]) - file_object.save() - return HttpResponse(json.dumps({ - "success": True, - "file": JSONRenderer().render(FileSerializer(file_object).data), - "path": generate_storage_url(str(file_object)), - })) - -def exercise_image_upload(request): - if request.method == 'POST': - ext = 
os.path.splitext(request.FILES.values()[0]._name)[1][1:] # gets file extension without leading period - file_object = File(preset_id=format_presets.EXERCISE_IMAGE, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext) - file_object.save() - return HttpResponse(json.dumps({ - "success": True, - "formatted_filename": exercises.CONTENT_STORAGE_FORMAT.format(str(file_object)), - "file_id": file_object.pk, - "path": generate_storage_url(str(file_object)), - })) - -def duplicate_nodes(request): - logging.debug("Entering the copy_node endpoint") - - if request.method != 'POST': - raise HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") - else: - data = json.loads(request.body) - - try: - nodes = data["nodes"] - sort_order = data.get("sort_order") or 1 - target_parent = data["target_parent"] - channel_id = data["channel_id"] - new_nodes = [] - - with transaction.atomic(): - for node_data in nodes: - new_node = _duplicate_node(node_data['id'], sort_order=sort_order, parent=target_parent, channel_id=channel_id) - new_nodes.append(new_node.pk) - sort_order+=1 - - except KeyError: - raise ObjectDoesNotExist("Missing attribute from data: {}".format(data)) - - return HttpResponse(json.dumps({ - "success": True, - "node_ids": " ".join(new_nodes) - })) - -def _duplicate_node(node, sort_order=None, parent=None, channel_id=None): - if isinstance(node, int) or isinstance(node, basestring): - node = ContentNode.objects.get(pk=node) - - original_channel = node.get_original_node().get_channel() if node.get_original_node() else None - - new_node = ContentNode.objects.create( - title=node.title, - description=node.description, - kind=node.kind, - license=node.license, - parent=ContentNode.objects.get(pk=parent) if parent else None, - sort_order=sort_order or node.sort_order, - copyright_holder=node.copyright_holder, - changed=True, - original_node=node.original_node or node, - cloned_source=node, - original_channel_id = node.original_channel_id or 
original_channel.id if original_channel else None, - source_channel_id = node.get_channel().id if node.get_channel() else None, - original_source_node_id = node.original_source_node_id or node.node_id, - source_node_id = node.node_id, - author=node.author, - content_id=node.content_id, - extra_fields=node.extra_fields, - ) - - # add tags now - for tag in node.tags.all(): - new_tag, is_new = ContentTag.objects.get_or_create( - tag_name=tag.tag_name, - channel_id=channel_id, - ) - new_node.tags.add(new_tag) - - # copy file object too - for fobj in node.files.all(): - duplicate_file(fobj, node=new_node) - - # copy assessment item object too - for aiobj in node.assessment_items.all(): - aiobj_copy = copy.copy(aiobj) - aiobj_copy.id = None - aiobj_copy.contentnode = new_node - aiobj_copy.save() - for fobj in aiobj.files.all(): - duplicate_file(fobj, assessment_item=aiobj_copy) - - for c in node.children.all(): - _duplicate_node(c, parent=new_node.id) - - return new_node - -def move_nodes(request): - logging.debug("Entering the move_nodes endpoint") - - if request.method != 'POST': - raise HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") - else: - data = json.loads(request.body) - - try: - nodes = data["nodes"] - target_parent = ContentNode.objects.get(pk=data["target_parent"]) - channel_id = data["channel_id"] - min_order = data.get("min_order") or 0 - max_order = data.get("max_order") or min_order + len(nodes) - - except KeyError: - raise ObjectDoesNotExist("Missing attribute from data: {}".format(data)) - - all_ids = [] - with transaction.atomic(): - for n in nodes: - min_order = min_order + float(max_order - min_order) / 2 - node = ContentNode.objects.get(pk=n['id']) - _move_node(node, parent=target_parent, sort_order=min_order, channel_id=channel_id) - all_ids.append(n['id']) - - serialized = ContentNodeSerializer(ContentNode.objects.filter(pk__in=all_ids), many=True).data - return HttpResponse(JSONRenderer().render(serialized)) - -def 
_move_node(node, parent=None, sort_order=None, channel_id=None): - node.parent = parent - node.sort_order = sort_order - node.changed = True - descendants = node.get_descendants(include_self=True) - node.save() - - for tag in ContentTag.objects.filter(tagged_content__in=descendants).distinct(): - # If moving from another channel - if tag.channel_id != channel_id: - t, is_new = ContentTag.objects.get_or_create(tag_name=tag.tag_name, channel_id=channel_id) - - # Set descendants with this tag to correct tag - for n in descendants.filter(tags=tag): - n.tags.remove(tag) - n.tags.add(t) - - return node - @csrf_exempt def publish_channel(request): logging.debug("Entering the publish_channel endpoint") From 18829c5fd185c5ffe604aadd98afad91d9e17c27 Mon Sep 17 00:00:00 2001 From: Jordan Yoshihara Date: Fri, 14 Apr 2017 15:51:22 -0700 Subject: [PATCH 02/11] Fixed saving changes --- contentcuration/contentcuration/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py index 18668bfdfa..e39cc7f171 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -393,10 +393,10 @@ def get_channel(self): return channel def save(self, *args, **kwargs): - is_new = self.pk is not None and ContentNode.objects.filter(pk=self.pk).exists() + is_new = self.pk is not None # Update all current ancestors to be changed - if not is_new: + if not is_new and ContentNode.objects.filter(pk=self.pk).exists(): ContentNode.objects.get(pk=self.pk).get_ancestors(include_self=True).update(changed=True) super(ContentNode, self).save(*args, **kwargs) From 0824b6de0fc6ebf08b26f5a901fce62e191de68d Mon Sep 17 00:00:00 2001 From: Jordan Yoshihara Date: Fri, 14 Apr 2017 16:49:18 -0700 Subject: [PATCH 03/11] Reverted to old saving method --- contentcuration/contentcuration/models.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git 
a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py index e39cc7f171..22b4af2261 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -393,17 +393,15 @@ def get_channel(self): return channel def save(self, *args, **kwargs): - is_new = self.pk is not None - - # Update all current ancestors to be changed - if not is_new and ContentNode.objects.filter(pk=self.pk).exists(): - ContentNode.objects.get(pk=self.pk).get_ancestors(include_self=True).update(changed=True) + # Detect if node has been moved to another tree + if self.pk and ContentNode.objects.filter(pk=self.pk).exists(): + original = ContentNode.objects.get(pk=self.pk) + if original.parent and original.parent_id != self.parent_id and not original.parent.changed: + original.parent.changed = True + original.parent.save() super(ContentNode, self).save(*args, **kwargs) - if is_new: - self.get_ancestors(include_self=True).update(changed=True) - post_save_changes = False if self.original_node is None: self.original_node = self From d9ff1903514b5856210abe91e495239b93d3c829 Mon Sep 17 00:00:00 2001 From: Jordan Yoshihara Date: Fri, 14 Apr 2017 18:16:32 -0700 Subject: [PATCH 04/11] Separated serializers and added caching to associated presets and original channel --- contentcuration/contentcuration/models.py | 31 +++- .../contentcuration/serializers.py | 147 ++++++++++-------- .../static/js/edit_channel/router.js | 1 - .../hbtemplates/content_list_item.handlebars | 2 +- .../static/js/edit_channel/tree_edit/views.js | 3 +- .../edit_metadata_dialog.handlebars | 2 +- .../static/js/edit_channel/uploader/views.js | 4 +- .../static/js/edit_channel/views.js | 2 - .../contentcuration/static/less/uploader.less | 8 + .../contentcuration/view/file_views.py | 4 +- .../contentcuration/view/node_views.py | 13 +- 11 files changed, 127 insertions(+), 90 deletions(-) diff --git a/contentcuration/contentcuration/models.py 
b/contentcuration/contentcuration/models.py index 22b4af2261..8fcf164af9 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -6,6 +6,7 @@ import json from django.conf import settings from django.contrib import admin +from django.core.cache import cache from django.core.files.storage import FileSystemStorage from django.db import IntegrityError, connections, models, connection from django.db.models import Q, Sum, Max, Count, Case, When, IntegerField @@ -377,13 +378,31 @@ class ContentNode(MPTTModel, models.Model): objects = TreeManager() def get_original_node(self): + key = "original_channel_{}".format(self.pk) + cached_data = cache.get(key) + if cached_data: + return cached_data + original_node = None if self.original_channel_id and self.original_source_node_id: - original_channel = Channel.objects.get(pk=self.original_channel_id) - return original_channel.main_tree.get_descendants().filter(node_id=self.original_source_node_id).first() or self - - # TEMPORARY: until all nodes have proper sources set (e.g. 
source_node_id) - return self.original_node or self - + current_channel = self.get_channel() + if current_channel and self.original_channel_id == current_channel.pk: + original_node = self + else: + original_channel = Channel.objects.get(pk=self.original_channel_id) + original_node = original_channel.main_tree.get_descendants().filter(node_id=self.original_source_node_id).first() or self + else: + original_node = self.original_node or self + cache.set(key, original_node, None) + return original_node + + def get_associated_presets(self): + key = "associated_presets_{}".format(self.kind_id) + cached_data = cache.get(key) + if cached_data: + return cached_data + presets = FormatPreset.objects.filter(kind=self.kind).values() + cache.set(key, presets, None) + return presets def get_channel(self): root = self.get_root() diff --git a/contentcuration/contentcuration/serializers.py b/contentcuration/contentcuration/serializers.py index b9e2435f24..7e473d2fd6 100644 --- a/contentcuration/contentcuration/serializers.py +++ b/contentcuration/contentcuration/serializers.py @@ -287,75 +287,13 @@ class Meta: 'hints', 'raw_data', 'order', 'source_url', 'randomize', 'deleted') list_serializer_class = AssessmentListSerializer -class ContentNodeSerializer(BulkSerializerMixin, serializers.ModelSerializer): - children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) - id = serializers.CharField(required=False) - - ancestors = serializers.SerializerMethodField('get_node_ancestors') - files = FileSerializer(many=True, read_only=True) - assessment_items = AssessmentItemSerializer(many=True, read_only=True) - associated_presets = serializers.SerializerMethodField('retrieve_associated_presets') - metadata = serializers.SerializerMethodField('retrieve_metadata') - original_channel = serializers.SerializerMethodField('retrieve_original_channel') - valid = serializers.SerializerMethodField('check_valid') - - def check_valid(self, node): - if node.kind_id == content_kinds.TOPIC: 
- return True - elif node.kind_id == content_kinds.EXERCISE: - for aitem in node.assessment_items.exclude(type=exercises.PERSEUS_QUESTION): - answers = json.loads(aitem.answers) - correct_answers = filter(lambda a: a['correct'], answers) - if aitem.question == "" or len(answers) == 0 or len(correct_answers) == 0 or\ - any(filter(lambda a: a['answer'] == "", answers)) or\ - (aitem.type == exercises.SINGLE_SELECTION and len(correct_answers) > 1) or\ - any(filter(lambda h: h['hint'] == "", json.loads(aitem.hints))): - return False - return True - else: - return node.files.filter(preset__supplementary=False).exists() - - def retrieve_original_channel(self, node): - original = node.get_original_node() - channel = original.get_channel() if original else None - return {"id": channel.pk, "name": channel.name} if channel else None - - def retrieve_metadata(self, node): - if node.kind_id == content_kinds.TOPIC: - # TODO: Account for files duplicated in tree - # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ - # .filter(Q(contentnode_id__in=descendants.values_list('id', flat=True)) | Q(assessment_item_id__in=descendants.values_list('assessment_items__id', flat=True)))\ - # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) - descendants = node.get_descendants(include_self=True) - return { - "total_count" : node.get_descendant_count(), - "resource_count" : descendants.exclude(kind=content_kinds.TOPIC).count(), - "max_sort_order" : node.children.aggregate(max_sort_order=Max('sort_order'))['max_sort_order'] or 1, - "resource_size" : 0, # Make separate request - } - else: - # TODO: Account for files duplicated on node - # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ - # .filter(Q(contentnode=node) | Q(assessment_item_id__in=node.assessment_items.values_list('id', flat=True)))\ - # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) - 
assessment_size = node.assessment_items.aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0 - resource_size = node.files.aggregate(resource_size=Sum('file_size')).get('resource_size') or 0 - return { - "total_count" : 1, - "resource_count" : 1, - "max_sort_order" : node.sort_order, - "resource_size" : assessment_size + resource_size, - } - +class ContentNodeBaseSerializer(BulkSerializerMixin, serializers.ModelSerializer): @staticmethod def setup_eager_loading(queryset): """ Perform necessary eager loading of data. """ queryset = queryset.prefetch_related('children').prefetch_related('files').prefetch_related('assessment_items') return queryset - def retrieve_associated_presets(self, node): - return FormatPreset.objects.filter(kind=node.kind).values() - def to_internal_value(self, data): """ In order to be able to handle passing tag_name in array, @@ -457,12 +395,88 @@ def update(self, instance, validated_data): def get_node_ancestors(self,node): return node.get_ancestors().values_list('id', flat=True) + +class ContentNodeEditSerializer(ContentNodeBaseSerializer): + children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) + id = serializers.CharField(required=False) + ancestors = serializers.SerializerMethodField('get_node_ancestors') + metadata = serializers.SerializerMethodField('retrieve_metadata') + valid = serializers.SerializerMethodField('check_valid') + original_channel = serializers.SerializerMethodField('retrieve_original_channel') + associated_presets = serializers.SerializerMethodField('retrieve_associated_presets') + files = FileSerializer(many=True, read_only=True) + assessment_items = AssessmentItemSerializer(many=True, read_only=True) + + def retrieve_original_channel(self, node): + original = node.get_original_node() + channel = original.get_channel() if original else None + return {"id": channel.pk, "name": channel.name} if channel else None + + def retrieve_associated_presets(self, node): + return 
node.get_associated_presets() + + def check_valid(self, node): + if node.kind_id == content_kinds.TOPIC: + return True + elif node.kind_id == content_kinds.EXERCISE: + for aitem in node.assessment_items.exclude(type=exercises.PERSEUS_QUESTION): + answers = json.loads(aitem.answers) + correct_answers = filter(lambda a: a['correct'], answers) + if aitem.question == "" or len(answers) == 0 or len(correct_answers) == 0 or\ + any(filter(lambda a: a['answer'] == "", answers)) or\ + (aitem.type == exercises.SINGLE_SELECTION and len(correct_answers) > 1) or\ + any(filter(lambda h: h['hint'] == "", json.loads(aitem.hints))): + return False + return True + else: + return node.files.filter(preset__supplementary=False).exists() + + def retrieve_metadata(self, node): + if node.kind_id == content_kinds.TOPIC: + # TODO: Account for files duplicated in tree + # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ + # .filter(Q(contentnode_id__in=descendants.values_list('id', flat=True)) | Q(assessment_item_id__in=descendants.values_list('assessment_items__id', flat=True)))\ + # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) + descendants = node.get_descendants(include_self=True) + return { + "total_count" : node.get_descendant_count(), + "resource_count" : descendants.exclude(kind=content_kinds.TOPIC).count(), + "max_sort_order" : node.children.aggregate(max_sort_order=Max('sort_order'))['max_sort_order'] or 1, + "resource_size" : 0, # Make separate request + "has_changed_descendant" : descendants.filter(changed=True).exists(), + } + else: + # TODO: Account for files duplicated on node + # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ + # .filter(Q(contentnode=node) | Q(assessment_item_id__in=node.assessment_items.values_list('id', flat=True)))\ + # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) + assessment_size = 
node.assessment_items.aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0 + resource_size = node.files.aggregate(resource_size=Sum('file_size')).get('resource_size') or 0 + resource_count = 1 + if node.kind_id == content_kinds.EXERCISE: + resource_count = node.assessment_items.filter(deleted=False).count() + + return { + "total_count" : 1, + "resource_count" : resource_count, + "max_sort_order" : node.sort_order, + "resource_size" : assessment_size + resource_size, + "has_changed_descendant" : node.changed, + } + class Meta: list_serializer_class = CustomListSerializer model = ContentNode - fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'original_node', 'cloned_source', 'original_channel','original_source_node_id', 'source_node_id', 'node_id', - 'copyright_holder', 'license', 'license_description', 'kind', 'children', 'parent', 'content_id','associated_presets', 'valid', 'original_channel_id', 'source_channel_id', - 'ancestors', 'tags', 'files', 'metadata', 'created', 'modified', 'published', 'extra_fields', 'assessment_items', 'source_id', 'source_domain') + fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'copyright_holder', 'license', 'license_description','assessment_items', 'files', + 'kind', 'parent', 'children', 'published', 'associated_presets', 'valid', 'metadata', 'ancestors', 'tags', 'extra_fields', 'original_channel') + +class ContentNodeSerializer(ContentNodeEditSerializer): + class Meta: + list_serializer_class = CustomListSerializer + model = ContentNode + fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'node_id', 'copyright_holder', 'license', 'license_description', 'kind', + 'original_channel','original_source_node_id', 'source_node_id', 'content_id', 'original_channel_id', 'source_channel_id', 'source_id', 'source_domain', + 'children', 'parent', 'tags', 'created', 'modified', 'published', 'extra_fields', 'assessment_items', 'files', 'valid', 
'metadata') class RootNodeSerializer(serializers.ModelSerializer): children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) @@ -568,7 +582,6 @@ class Meta: model = Channel fields = ('id', 'created', 'name', 'view_only', 'published', 'pending_editors', 'editors', 'description', 'size', 'count', 'version', 'public', 'thumbnail_url', 'thumbnail', 'deleted') - class UserSerializer(serializers.ModelSerializer): class Meta: model = User diff --git a/contentcuration/contentcuration/static/js/edit_channel/router.js b/contentcuration/contentcuration/static/js/edit_channel/router.js index 18ef9cee92..1a52086c28 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/router.js +++ b/contentcuration/contentcuration/static/js/edit_channel/router.js @@ -46,7 +46,6 @@ ChannelEditRouter = Backbone.Router.extend({ clipboard_page:function(){ this.open_channel(true, true, window.current_user.get_clipboard()); }, - open_channel: function(edit_mode_on, is_clipboard, root){ window.fileformats = this.fileformats ; window.channels = this.channelCollection; diff --git a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars index b6130a02ec..d50117e3a9 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars @@ -24,7 +24,7 @@ {{#unless isfolder}} {{#if isexercise}} - {{#format_count "Question" num_questions}}{{/format_count}} + {{#format_count "Question" node.metadata.resource_count}}{{/format_count}} {{else}} {{#format_file_size node.metadata.resource_size}}{{/format_file_size}} diff --git a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js 
b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js index 53c507ad7a..df17ccfa2c 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js @@ -297,8 +297,7 @@ var ContentItem = BaseViews.BaseWorkspaceListNodeItemView.extend({ checked: this.checked, isexercise: this.model.get("kind") === "exercise", description_first: description[0], - description_overflow: description[1], - num_questions: _.where(this.model.get('assessment_items'), {'deleted': false}).length + description_overflow: description[1] })); this.handle_checked(); if(this.isSelected){ diff --git a/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars b/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars index b2bc5074ed..9ce6f95488 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars @@ -8,7 +8,7 @@