From 5a6c8a1d05a885dd4a85b2ebde1b49690edbd0e9 Mon Sep 17 00:00:00 2001
From: Tom Hacohen
Date: Sun, 27 Dec 2020 10:53:01 +0200
Subject: [PATCH] Gracefully handle uploading the same revision

This is needed so that immediately re-played requests don't fail.

Consider, for example, the following scenario: a client makes a batch
request that registers correctly on the server but fails to return
(e.g. a networking error after the request has been processed). The
client would think the request failed, even though the server already
has the up-to-date information.

This commit returns a successful status when the same request is sent
again (by the client retrying it), instead of returning a conflict.

It doesn't, however, handle the case of a request failing, a
modification being made by another client, and the request then being
retried. That case will still fail.
---
 django_etebase/serializers.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/django_etebase/serializers.py b/django_etebase/serializers.py
index 32a2757..06bc8ad 100644
--- a/django_etebase/serializers.py
+++ b/django_etebase/serializers.py
@@ -239,6 +239,11 @@ class CollectionItemSerializer(BetterErrorsMixin, serializers.ModelSerializer):
             # We don't have to use select_for_update here because the unique constraint on current guards against
             # the race condition. But it's a good idea because it'll lock and wait rather than fail.
             current_revision = instance.revisions.filter(current=True).select_for_update().first()
+
+            # If we are just re-uploading the same revision, consider it a success and return.
+            if current_revision.uid == revision_data.get("uid"):
+                return instance
+
             current_revision.current = None
             current_revision.save()
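
For illustration, below is a minimal sketch of the client-side retry pattern this change makes safe. It is not part of the patch or of the actual Etebase client API: the endpoint URL, payload shape, and helper name are hypothetical assumptions. The point is only that re-sending the identical batch after a lost response is now idempotent, since the server answers a replay of an already-applied revision with a success rather than a conflict.

    import requests

    def upload_batch_with_retry(session, url, batch, attempts=3):
        """Re-send the same batch when the response is lost (hypothetical helper).

        Safe to do blindly because the server treats a replay of an
        already-applied revision as a success instead of a conflict.
        """
        last_error = None
        for _ in range(attempts):
            try:
                response = session.post(url, json={"items": batch}, timeout=30)
                response.raise_for_status()
                return response.json()
            except (requests.ConnectionError, requests.Timeout) as error:
                # The reply was lost; the request itself may still have been
                # processed on the server, so simply retry with the same body.
                last_error = error
        raise last_error

As noted in the commit message, this only covers the plain replay case; if another client modified the item between the failed attempt and the retry, the retry will still be rejected as a conflict.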