mirror of https://github.com/etesync/android synced 2024-11-15 20:38:58 +00:00

SyncManager: change pushing entries in chunks to also process in chunks

Android annoyingly kills sync managers that don't generate a significant
amount of network traffic within a given minute. This means that when we
have a lot of entries to prepare for pushing, the system may kill us
before we ever reach the network. We were already sending in chunks for
network performance; now the whole prepare-and-push process works in
chunks.
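
In outline, the new flow is a do/while loop that prepares at most
MAX_PUSH entries and immediately pushes them. A minimal standalone
sketch of that pattern, assuming hypothetical stand-ins (ChunkedPusher,
prepareChunk(), pushChunk()) for SyncManager, createLocalEntries() and
pushEntries(), with an illustrative MAX_PUSH value:

    import java.util.ArrayList;
    import java.util.List;

    // Minimal sketch of the chunked prepare-and-push pattern; names and
    // the MAX_PUSH value are illustrative, not the real SyncManager API.
    public class ChunkedPusher {
        static final int MAX_PUSH = 30;

        private final List<String> pending;                   // local changes not yet prepared
        private final List<String> chunk = new ArrayList<>(); // entries prepared for one push

        ChunkedPusher(List<String> pending) {
            this.pending = new ArrayList<>(pending);
        }

        // Prepare at most MAX_PUSH entries, like createLocalEntries()
        // returning early once localEntries.size() == MAX_PUSH.
        private void prepareChunk() {
            chunk.clear();
            while (!pending.isEmpty() && chunk.size() < MAX_PUSH) {
                chunk.add(pending.remove(0));
            }
        }

        // One network round-trip per chunk, like pushEntries().
        private void pushChunk() {
            System.out.println("Pushing " + chunk.size() + " entries");
        }

        public void sync() {
            // Bounded local work followed by a network call on every
            // iteration, so traffic keeps flowing instead of preparing
            // everything up front.
            do {
                prepareChunk();
                if (!chunk.isEmpty()) {
                    pushChunk();
                }
            } while (chunk.size() == MAX_PUSH);
        }

        public static void main(String[] args) {
            List<String> changes = new ArrayList<>();
            for (int i = 0; i < 75; i++) {
                changes.add("entry-" + i);
            }
            new ChunkedPusher(changes).sync();  // pushes chunks of 30, 30, 15
        }
    }

The diff below applies the same shape to the real code:
createLocalEntries() stops once localEntries reaches MAX_PUSH,
pushEntries() sends that batch in a single request, and the outer
do/while repeats until a short (non-full) chunk shows that everything
has been pushed.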

This should fix an issue reported by a user who imported a significant
number of contacts in one go.

This is similar to the issue fixed for fetch in:
f7104bbcef
Author: Tom Hacohen
Date:   2018-01-24 16:02:20 +00:00
Parent: 5a2f8260c7
Commit: 2f1a9281b8


@@ -40,6 +40,7 @@ import org.apache.commons.collections4.ListUtils;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
@@ -173,26 +174,28 @@ abstract public class SyncManager {
                 applyRemoteEntries();
             } while (remoteEntries.size() == MAX_FETCH);

-            /* Create journal entries out of local changes. */
-            if (Thread.interrupted())
-                throw new InterruptedException();
-            syncPhase = R.string.sync_phase_create_local_entries;
-            App.log.info("Sync phase: " + context.getString(syncPhase));
-            createLocalEntries();
+            do {
+                /* Create journal entries out of local changes. */
+                if (Thread.interrupted())
+                    throw new InterruptedException();
+                syncPhase = R.string.sync_phase_create_local_entries;
+                App.log.info("Sync phase: " + context.getString(syncPhase));
+                createLocalEntries();

-            if (Thread.interrupted())
-                throw new InterruptedException();
-            syncPhase = R.string.sync_phase_apply_local_entries;
-            App.log.info("Sync phase: " + context.getString(syncPhase));
-            /* FIXME: Skipping this now, because we already override with remote.
-            applyLocalEntries();
-            */
+                if (Thread.interrupted())
+                    throw new InterruptedException();
+                syncPhase = R.string.sync_phase_apply_local_entries;
+                App.log.info("Sync phase: " + context.getString(syncPhase));
+                /* FIXME: Skipping this now, because we already override with remote.
+                applyLocalEntries();
+                */

-            if (Thread.interrupted())
-                throw new InterruptedException();
-            syncPhase = R.string.sync_phase_push_entries;
-            App.log.info("Sync phase: " + context.getString(syncPhase));
-            pushEntries();
+                if (Thread.interrupted())
+                    throw new InterruptedException();
+                syncPhase = R.string.sync_phase_push_entries;
+                App.log.info("Sync phase: " + context.getString(syncPhase));
+                pushEntries();
+            } while (localEntries.size() == MAX_PUSH);

             /* Cleanup and finalize changes */
             if (Thread.interrupted())
@@ -382,26 +385,30 @@ abstract public class SyncManager {
         // FIXME: Deal with failure (someone else uploaded before we go here)
         try {
             if (!localEntries.isEmpty()) {
-                for (List<JournalEntryManager.Entry> entries : ListUtils.partition(localEntries, MAX_PUSH)) {
-                    journal.create(entries, remoteCTag);
-                    // Persist the entries after they've been pushed
-                    for (JournalEntryManager.Entry entry : entries) {
-                        SyncEntry cEntry = SyncEntry.fromJournalEntry(crypto, entry);
-                        persistSyncEntry(entry.getUid(), cEntry);
-                    }
-                    remoteCTag = entries.get(entries.size() - 1).getUid();
-                    pushed += entries.size();
-                }
+                List<JournalEntryManager.Entry> entries = localEntries;
+                journal.create(entries, remoteCTag);
+                // Persist the entries after they've been pushed
+                for (JournalEntryManager.Entry entry : entries) {
+                    SyncEntry cEntry = SyncEntry.fromJournalEntry(crypto, entry);
+                    persistSyncEntry(entry.getUid(), cEntry);
+                }
+                remoteCTag = entries.get(entries.size() - 1).getUid();
+                pushed += entries.size();
             }
         } finally {
             // FIXME: A bit fragile, we assume the order in createLocalEntries
+            int left = pushed;
             for (LocalResource local : localDeleted) {
                 if (pushed-- <= 0) {
                     break;
                 }
                 local.delete();
             }
+            if (left > 0) {
+                localDeleted.subList(0, Math.min(left, localDeleted.size())).clear();
+            }

+            left = pushed;
             for (LocalResource local : localDirty) {
                 if (pushed-- <= 0) {
                     break;
@@ -409,13 +416,13 @@ abstract public class SyncManager {
                 App.log.info("Added/changed resource with UUID: " + local.getUuid());
                 local.clearDirty(local.getUuid());
             }
+            if (left > 0) {
+                localDirty = Arrays.copyOfRange(localDirty, left, localDirty.length);
+            }

             if (pushed > 0) {
                 App.log.severe("Unprocessed localentries left, this should never happen!");
             }
-
-            localDirty = null;
-            localDeleted = null;
         }
     }
@@ -431,6 +438,10 @@ abstract public class SyncManager {
             tmp.update(crypto, entry.toJson(), previousEntry);
             previousEntry = tmp;
             localEntries.add(previousEntry);
+
+            if (localEntries.size() == MAX_PUSH) {
+                return;
+            }
         }

         for (LocalResource local : localDirty) {
@@ -446,6 +457,10 @@ abstract public class SyncManager {
             tmp.update(crypto, entry.toJson(), previousEntry);
             previousEntry = tmp;
             localEntries.add(previousEntry);
+
+            if (localEntries.size() == MAX_PUSH) {
+                return;
+            }
         }
     }