Branch 'kolab/integration/4.13.0' - 4 commits - akonadi/monitor_p.cpp akonadi/tests akonadi/transactionsequence.cpp
Christian Mollekopf
mollekopf at kolabsys.com
Mon Oct 6 09:58:50 CEST 2014
akonadi/monitor_p.cpp | 34 ++++++++----
akonadi/tests/itemsynctest.cpp | 110 ++++++++++++++++++++++++++++++++++++++++
akonadi/transactionsequence.cpp | 10 +++
3 files changed, 143 insertions(+), 11 deletions(-)
New commits:
commit cab0de7f76e0e7fc5a3c612b3fdfd8c8e3353a70
Author: Dan Vrátil <dvratil at redhat.com>
Date: Wed Oct 1 20:56:53 2014 +0200
Monitor: don't try to compress new notifications
The server has performed all compressions already (currently that means it only
compressed Collection Modify notifications), so there's no need to iterate over
the (potentially very long) queue of notifications again in each client.
diff --git a/akonadi/monitor_p.cpp b/akonadi/monitor_p.cpp
index 6ce4c6b..7fa898c 100644
--- a/akonadi/monitor_p.cpp
+++ b/akonadi/monitor_p.cpp
@@ -615,12 +615,14 @@ int MonitorPrivate::translateAndCompress(QQueue< NotificationMessageV3 > &notificationQueue
// We have to split moves into insert or remove if the source or destination
// is not monitored.
if (msg.operation() != NotificationMessageV2::Move) {
- return NotificationMessageV3::appendAndCompress(notificationQueue, msg) ? 1 : 0;
+ notificationQueue.enqueue(msg);
+ return 1;
}
// Always handle tags
if (msg.type() == NotificationMessageV2::Tags) {
- return NotificationMessageV3::appendAndCompress(notificationQueue, msg);
+ notificationQueue.enqueue(msg);
+ return 1;
}
bool sourceWatched = false;
@@ -647,7 +649,8 @@ int MonitorPrivate::translateAndCompress(QQueue< NotificationMessageV3 > &notificationQueue
}
if ((sourceWatched && destWatched) || (!collectionMoveTranslationEnabled && msg.type() == NotificationMessageV2::Collections)) {
- return NotificationMessageV3::appendAndCompress(notificationQueue, msg) ? 1 : 0;
+ notificationQueue.enqueue(msg);
+ return 1;
}
if (sourceWatched) {
@@ -655,7 +658,8 @@ int MonitorPrivate::translateAndCompress(QQueue< NotificationMessageV3 > &notificationQueue
NotificationMessageV3 removalMessage = msg;
removalMessage.setOperation(NotificationMessageV2::Remove);
removalMessage.setParentDestCollection(-1);
- return NotificationMessageV3::appendAndCompress(notificationQueue, removalMessage) ? 1 : 0;
+ notificationQueue.enqueue(removalMessage);
+ return 1;
}
// Transform into an insertion
@@ -665,13 +669,10 @@ int MonitorPrivate::translateAndCompress(QQueue< NotificationMessageV3 > &notificationQueue
insertionMessage.setParentDestCollection(-1);
// We don't support batch insertion, so we have to do it one by one
const NotificationMessageV3::List split = splitMessage(insertionMessage, false);
- int appended = 0;
Q_FOREACH (const NotificationMessageV3 &insertion, split) {
- if (NotificationMessageV3::appendAndCompress(notificationQueue, insertion)) {
- ++appended;
- }
+ notificationQueue.enqueue(insertion);
}
- return appended;
+ return split.count();
}
/*
commit afc24995c526e6f6ee3d231a21ce21bf08fb69d4
Author: Dan Vrátil <dvratil at redhat.com>
Date: Fri Sep 26 12:37:07 2014 +0200
Always assume someoneWasListening when we get an invalid notification in emitNotification()
When we get invalid entry from EntityCache in emitNotification(), we don't
emit the notification (because we don't have any item to emit it on) and
instead just skip it silently. This change makes sure that we set
someoneWasListening to true (even though we don't know if someone was really
listening), so that we avoid calling cleanOldNotifications() later, because
it's rather expensive.
diff --git a/akonadi/monitor_p.cpp b/akonadi/monitor_p.cpp
index 8213896..6ce4c6b 100644
--- a/akonadi/monitor_p.cpp
+++ b/akonadi/monitor_p.cpp
@@ -534,6 +534,11 @@ bool MonitorPrivate::emitNotification(const NotificationMessageV3 &msg)
if (emitCollectionNotification(msg, col, parent, destParent) && !someoneWasListening) {
someoneWasListening = true;
}
+ } else {
+ // We don't know if someone is actually listening, but we don't want
+ // to trigger cleanOldNotifications() every time we run into an invalid
+ // notification, because it's rather expensive
+ someoneWasListening = true;
}
}
} else if (msg.type() == NotificationMessageV2::Items) {
@@ -541,6 +546,8 @@ bool MonitorPrivate::emitNotification(const NotificationMessageV3 &msg)
const Item::List items = itemCache->retrieve(msg.uids());
if (!items.isEmpty()) {
someoneWasListening = emitItemsNotification(msg, items, parent, destParent);
+ } else {
+ someoneWasListening = true;
}
}
}
commit 1a576d44171f26d7f14e03bb80398f8534eefcd9
Author: Dan Vrátil <dvratil at redhat.com>
Date: Mon Sep 22 15:22:27 2014 +0200
Monitor: handle correctly situation when item no longer exists on server
This can happen when item is removed on the server, but we only now got to
actually process the notification.
diff --git a/akonadi/monitor_p.cpp b/akonadi/monitor_p.cpp
index c439611..8213896 100644
--- a/akonadi/monitor_p.cpp
+++ b/akonadi/monitor_p.cpp
@@ -530,14 +530,18 @@ bool MonitorPrivate::emitNotification(const NotificationMessageV3 &msg)
Collection col;
Q_FOREACH (const NotificationMessageV2::Entity &entity, msg.entities()) {
col = collectionCache->retrieve(entity.id);
- if (emitCollectionNotification(msg, col, parent, destParent) && !someoneWasListening) {
- someoneWasListening = true;
+ if (col.isValid()) {
+ if (emitCollectionNotification(msg, col, parent, destParent) && !someoneWasListening) {
+ someoneWasListening = true;
+ }
}
}
} else if (msg.type() == NotificationMessageV2::Items) {
//In case of a Remove notification this will return a list of invalid entities (we'll deal later with them)
const Item::List items = itemCache->retrieve(msg.uids());
- someoneWasListening = emitItemsNotification(msg, items, parent, destParent);
+ if (!items.isEmpty()) {
+ someoneWasListening = emitItemsNotification(msg, items, parent, destParent);
+ }
}
}
commit fce7fc673e8554e28f9da63daaaaa309bd65c020
Author: Christian Mollekopf <chrigi_1 at fastmail.fm>
Date: Sun Oct 5 19:40:49 2014 +0200
ItemSync/TransactionSequence: Don't abort on error
The ItemSync must never emit result before it's complete. This patch fixes
this behaviour in case of an error on the ItemCreateJob.
* Adding a job does not reset the status if a rollback was initiated already,
and thus avoids a second rollback on commit() after one was already performed
earlier (this resulted in the "No transaction in progress" error message).
* The rollback job, which is likewise added using addSubjob, still gets
executed as usual.
diff --git a/akonadi/tests/itemsynctest.cpp b/akonadi/tests/itemsynctest.cpp
index d0b7cee..9ea7453 100644
--- a/akonadi/tests/itemsynctest.cpp
+++ b/akonadi/tests/itemsynctest.cpp
@@ -448,6 +448,116 @@ class ItemsyncTest : public QObject
QCOMPARE(fetchJob->items().first().remoteId(), QString::fromLatin1("rid3"));
}
+ /*
+ * This test verifies that ItemSync doesn't prematurely emit its result if a job inside a transaction fails.
+ * ItemSync is supposed to continue the sync by simply ignoring all delivered data.
+ */
+ void testFailingJob()
+ {
+ const Collection col = Collection( collectionIdFromPath( "res1/foo" ) );
+ QVERIFY( col.isValid() );
+ Item::List origItems = fetchItems( col );
+
+ ItemSync* syncer = new ItemSync( col );
+ QSignalSpy transactionSpy(syncer, SIGNAL(transactionCommitted()));
+ QVERIFY(transactionSpy.isValid());
+ QSignalSpy spy( syncer, SIGNAL(result(KJob*)) );
+ QVERIFY( spy.isValid() );
+ syncer->setStreamingEnabled( true );
+ syncer->setTransactionMode(ItemSync::MultipleTransactions);
+ QTest::qWait( 0 );
+ QCOMPARE( spy.count(), 0 );
+
+ for ( int i = 0; i < syncer->batchSize(); ++i ) {
+ Item::List l;
+ //Modify to trigger a changed signal
+ Item item = modifyItem(origItems[i]);
+ // item.setRemoteId(QByteArray("foo"));
+ item.setRemoteId(QByteArray());
+ item.setId(-1);
+ l << item;
+ syncer->setIncrementalSyncItems( l, Item::List() );
+ if ( i < (syncer->batchSize() - 1) ) {
+ QTest::qWait( 0 ); // enter the event loop so itemsync actually can do something
+ }
+ QCOMPARE( spy.count(), 0 );
+ }
+ QTest::qWait(100);
+ QTRY_COMPARE( spy.count(), 0 );
+
+ for ( int i = syncer->batchSize(); i < origItems.count(); ++i ) {
+ Item::List l;
+ //Modify to trigger a changed signal
+ l << modifyItem(origItems[i]);
+ syncer->setIncrementalSyncItems( l, Item::List() );
+ if ( i < origItems.count() - 1 ) {
+ QTest::qWait( 0 ); // enter the event loop so itemsync actually can do something
+ }
+ QCOMPARE( spy.count(), 0 );
+ }
+
+ syncer->deliveryDone();
+ QTRY_COMPARE( spy.count(), 1 );
+ }
+
+ /*
+ * This test verifies that ItemSync doesn't prematurely emit its result if a job inside a transaction fails due to a duplicate.
+ * This case used to break the TransactionSequence.
+ * ItemSync is supposed to continue the sync by simply ignoring all delivered data.
+ */
+ void testFailingDueToDuplicateJob()
+ {
+ const Collection col = Collection( collectionIdFromPath( "res1/foo" ) );
+ QVERIFY( col.isValid() );
+ Item::List origItems = fetchItems( col );
+
+ //Create a duplicate that will trigger an error during the first batch
+ Item duplicate = origItems.first();
+ duplicate.setId(-1);
+ {
+ ItemCreateJob *job = new ItemCreateJob(duplicate, col);
+ AKVERIFYEXEC(job);
+ }
+ origItems = fetchItems( col );
+
+ ItemSync* syncer = new ItemSync( col );
+ QSignalSpy transactionSpy(syncer, SIGNAL(transactionCommitted()));
+ QVERIFY(transactionSpy.isValid());
+ QSignalSpy spy( syncer, SIGNAL(result(KJob*)) );
+ QVERIFY( spy.isValid() );
+ syncer->setStreamingEnabled( true );
+ syncer->setTransactionMode(ItemSync::MultipleTransactions);
+ QTest::qWait( 0 );
+ QCOMPARE( spy.count(), 0 );
+
+ for ( int i = 0; i < syncer->batchSize(); ++i ) {
+ Item::List l;
+ //Modify to trigger a changed signal
+ l << modifyItem(origItems[i]);
+ syncer->setIncrementalSyncItems( l, Item::List() );
+ if ( i < (syncer->batchSize() - 1) ) {
+ QTest::qWait( 0 ); // enter the event loop so itemsync actually can do something
+ }
+ QCOMPARE( spy.count(), 0 );
+ }
+ QTest::qWait(100);
+ //Ensure the job hasn't finished yet due to the errors
+ QTRY_COMPARE( spy.count(), 0 );
+
+ for ( int i = syncer->batchSize(); i < origItems.count(); ++i ) {
+ Item::List l;
+ //Modify to trigger a changed signal
+ l << modifyItem(origItems[i]);
+ syncer->setIncrementalSyncItems( l, Item::List() );
+ if ( i < origItems.count() - 1 ) {
+ QTest::qWait( 0 ); // enter the event loop so itemsync actually can do something
+ }
+ QCOMPARE( spy.count(), 0 );
+ }
+
+ syncer->deliveryDone();
+ QTRY_COMPARE( spy.count(), 1 );
+ }
};
QTEST_AKONADIMAIN( ItemsyncTest, NoGUI )
diff --git a/akonadi/transactionsequence.cpp b/akonadi/transactionsequence.cpp
index 2d6bb7b..e5939e1 100644
--- a/akonadi/transactionsequence.cpp
+++ b/akonadi/transactionsequence.cpp
@@ -84,6 +84,16 @@ bool TransactionSequence::addSubjob(KJob *job)
{
Q_D(TransactionSequence);
+ //Don't abort the rollback job, while keeping the state set.
+ if (d->mState == TransactionSequencePrivate::RollingBack) {
+ return Job::addSubjob(job);
+ }
+
+ if (error()) {
+ //This can happen if a rollback is in progress, so make sure we don't set the state back to running.
+ job->kill(EmitResult);
+ return false;
+ }
// TODO KDE5: remove property hack once SpecialCollectionsRequestJob has been fixed
if (d->mState == TransactionSequencePrivate::Idle && !property("transactionsDisabled").toBool()) {
d->mState = TransactionSequencePrivate::Running; // needs to be set before creating the transaction job to avoid infinite recursion
More information about the commits
mailing list