Merge lp:~verterok/ubuntuone-client/transactions-for-sync into lp:ubuntuone-client

Proposed by Guillermo Gonzalez
Status: Merged
Approved by: dobey
Approved revision: 131
Merged at revision: not available
Proposed branch: lp:~verterok/ubuntuone-client/transactions-for-sync
Merge into: lp:ubuntuone-client
Diff against target: None lines
To merge this branch: bzr merge lp:~verterok/ubuntuone-client/transactions-for-sync
Reviewer Review Type Date Requested Status
dobey (community) Approve
Elliot Murphy (community) Approve
Review via email: mp+9974@code.launchpad.net

Commit message

Add 'transactions' to Sync (FSKey), in line with the FileSystemManager changes

Revision history for this message
Guillermo Gonzalez (verterok) wrote :

This branch adds "transactions" to the syncdaemon Sync class: almost all FileSystemManager operations are now queued and executed only when FSKey.sync() is called. This allows us to update the metadata once instead of doing multiple writes (scanning a ~9k file tree is ~15 seconds faster).
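
A minimal sketch of that batching pattern, based on the FSKey changes in the diff below (class and attribute names are simplified for illustration; the real class in ubuntuone/syncdaemon/sync.py resolves the mdid from lookup keys):

    class FSKeySketch(object):
        """Illustrative sketch (not the branch's exact class) of how FSKey
        batches metadata writes after this change."""

        def __init__(self, fs, mdid):
            self.fs = fs          # the FileSystemManager
            self.mdid = mdid      # metadata id of the node
            self._changes = {}    # queued attribute changes

        def set(self, **kwargs):
            """Queue the changes instead of writing them to FSM right away."""
            self._changes.update(kwargs)

        def sync(self):
            """Flush all queued changes in a single set_by_mdid() write."""
            if self._changes:
                self.fs.set_by_mdid(self.mdid, **self._changes)

    # Usage, mirroring the event handlers in sync.py: several set() calls,
    # then one metadata write when the handler is done.
    #   key = FSKeySketch(fsm, mdid)
    #   key.set(server_hash="", local_hash="")
    #   key.set(stat=stat)
    #   key.sync()   # one set_by_mdid() call instead of three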

Revision history for this message
Elliot Murphy (statik) :
review: Approve
Revision history for this message
dobey (dobey) wrote :

Looks ok to me.

review: Approve

Preview Diff

=== modified file 'tests/syncdaemon/test_fsm.py'
--- tests/syncdaemon/test_fsm.py 2009-07-31 18:24:54 +0000
+++ tests/syncdaemon/test_fsm.py 2009-08-11 12:22:34 +0000
@@ -160,6 +160,28 @@
         now = time.time()
         self.assertTrue(now-3 <= when <= now) # 3 seconds test range
 
+    def test_with_node_id(self):
+        '''Test creation with node_id'''
+        # create, but not twice
+        path = os.path.join(self.share.path, 'path')
+        self.fsm.create(path, "share", node_id='a_node_id')
+        self.assertRaises(ValueError, self.fsm.create, path, "share")
+        self.assertRaises(ValueError, self.fsm.create, path, "other")
+        mdobj = self.fsm.get_by_path(path)
+        self.assertEqual(mdobj.path, "path")
+        self.assertEqual(mdobj.share_id, "share")
+        self.assertEqual(mdobj.node_id, "a_node_id")
+        when = mdobj.info.created
+        now = time.time()
+        self.assertTrue(now-3 <= when <= now) # 3 seconds test range
+
+        # set uuid using valid path, but not twice
+        self.assertRaises(ValueError, self.fsm.set_node_id, path, "whatever")
+        mdobj = self.fsm.get_by_path(path)
+        when = mdobj.info.node_id_assigned
+        now = time.time()
+        self.assertTrue(now-3 <= when <= now) # 3 seconds test range
+
     def test_invalid_args(self):
         '''Test using invalid args in set_node_id.'''
         path = os.path.join(self.share.path, 'path')
@@ -180,7 +202,7 @@
         self.fsm.set_node_id(path, "uuid")
 
         # opening another FSM
-        fsm = FileSystemManager(self.fsmdir, self.fsm.vm)
+        FileSystemManager(self.fsmdir, self.fsm.vm)
         self.fsm.set_node_id(path, "uuid")
 
     def test_twice_different_bad(self):
@@ -519,12 +541,6 @@
         self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, info="-")
         self.assertRaises(ValueError, self.fsm.set_by_path, path, info="-")
 
-        # test with forbidden stat
-        self.assertRaises(ValueError, self.fsm.set_by_node_id, "uuid", "share",
-                          stat="-")
-        self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, stat="-")
-        self.assertRaises(ValueError, self.fsm.set_by_path, path, stat="-")
-
         # test with forbidden share
         self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, share_id="-")
         self.assertRaises(ValueError, self.fsm.set_by_path, path, share_id="-")
@@ -666,6 +682,31 @@
         self.assertTrue(mdid2 in all)
         self.assertTrue(mdid3 in all)
 
+    def test_internal_set_node_id(self):
+        """Test _set_node_id"""
+        path = os.path.join(self.share.path, 'path')
+        mdid = self.fsm.create(path, "share")
+        mdobj = self.fsm.fs[mdid]
+        # yes, it's a unit test, I access protected members.
+        # pylint: disable-msg=W0212
+        self.fsm._set_node_id(mdobj, "uuid", path)
+
+        self.assertEquals('uuid', mdobj['node_id'])
+        self.fsm.set_node_id(path, "uuid")
+        new_mdobj = self.fsm.get_by_node_id('share', 'uuid')
+        for k, v in mdobj.items():
+            if k == 'info':
+                for k1, v1 in v.items():
+                    self.assertEquals(v1, getattr(new_mdobj.info, k1))
+            else:
+                self.assertEquals(v, getattr(new_mdobj, k))
+
+        # test using bad uuid
+        mdobj = self.fsm.fs[mdid]
+        self.assertEquals('uuid', mdobj['node_id'])
+        self.assertRaises(ValueError,
+                          self.fsm._set_node_id, mdobj, 'bad-uuid', path)
+
 
 class StatTests(FSMTestCase):
     '''Test all the behaviour regarding the stats.'''
@@ -772,7 +813,7 @@
         mdobj2 = self.fsm.get_by_path(path2)
         self.assertEqual(mdobj2.stat, os.stat(path2))
 
-    def test_update_stat(self):
+    def test_set_stat_by_mdid(self):
         '''Test that update_stat works.'''
         path = os.path.join(self.share.path, "thisfile")
         open(path, "w").close()
@@ -787,7 +828,7 @@
         self.assertEqual(mdobj.stat, oldstat)
 
         # it's updated when asked, even if it's an old stat
-        self.fsm.update_stat(mdid, oldstat)
+        self.fsm.set_by_mdid(mdid, stat=oldstat)
         mdobj = self.fsm.get_by_mdid(mdid)
         self.assertEqual(mdobj.stat, oldstat)
 
 
=== modified file 'ubuntuone/syncdaemon/filesystem_manager.py'
--- ubuntuone/syncdaemon/filesystem_manager.py 2009-08-10 13:26:21 +0000
+++ ubuntuone/syncdaemon/filesystem_manager.py 2009-08-11 12:22:34 +0000
@@ -124,7 +124,7 @@
 logger = functools.partial(fsm_logger.log, logging.INFO)
 log_warning = functools.partial(fsm_logger.log, logging.WARNING)
 
-is_forbidden = set("info path node_id share_id is_dir stat".split()
+is_forbidden = set("info path node_id share_id is_dir".split()
                    ).intersection
 
 class InconsistencyError(Exception):
@@ -323,7 +323,7 @@
         if mdobj["node_id"] is not None:
             self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid
 
-    def create(self, path, share_id, is_dir=False):
+    def create(self, path, share_id, node_id=None, is_dir=False):
         '''Creates a new md object.'''
         if not path.strip():
             raise ValueError("Empty paths are not allowed (got %r)" % path)
@@ -341,6 +341,8 @@
         newobj["info"] = dict(created=time.time(), is_partial=False)
         # only one stat, (instead of os.path.exists & os.stat)
         newobj["stat"] = get_stat(path)
+        if node_id is not None:
+            self._set_node_id(newobj, node_id, path)
 
         logger("create: path=%r mdid=%r share_id=%r node_id=%r is_dir=%r" % (
             path, mdid, share_id, None, is_dir))
@@ -356,26 +358,29 @@
         path = os.path.normpath(path)
         mdid = self._idx_path[path]
         mdobj = self.fs[mdid]
+        self._set_node_id(mdobj, node_id, path)
+        self.fs[mdid] = mdobj
+
+    def _set_node_id(self, mdobj, node_id, path):
+        """Set the node_id to the mdobj, but don't 'save' the mdobj"""
         if mdobj["node_id"] is not None:
             # the object is already there! it's ok if it has the same id
             if mdobj["node_id"] == node_id:
                 logger("set_node_id (repeated!): path=%r mdid=%r node_id=%r"
-                       % (path, mdid, node_id))
+                       % (path, mdobj['mdid'], node_id))
                 return
             msg = "The path %r already has node_id (%r)" % (path, node_id)
             raise ValueError(msg)
-
         # adjust the index
         share_id = mdobj["share_id"]
-        self._idx_node_id[(share_id, node_id)] = mdid
+        self._idx_node_id[(share_id, node_id)] = mdobj['mdid']
 
         # set the node_id
         mdobj["node_id"] = node_id
         mdobj["info"]["node_id_assigned"] = time.time()
-        self.fs[mdid] = mdobj
 
         logger("set_node_id: path=%r mdid=%r share_id=%r node_id=%r" % (
-            path, mdid, share_id, node_id))
+            path, mdobj['mdid'], share_id, node_id))
 
     def get_mdobjs_by_share_id(self, share_id, base_path=None):
         """Returns all the mdobj that belongs to a share and it path
@@ -456,13 +461,6 @@
         mdobj["stat"] = get_stat(path)
         self.fs[mdid] = mdobj
 
-    def update_stat(self, mdid, stat):
-        '''Updates the stat of a md object.'''
-        logger("update stat of mdid=%r", mdid)
-        mdobj = self.fs[mdid]
-        mdobj["stat"] = stat
-        self.fs[mdid] = mdobj
-
     def move_file(self, new_share_id, path_from, path_to):
         '''Moves a file/dir from one point to the other.'''
         path_from = os.path.normpath(path_from)
 
=== modified file 'ubuntuone/syncdaemon/sync.py'
--- ubuntuone/syncdaemon/sync.py 2009-08-04 20:00:44 +0000
+++ ubuntuone/syncdaemon/sync.py 2009-08-11 00:36:15 +0000
@@ -42,9 +42,13 @@
42 """create"""42 """create"""
43 self.fs = fs43 self.fs = fs
44 self.keys = keys44 self.keys = keys
45 self.mdid = None
46 self._changes = {}
4547
46 def get_mdid(self):48 def get_mdid(self):
47 """Get the metadata id."""49 """Get the metadata id."""
50 if self.mdid is not None:
51 return self.mdid
48 if len(self.keys) == 1 and "path" in self.keys:52 if len(self.keys) == 1 and "path" in self.keys:
49 # pylint: disable-msg=W021253 # pylint: disable-msg=W0212
50 mdid = self.fs._idx_path[self.keys["path"]]54 mdid = self.fs._idx_path[self.keys["path"]]
@@ -59,6 +63,7 @@
59 raise KeyError("Incorrect keys: %s" % self.keys)63 raise KeyError("Incorrect keys: %s" % self.keys)
60 if mdid is None:64 if mdid is None:
61 raise KeyError("cant find mdid")65 raise KeyError("cant find mdid")
66 self.mdid = mdid
62 return mdid67 return mdid
6368
64 def get(self, key):69 def get(self, key):
@@ -88,8 +93,12 @@
8893
89 def set(self, **kwargs):94 def set(self, **kwargs):
90 """Set the values for kwargs."""95 """Set the values for kwargs."""
91 mdid = self.get_mdid()96 self._changes.update(kwargs)
92 self.fs.set_by_mdid(mdid, **kwargs)97
98 def sync(self):
99 """sync the changes back to FSM"""
100 if self._changes:
101 self.fs.set_by_mdid(self.get_mdid(), **self._changes)
93102
94 def has_metadata(self):103 def has_metadata(self):
95 """The State Machine value version of has_metadata."""104 """The State Machine value version of has_metadata."""
@@ -192,8 +201,6 @@
192 self.fs.create_file(self.get_mdid())201 self.fs.create_file(self.get_mdid())
193202
194203
195
196
197def loglevel(lvl):204def loglevel(lvl):
198 """Make a function that logs at lvl log level."""205 """Make a function that logs at lvl log level."""
199 def level_log(self, message, *args, **kwargs):206 def level_log(self, message, *args, **kwargs):
@@ -328,8 +335,8 @@
328 """create a local file."""335 """create a local file."""
329 mdobj = self.m.fs.get_by_node_id(share_id, parent_id)336 mdobj = self.m.fs.get_by_node_id(share_id, parent_id)
330 path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name)337 path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name)
331 self.m.fs.create(path=path, share_id=share_id, is_dir=True)338 self.m.fs.create(path=path, share_id=share_id, node_id=node_id,
332 self.m.fs.set_node_id(path, node_id)339 is_dir=True)
333 self.m.action_q.query([(share_id, node_id, "")])340 self.m.action_q.query([(share_id, node_id, "")])
334 # pylint: disable-msg=W0704341 # pylint: disable-msg=W0704
335 # this should be provided by FSM, fix!!342 # this should be provided by FSM, fix!!
@@ -370,10 +377,12 @@
370 self.key['node_id'],377 self.key['node_id'],
371 self.key['local_hash'] or "")])378 self.key['local_hash'] or "")])
372 self.key.set(server_hash=self.key['local_hash'])379 self.key.set(server_hash=self.key['local_hash'])
380 self.key.sync()
373381
374 def get_dir(self, event, params, hash):382 def get_dir(self, event, params, hash):
375 """Get the directory."""383 """Get the directory."""
376 self.key.set(server_hash=hash)384 self.key.set(server_hash=hash)
385 self.key.sync()
377 self.m.fs.create_partial(node_id=self.key['node_id'],386 self.m.fs.create_partial(node_id=self.key['node_id'],
378 share_id=self.key['share_id'])387 share_id=self.key['share_id'])
379 self.m.action_q.listdir(388 self.m.action_q.listdir(
@@ -410,6 +419,7 @@
410 except InconsistencyError:419 except InconsistencyError:
411 self.key.remove_partial()420 self.key.remove_partial()
412 self.key.set(server_hash=self.key['local_hash'])421 self.key.set(server_hash=self.key['local_hash'])
422 self.key.sync()
413 self.m.action_q.query([423 self.m.action_q.query([
414 (self.key["share_id"], self.key["node_id"], "")])424 (self.key["share_id"], self.key["node_id"], "")])
415 # we dont perform the merge, we try to re get it425 # we dont perform the merge, we try to re get it
@@ -478,15 +488,17 @@
478488
479 self.key.remove_partial()489 self.key.remove_partial()
480 self.key.set(local_hash=hash)490 self.key.set(local_hash=hash)
491 self.key.sync()
481492
482 def new_file(self, event, params, share_id, node_id, parent_id, name):493 def new_file(self, event, params, share_id, node_id, parent_id, name):
483 """create a local file."""494 """create a local file."""
484 mdobj = self.m.fs.get_by_node_id(share_id, parent_id)495 mdobj = self.m.fs.get_by_node_id(share_id, parent_id)
485 path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name)496 path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name)
486 self.m.fs.create(path=path, share_id=share_id, is_dir=False)497 self.m.fs.create(path=path, share_id=share_id, node_id=node_id,
487 self.m.fs.set_node_id(path, node_id)498 is_dir=False)
488 self.key.set(server_hash="")499 self.key.set(server_hash="")
489 self.key.set(local_hash="")500 self.key.set(local_hash="")
501 self.key.sync()
490 self.key.make_file()502 self.key.make_file()
491 self.m.action_q.query([(share_id, node_id, "")])503 self.m.action_q.query([(share_id, node_id, "")])
492504
@@ -500,6 +512,7 @@
500 def get_file(self, event, params, hash):512 def get_file(self, event, params, hash):
501 """Get the contents for the file."""513 """Get the contents for the file."""
502 self.key.set(server_hash=hash)514 self.key.set(server_hash=hash)
515 self.key.sync()
503 self.m.fs.create_partial(node_id=self.key['node_id'],516 self.m.fs.create_partial(node_id=self.key['node_id'],
504 share_id=self.key['share_id'])517 share_id=self.key['share_id'])
505 self.m.action_q.download(518 self.m.action_q.download(
@@ -513,6 +526,7 @@
513 def reget_file(self, event, params, hash):526 def reget_file(self, event, params, hash):
514 """cancel and reget this download."""527 """cancel and reget this download."""
515 self.key.set(server_hash=hash)528 self.key.set(server_hash=hash)
529 self.key.sync()
516 self.m.action_q.cancel_download(share_id=self.key['share_id'],530 self.m.action_q.cancel_download(share_id=self.key['share_id'],
517 node_id=self.key['node_id'])531 node_id=self.key['node_id'])
518 self.key.remove_partial()532 self.key.remove_partial()
@@ -547,6 +561,7 @@
547 def server_file_changed_back(self, event, params, hash):561 def server_file_changed_back(self, event, params, hash):
548 """cancel and dont reget this download."""562 """cancel and dont reget this download."""
549 self.key.set(server_hash=hash)563 self.key.set(server_hash=hash)
564 self.key.sync()
550 self.m.action_q.cancel_download(share_id=self.key['share_id'],565 self.m.action_q.cancel_download(share_id=self.key['share_id'],
551 node_id=self.key['node_id'])566 node_id=self.key['node_id'])
552 self.key.remove_partial()567 self.key.remove_partial()
@@ -561,6 +576,7 @@
561 # start work to go to a good state576 # start work to go to a good state
562 self.key.remove_partial()577 self.key.remove_partial()
563 self.key.set(server_hash=self.key['local_hash'])578 self.key.set(server_hash=self.key['local_hash'])
579 self.key.sync()
564 self.m.action_q.query([580 self.m.action_q.query([
565 (self.key["share_id"], self.key["node_id"], "")])581 (self.key["share_id"], self.key["node_id"], "")])
566582
@@ -574,6 +590,7 @@
574 self.m.fs.create(path=path, share_id=share_id, is_dir=False)590 self.m.fs.create(path=path, share_id=share_id, is_dir=False)
575 self.key.set(local_hash=empty_hash)591 self.key.set(local_hash=empty_hash)
576 self.key.set(server_hash=empty_hash)592 self.key.set(server_hash=empty_hash)
593 self.key.sync()
577 name = os.path.basename(path)594 name = os.path.basename(path)
578 marker = MDMarker(self.key.get_mdid())595 marker = MDMarker(self.key.get_mdid())
579 self.m.action_q.make_file(share_id, parent_id, name, marker)596 self.m.action_q.make_file(share_id, parent_id, name, marker)
@@ -611,8 +628,9 @@
611 def put_file(self, event, params, hash, crc32, size, stat):628 def put_file(self, event, params, hash, crc32, size, stat):
612 """upload the file to the server."""629 """upload the file to the server."""
613 previous_hash = self.key['server_hash']630 previous_hash = self.key['server_hash']
614 self.key.set(local_hash=hash)631 self.key.set(local_hash=hash, stat=stat)
615 self.m.fs.update_stat(self.key.get_mdid(), stat)632 self.key.sync()
633
616 self.m.action_q.upload(share_id=self.key['share_id'],634 self.m.action_q.upload(share_id=self.key['share_id'],
617 node_id=self.key['node_id'], previous_hash=previous_hash,635 node_id=self.key['node_id'], previous_hash=previous_hash,
618 hash=hash, crc32=crc32, size=size,636 hash=hash, crc32=crc32, size=size,
@@ -623,8 +641,8 @@
623 self.m.action_q.cancel_download(share_id=self.key['share_id'],641 self.m.action_q.cancel_download(share_id=self.key['share_id'],
624 node_id=self.key['node_id'])642 node_id=self.key['node_id'])
625 self.key.remove_partial()643 self.key.remove_partial()
626 self.key.set(local_hash=hash)644 self.key.set(local_hash=hash, stat=stat)
627 self.m.fs.update_stat(self.key.get_mdid(), stat)645 self.key.sync()
628646
629 def reput_file_from_ok(self, event, param, hash):647 def reput_file_from_ok(self, event, param, hash):
630 """put the file again, mark upload as ok"""648 """put the file again, mark upload as ok"""
@@ -632,6 +650,7 @@
632 node_id=self.key['node_id'])650 node_id=self.key['node_id'])
633 self.key.set(local_hash=hash)651 self.key.set(local_hash=hash)
634 self.key.set(server_hash=hash)652 self.key.set(server_hash=hash)
653 self.key.sync()
635 self.m.hash_q.insert(self.key['path'])654 self.m.hash_q.insert(self.key['path'])
636655
637656
@@ -641,8 +660,8 @@
641 node_id=self.key['node_id'])660 node_id=self.key['node_id'])
642 previous_hash = self.key['server_hash']661 previous_hash = self.key['server_hash']
643662
644 self.key.set(local_hash=hash)663 self.key.set(local_hash=hash, stat=stat)
645 self.m.fs.update_stat(self.key.get_mdid(), stat)664 self.key.sync()
646 self.m.action_q.upload(share_id=self.key['share_id'],665 self.m.action_q.upload(share_id=self.key['share_id'],
647 node_id=self.key['node_id'], previous_hash=previous_hash,666 node_id=self.key['node_id'], previous_hash=previous_hash,
648 hash=hash, crc32=crc32, size=size,667 hash=hash, crc32=crc32, size=size,
@@ -653,6 +672,7 @@
653 self.m.action_q.cancel_upload(share_id=self.key['share_id'],672 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
654 node_id=self.key['node_id'])673 node_id=self.key['node_id'])
655 self.key.set(server_hash=hash)674 self.key.set(server_hash=hash)
675 self.key.sync()
656676
657 def commit_upload(self, event, params, hash):677 def commit_upload(self, event, params, hash):
658 """Finish an upload."""678 """Finish an upload."""
@@ -754,6 +774,7 @@
754 self.m.action_q.cancel_upload(share_id=self.key['share_id'],774 self.m.action_q.cancel_upload(share_id=self.key['share_id'],
755 node_id=self.key['node_id'])775 node_id=self.key['node_id'])
756 self.key.set(local_hash=self.key['server_hash'])776 self.key.set(local_hash=self.key['server_hash'])
777 self.key.sync()
757 self.client_moved(event, params, path_from, path_to)778 self.client_moved(event, params, path_from, path_to)
758 self.m.hash_q.insert(self.key['path'])779 self.m.hash_q.insert(self.key['path'])
759780
@@ -766,6 +787,7 @@
766 node_id=self.key['node_id'])787 node_id=self.key['node_id'])
767 self.key.remove_partial()788 self.key.remove_partial()
768 self.key.set(server_hash=self.key['local_hash'])789 self.key.set(server_hash=self.key['local_hash'])
790 self.key.sync()
769 self.m.action_q.query([(self.key['share_id'],791 self.m.action_q.query([(self.key['share_id'],
770 self.key['node_id'],792 self.key['node_id'],
771 self.key['local_hash'] or "")])793 self.key['local_hash'] or "")])
@@ -778,7 +800,8 @@
778800
779 def save_stat(self, event, params, hash, crc32, size, stat):801 def save_stat(self, event, params, hash, crc32, size, stat):
780 """Save the stat"""802 """Save the stat"""
781 self.m.fs.update_stat(self.key.get_mdid(), stat)803 self.key.set(stat=stat)
804 self.key.sync()
782805
783806
784class Sync(object):807class Sync(object):
