Merge lp:~verterok/ubuntuone-client/transactions-for-sync into lp:ubuntuone-client
- transactions-for-sync
- Merge into trunk
Proposed by
Guillermo Gonzalez
Status: | Merged |
---|---|
Approved by: | dobey |
Approved revision: | 131 |
Merged at revision: | not available |
Proposed branch: | lp:~verterok/ubuntuone-client/transactions-for-sync |
Merge into: | lp:ubuntuone-client |
Diff against target: | None lines |
To merge this branch: | bzr merge lp:~verterok/ubuntuone-client/transactions-for-sync |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
dobey (community) | Approve | ||
Elliot Murphy (community) | Approve | ||
Review via email: mp+9974@code.launchpad.net |
Commit message
Add 'transactions' to Sync (FSKey) regarding FileSystemManager changes
Description of the change
To post a comment you must log in.
Revision history for this message
Guillermo Gonzalez (verterok) wrote : | # |
Revision history for this message
Elliot Murphy (statik) : | # |
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'tests/syncdaemon/test_fsm.py' | |||
2 | --- tests/syncdaemon/test_fsm.py 2009-07-31 18:24:54 +0000 | |||
3 | +++ tests/syncdaemon/test_fsm.py 2009-08-11 12:22:34 +0000 | |||
4 | @@ -160,6 +160,28 @@ | |||
5 | 160 | now = time.time() | 160 | now = time.time() |
6 | 161 | self.assertTrue(now-3 <= when <= now) # 3 seconds test range | 161 | self.assertTrue(now-3 <= when <= now) # 3 seconds test range |
7 | 162 | 162 | ||
8 | 163 | def test_with_node_id(self): | ||
9 | 164 | '''Test creation with node_id''' | ||
10 | 165 | # create, but not twice | ||
11 | 166 | path = os.path.join(self.share.path, 'path') | ||
12 | 167 | self.fsm.create(path, "share", node_id='a_node_id') | ||
13 | 168 | self.assertRaises(ValueError, self.fsm.create, path, "share") | ||
14 | 169 | self.assertRaises(ValueError, self.fsm.create, path, "other") | ||
15 | 170 | mdobj = self.fsm.get_by_path(path) | ||
16 | 171 | self.assertEqual(mdobj.path, "path") | ||
17 | 172 | self.assertEqual(mdobj.share_id, "share") | ||
18 | 173 | self.assertEqual(mdobj.node_id, "a_node_id") | ||
19 | 174 | when = mdobj.info.created | ||
20 | 175 | now = time.time() | ||
21 | 176 | self.assertTrue(now-3 <= when <= now) # 3 seconds test range | ||
22 | 177 | |||
23 | 178 | # set uuid using valid path, but not twice | ||
24 | 179 | self.assertRaises(ValueError, self.fsm.set_node_id, path, "whatever") | ||
25 | 180 | mdobj = self.fsm.get_by_path(path) | ||
26 | 181 | when = mdobj.info.node_id_assigned | ||
27 | 182 | now = time.time() | ||
28 | 183 | self.assertTrue(now-3 <= when <= now) # 3 seconds test range | ||
29 | 184 | |||
30 | 163 | def test_invalid_args(self): | 185 | def test_invalid_args(self): |
31 | 164 | '''Test using invalid args in set_node_id.''' | 186 | '''Test using invalid args in set_node_id.''' |
32 | 165 | path = os.path.join(self.share.path, 'path') | 187 | path = os.path.join(self.share.path, 'path') |
33 | @@ -180,7 +202,7 @@ | |||
34 | 180 | self.fsm.set_node_id(path, "uuid") | 202 | self.fsm.set_node_id(path, "uuid") |
35 | 181 | 203 | ||
36 | 182 | # opening another FSM | 204 | # opening another FSM |
38 | 183 | fsm = FileSystemManager(self.fsmdir, self.fsm.vm) | 205 | FileSystemManager(self.fsmdir, self.fsm.vm) |
39 | 184 | self.fsm.set_node_id(path, "uuid") | 206 | self.fsm.set_node_id(path, "uuid") |
40 | 185 | 207 | ||
41 | 186 | def test_twice_different_bad(self): | 208 | def test_twice_different_bad(self): |
42 | @@ -519,12 +541,6 @@ | |||
43 | 519 | self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, info="-") | 541 | self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, info="-") |
44 | 520 | self.assertRaises(ValueError, self.fsm.set_by_path, path, info="-") | 542 | self.assertRaises(ValueError, self.fsm.set_by_path, path, info="-") |
45 | 521 | 543 | ||
46 | 522 | # test with forbidden stat | ||
47 | 523 | self.assertRaises(ValueError, self.fsm.set_by_node_id, "uuid", "share", | ||
48 | 524 | stat="-") | ||
49 | 525 | self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, stat="-") | ||
50 | 526 | self.assertRaises(ValueError, self.fsm.set_by_path, path, stat="-") | ||
51 | 527 | |||
52 | 528 | # test with forbidden share | 544 | # test with forbidden share |
53 | 529 | self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, share_id="-") | 545 | self.assertRaises(ValueError, self.fsm.set_by_mdid, mdid, share_id="-") |
54 | 530 | self.assertRaises(ValueError, self.fsm.set_by_path, path, share_id="-") | 546 | self.assertRaises(ValueError, self.fsm.set_by_path, path, share_id="-") |
55 | @@ -666,6 +682,31 @@ | |||
56 | 666 | self.assertTrue(mdid2 in all) | 682 | self.assertTrue(mdid2 in all) |
57 | 667 | self.assertTrue(mdid3 in all) | 683 | self.assertTrue(mdid3 in all) |
58 | 668 | 684 | ||
59 | 685 | def test_internal_set_node_id(self): | ||
60 | 686 | """Test _set_node_id""" | ||
61 | 687 | path = os.path.join(self.share.path, 'path') | ||
62 | 688 | mdid = self.fsm.create(path, "share") | ||
63 | 689 | mdobj = self.fsm.fs[mdid] | ||
64 | 690 | # yes, it's a unit test, I access protected members. | ||
65 | 691 | # pylint: disable-msg=W0212 | ||
66 | 692 | self.fsm._set_node_id(mdobj, "uuid", path) | ||
67 | 693 | |||
68 | 694 | self.assertEquals('uuid', mdobj['node_id']) | ||
69 | 695 | self.fsm.set_node_id(path, "uuid") | ||
70 | 696 | new_mdobj = self.fsm.get_by_node_id('share', 'uuid') | ||
71 | 697 | for k, v in mdobj.items(): | ||
72 | 698 | if k == 'info': | ||
73 | 699 | for k1, v1 in v.items(): | ||
74 | 700 | self.assertEquals(v1, getattr(new_mdobj.info, k1)) | ||
75 | 701 | else: | ||
76 | 702 | self.assertEquals(v, getattr(new_mdobj, k)) | ||
77 | 703 | |||
78 | 704 | # test using bad uuid | ||
79 | 705 | mdobj = self.fsm.fs[mdid] | ||
80 | 706 | self.assertEquals('uuid', mdobj['node_id']) | ||
81 | 707 | self.assertRaises(ValueError, | ||
82 | 708 | self.fsm._set_node_id, mdobj, 'bad-uuid', path) | ||
83 | 709 | |||
84 | 669 | 710 | ||
85 | 670 | class StatTests(FSMTestCase): | 711 | class StatTests(FSMTestCase): |
86 | 671 | '''Test all the behaviour regarding the stats.''' | 712 | '''Test all the behaviour regarding the stats.''' |
87 | @@ -772,7 +813,7 @@ | |||
88 | 772 | mdobj2 = self.fsm.get_by_path(path2) | 813 | mdobj2 = self.fsm.get_by_path(path2) |
89 | 773 | self.assertEqual(mdobj2.stat, os.stat(path2)) | 814 | self.assertEqual(mdobj2.stat, os.stat(path2)) |
90 | 774 | 815 | ||
92 | 775 | def test_update_stat(self): | 816 | def test_set_stat_by_mdid(self): |
93 | 776 | '''Test that update_stat works.''' | 817 | '''Test that update_stat works.''' |
94 | 777 | path = os.path.join(self.share.path, "thisfile") | 818 | path = os.path.join(self.share.path, "thisfile") |
95 | 778 | open(path, "w").close() | 819 | open(path, "w").close() |
96 | @@ -787,7 +828,7 @@ | |||
97 | 787 | self.assertEqual(mdobj.stat, oldstat) | 828 | self.assertEqual(mdobj.stat, oldstat) |
98 | 788 | 829 | ||
99 | 789 | # it's updated when asked, even if it's an old stat | 830 | # it's updated when asked, even if it's an old stat |
101 | 790 | self.fsm.update_stat(mdid, oldstat) | 831 | self.fsm.set_by_mdid(mdid, stat=oldstat) |
102 | 791 | mdobj = self.fsm.get_by_mdid(mdid) | 832 | mdobj = self.fsm.get_by_mdid(mdid) |
103 | 792 | self.assertEqual(mdobj.stat, oldstat) | 833 | self.assertEqual(mdobj.stat, oldstat) |
104 | 793 | 834 | ||
105 | 794 | 835 | ||
106 | === modified file 'ubuntuone/syncdaemon/filesystem_manager.py' | |||
107 | --- ubuntuone/syncdaemon/filesystem_manager.py 2009-08-10 13:26:21 +0000 | |||
108 | +++ ubuntuone/syncdaemon/filesystem_manager.py 2009-08-11 12:22:34 +0000 | |||
109 | @@ -124,7 +124,7 @@ | |||
110 | 124 | logger = functools.partial(fsm_logger.log, logging.INFO) | 124 | logger = functools.partial(fsm_logger.log, logging.INFO) |
111 | 125 | log_warning = functools.partial(fsm_logger.log, logging.WARNING) | 125 | log_warning = functools.partial(fsm_logger.log, logging.WARNING) |
112 | 126 | 126 | ||
114 | 127 | is_forbidden = set("info path node_id share_id is_dir stat".split() | 127 | is_forbidden = set("info path node_id share_id is_dir".split() |
115 | 128 | ).intersection | 128 | ).intersection |
116 | 129 | 129 | ||
117 | 130 | class InconsistencyError(Exception): | 130 | class InconsistencyError(Exception): |
118 | @@ -323,7 +323,7 @@ | |||
119 | 323 | if mdobj["node_id"] is not None: | 323 | if mdobj["node_id"] is not None: |
120 | 324 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid | 324 | self._idx_node_id[(mdobj["share_id"], mdobj["node_id"])] = mdid |
121 | 325 | 325 | ||
123 | 326 | def create(self, path, share_id, is_dir=False): | 326 | def create(self, path, share_id, node_id=None, is_dir=False): |
124 | 327 | '''Creates a new md object.''' | 327 | '''Creates a new md object.''' |
125 | 328 | if not path.strip(): | 328 | if not path.strip(): |
126 | 329 | raise ValueError("Empty paths are not allowed (got %r)" % path) | 329 | raise ValueError("Empty paths are not allowed (got %r)" % path) |
127 | @@ -341,6 +341,8 @@ | |||
128 | 341 | newobj["info"] = dict(created=time.time(), is_partial=False) | 341 | newobj["info"] = dict(created=time.time(), is_partial=False) |
129 | 342 | # only one stat, (instead of os.path.exists & os.stat) | 342 | # only one stat, (instead of os.path.exists & os.stat) |
130 | 343 | newobj["stat"] = get_stat(path) | 343 | newobj["stat"] = get_stat(path) |
131 | 344 | if node_id is not None: | ||
132 | 345 | self._set_node_id(newobj, node_id, path) | ||
133 | 344 | 346 | ||
134 | 345 | logger("create: path=%r mdid=%r share_id=%r node_id=%r is_dir=%r" % ( | 347 | logger("create: path=%r mdid=%r share_id=%r node_id=%r is_dir=%r" % ( |
135 | 346 | path, mdid, share_id, None, is_dir)) | 348 | path, mdid, share_id, None, is_dir)) |
136 | @@ -356,26 +358,29 @@ | |||
137 | 356 | path = os.path.normpath(path) | 358 | path = os.path.normpath(path) |
138 | 357 | mdid = self._idx_path[path] | 359 | mdid = self._idx_path[path] |
139 | 358 | mdobj = self.fs[mdid] | 360 | mdobj = self.fs[mdid] |
140 | 361 | self._set_node_id(mdobj, node_id, path) | ||
141 | 362 | self.fs[mdid] = mdobj | ||
142 | 363 | |||
143 | 364 | def _set_node_id(self, mdobj, node_id, path): | ||
144 | 365 | """Set the node_id to the mdobj, but don't 'save' the mdobj""" | ||
145 | 359 | if mdobj["node_id"] is not None: | 366 | if mdobj["node_id"] is not None: |
146 | 360 | # the object is already there! it's ok if it has the same id | 367 | # the object is already there! it's ok if it has the same id |
147 | 361 | if mdobj["node_id"] == node_id: | 368 | if mdobj["node_id"] == node_id: |
148 | 362 | logger("set_node_id (repeated!): path=%r mdid=%r node_id=%r" | 369 | logger("set_node_id (repeated!): path=%r mdid=%r node_id=%r" |
150 | 363 | % (path, mdid, node_id)) | 370 | % (path, mdobj['mdid'], node_id)) |
151 | 364 | return | 371 | return |
152 | 365 | msg = "The path %r already has node_id (%r)" % (path, node_id) | 372 | msg = "The path %r already has node_id (%r)" % (path, node_id) |
153 | 366 | raise ValueError(msg) | 373 | raise ValueError(msg) |
154 | 367 | |||
155 | 368 | # adjust the index | 374 | # adjust the index |
156 | 369 | share_id = mdobj["share_id"] | 375 | share_id = mdobj["share_id"] |
158 | 370 | self._idx_node_id[(share_id, node_id)] = mdid | 376 | self._idx_node_id[(share_id, node_id)] = mdobj['mdid'] |
159 | 371 | 377 | ||
160 | 372 | # set the node_id | 378 | # set the node_id |
161 | 373 | mdobj["node_id"] = node_id | 379 | mdobj["node_id"] = node_id |
162 | 374 | mdobj["info"]["node_id_assigned"] = time.time() | 380 | mdobj["info"]["node_id_assigned"] = time.time() |
163 | 375 | self.fs[mdid] = mdobj | ||
164 | 376 | 381 | ||
165 | 377 | logger("set_node_id: path=%r mdid=%r share_id=%r node_id=%r" % ( | 382 | logger("set_node_id: path=%r mdid=%r share_id=%r node_id=%r" % ( |
167 | 378 | path, mdid, share_id, node_id)) | 383 | path, mdobj['mdid'], share_id, node_id)) |
168 | 379 | 384 | ||
169 | 380 | def get_mdobjs_by_share_id(self, share_id, base_path=None): | 385 | def get_mdobjs_by_share_id(self, share_id, base_path=None): |
170 | 381 | """Returns all the mdobj that belongs to a share and it path | 386 | """Returns all the mdobj that belongs to a share and it path |
171 | @@ -456,13 +461,6 @@ | |||
172 | 456 | mdobj["stat"] = get_stat(path) | 461 | mdobj["stat"] = get_stat(path) |
173 | 457 | self.fs[mdid] = mdobj | 462 | self.fs[mdid] = mdobj |
174 | 458 | 463 | ||
175 | 459 | def update_stat(self, mdid, stat): | ||
176 | 460 | '''Updates the stat of a md object.''' | ||
177 | 461 | logger("update stat of mdid=%r", mdid) | ||
178 | 462 | mdobj = self.fs[mdid] | ||
179 | 463 | mdobj["stat"] = stat | ||
180 | 464 | self.fs[mdid] = mdobj | ||
181 | 465 | |||
182 | 466 | def move_file(self, new_share_id, path_from, path_to): | 464 | def move_file(self, new_share_id, path_from, path_to): |
183 | 467 | '''Moves a file/dir from one point to the other.''' | 465 | '''Moves a file/dir from one point to the other.''' |
184 | 468 | path_from = os.path.normpath(path_from) | 466 | path_from = os.path.normpath(path_from) |
185 | 469 | 467 | ||
186 | === modified file 'ubuntuone/syncdaemon/sync.py' | |||
187 | --- ubuntuone/syncdaemon/sync.py 2009-08-04 20:00:44 +0000 | |||
188 | +++ ubuntuone/syncdaemon/sync.py 2009-08-11 00:36:15 +0000 | |||
189 | @@ -42,9 +42,13 @@ | |||
190 | 42 | """create""" | 42 | """create""" |
191 | 43 | self.fs = fs | 43 | self.fs = fs |
192 | 44 | self.keys = keys | 44 | self.keys = keys |
193 | 45 | self.mdid = None | ||
194 | 46 | self._changes = {} | ||
195 | 45 | 47 | ||
196 | 46 | def get_mdid(self): | 48 | def get_mdid(self): |
197 | 47 | """Get the metadata id.""" | 49 | """Get the metadata id.""" |
198 | 50 | if self.mdid is not None: | ||
199 | 51 | return self.mdid | ||
200 | 48 | if len(self.keys) == 1 and "path" in self.keys: | 52 | if len(self.keys) == 1 and "path" in self.keys: |
201 | 49 | # pylint: disable-msg=W0212 | 53 | # pylint: disable-msg=W0212 |
202 | 50 | mdid = self.fs._idx_path[self.keys["path"]] | 54 | mdid = self.fs._idx_path[self.keys["path"]] |
203 | @@ -59,6 +63,7 @@ | |||
204 | 59 | raise KeyError("Incorrect keys: %s" % self.keys) | 63 | raise KeyError("Incorrect keys: %s" % self.keys) |
205 | 60 | if mdid is None: | 64 | if mdid is None: |
206 | 61 | raise KeyError("cant find mdid") | 65 | raise KeyError("cant find mdid") |
207 | 66 | self.mdid = mdid | ||
208 | 62 | return mdid | 67 | return mdid |
209 | 63 | 68 | ||
210 | 64 | def get(self, key): | 69 | def get(self, key): |
211 | @@ -88,8 +93,12 @@ | |||
212 | 88 | 93 | ||
213 | 89 | def set(self, **kwargs): | 94 | def set(self, **kwargs): |
214 | 90 | """Set the values for kwargs.""" | 95 | """Set the values for kwargs.""" |
217 | 91 | mdid = self.get_mdid() | 96 | self._changes.update(kwargs) |
218 | 92 | self.fs.set_by_mdid(mdid, **kwargs) | 97 | |
219 | 98 | def sync(self): | ||
220 | 99 | """sync the changes back to FSM""" | ||
221 | 100 | if self._changes: | ||
222 | 101 | self.fs.set_by_mdid(self.get_mdid(), **self._changes) | ||
223 | 93 | 102 | ||
224 | 94 | def has_metadata(self): | 103 | def has_metadata(self): |
225 | 95 | """The State Machine value version of has_metadata.""" | 104 | """The State Machine value version of has_metadata.""" |
226 | @@ -192,8 +201,6 @@ | |||
227 | 192 | self.fs.create_file(self.get_mdid()) | 201 | self.fs.create_file(self.get_mdid()) |
228 | 193 | 202 | ||
229 | 194 | 203 | ||
230 | 195 | |||
231 | 196 | |||
232 | 197 | def loglevel(lvl): | 204 | def loglevel(lvl): |
233 | 198 | """Make a function that logs at lvl log level.""" | 205 | """Make a function that logs at lvl log level.""" |
234 | 199 | def level_log(self, message, *args, **kwargs): | 206 | def level_log(self, message, *args, **kwargs): |
235 | @@ -328,8 +335,8 @@ | |||
236 | 328 | """create a local file.""" | 335 | """create a local file.""" |
237 | 329 | mdobj = self.m.fs.get_by_node_id(share_id, parent_id) | 336 | mdobj = self.m.fs.get_by_node_id(share_id, parent_id) |
238 | 330 | path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name) | 337 | path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name) |
241 | 331 | self.m.fs.create(path=path, share_id=share_id, is_dir=True) | 338 | self.m.fs.create(path=path, share_id=share_id, node_id=node_id, |
242 | 332 | self.m.fs.set_node_id(path, node_id) | 339 | is_dir=True) |
243 | 333 | self.m.action_q.query([(share_id, node_id, "")]) | 340 | self.m.action_q.query([(share_id, node_id, "")]) |
244 | 334 | # pylint: disable-msg=W0704 | 341 | # pylint: disable-msg=W0704 |
245 | 335 | # this should be provided by FSM, fix!! | 342 | # this should be provided by FSM, fix!! |
246 | @@ -370,10 +377,12 @@ | |||
247 | 370 | self.key['node_id'], | 377 | self.key['node_id'], |
248 | 371 | self.key['local_hash'] or "")]) | 378 | self.key['local_hash'] or "")]) |
249 | 372 | self.key.set(server_hash=self.key['local_hash']) | 379 | self.key.set(server_hash=self.key['local_hash']) |
250 | 380 | self.key.sync() | ||
251 | 373 | 381 | ||
252 | 374 | def get_dir(self, event, params, hash): | 382 | def get_dir(self, event, params, hash): |
253 | 375 | """Get the directory.""" | 383 | """Get the directory.""" |
254 | 376 | self.key.set(server_hash=hash) | 384 | self.key.set(server_hash=hash) |
255 | 385 | self.key.sync() | ||
256 | 377 | self.m.fs.create_partial(node_id=self.key['node_id'], | 386 | self.m.fs.create_partial(node_id=self.key['node_id'], |
257 | 378 | share_id=self.key['share_id']) | 387 | share_id=self.key['share_id']) |
258 | 379 | self.m.action_q.listdir( | 388 | self.m.action_q.listdir( |
259 | @@ -410,6 +419,7 @@ | |||
260 | 410 | except InconsistencyError: | 419 | except InconsistencyError: |
261 | 411 | self.key.remove_partial() | 420 | self.key.remove_partial() |
262 | 412 | self.key.set(server_hash=self.key['local_hash']) | 421 | self.key.set(server_hash=self.key['local_hash']) |
263 | 422 | self.key.sync() | ||
264 | 413 | self.m.action_q.query([ | 423 | self.m.action_q.query([ |
265 | 414 | (self.key["share_id"], self.key["node_id"], "")]) | 424 | (self.key["share_id"], self.key["node_id"], "")]) |
266 | 415 | # we dont perform the merge, we try to re get it | 425 | # we dont perform the merge, we try to re get it |
267 | @@ -478,15 +488,17 @@ | |||
268 | 478 | 488 | ||
269 | 479 | self.key.remove_partial() | 489 | self.key.remove_partial() |
270 | 480 | self.key.set(local_hash=hash) | 490 | self.key.set(local_hash=hash) |
271 | 491 | self.key.sync() | ||
272 | 481 | 492 | ||
273 | 482 | def new_file(self, event, params, share_id, node_id, parent_id, name): | 493 | def new_file(self, event, params, share_id, node_id, parent_id, name): |
274 | 483 | """create a local file.""" | 494 | """create a local file.""" |
275 | 484 | mdobj = self.m.fs.get_by_node_id(share_id, parent_id) | 495 | mdobj = self.m.fs.get_by_node_id(share_id, parent_id) |
276 | 485 | path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name) | 496 | path = os.path.join(self.m.fs.get_abspath(share_id, mdobj.path), name) |
279 | 486 | self.m.fs.create(path=path, share_id=share_id, is_dir=False) | 497 | self.m.fs.create(path=path, share_id=share_id, node_id=node_id, |
280 | 487 | self.m.fs.set_node_id(path, node_id) | 498 | is_dir=False) |
281 | 488 | self.key.set(server_hash="") | 499 | self.key.set(server_hash="") |
282 | 489 | self.key.set(local_hash="") | 500 | self.key.set(local_hash="") |
283 | 501 | self.key.sync() | ||
284 | 490 | self.key.make_file() | 502 | self.key.make_file() |
285 | 491 | self.m.action_q.query([(share_id, node_id, "")]) | 503 | self.m.action_q.query([(share_id, node_id, "")]) |
286 | 492 | 504 | ||
287 | @@ -500,6 +512,7 @@ | |||
288 | 500 | def get_file(self, event, params, hash): | 512 | def get_file(self, event, params, hash): |
289 | 501 | """Get the contents for the file.""" | 513 | """Get the contents for the file.""" |
290 | 502 | self.key.set(server_hash=hash) | 514 | self.key.set(server_hash=hash) |
291 | 515 | self.key.sync() | ||
292 | 503 | self.m.fs.create_partial(node_id=self.key['node_id'], | 516 | self.m.fs.create_partial(node_id=self.key['node_id'], |
293 | 504 | share_id=self.key['share_id']) | 517 | share_id=self.key['share_id']) |
294 | 505 | self.m.action_q.download( | 518 | self.m.action_q.download( |
295 | @@ -513,6 +526,7 @@ | |||
296 | 513 | def reget_file(self, event, params, hash): | 526 | def reget_file(self, event, params, hash): |
297 | 514 | """cancel and reget this download.""" | 527 | """cancel and reget this download.""" |
298 | 515 | self.key.set(server_hash=hash) | 528 | self.key.set(server_hash=hash) |
299 | 529 | self.key.sync() | ||
300 | 516 | self.m.action_q.cancel_download(share_id=self.key['share_id'], | 530 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
301 | 517 | node_id=self.key['node_id']) | 531 | node_id=self.key['node_id']) |
302 | 518 | self.key.remove_partial() | 532 | self.key.remove_partial() |
303 | @@ -547,6 +561,7 @@ | |||
304 | 547 | def server_file_changed_back(self, event, params, hash): | 561 | def server_file_changed_back(self, event, params, hash): |
305 | 548 | """cancel and dont reget this download.""" | 562 | """cancel and dont reget this download.""" |
306 | 549 | self.key.set(server_hash=hash) | 563 | self.key.set(server_hash=hash) |
307 | 564 | self.key.sync() | ||
308 | 550 | self.m.action_q.cancel_download(share_id=self.key['share_id'], | 565 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
309 | 551 | node_id=self.key['node_id']) | 566 | node_id=self.key['node_id']) |
310 | 552 | self.key.remove_partial() | 567 | self.key.remove_partial() |
311 | @@ -561,6 +576,7 @@ | |||
312 | 561 | # start work to go to a good state | 576 | # start work to go to a good state |
313 | 562 | self.key.remove_partial() | 577 | self.key.remove_partial() |
314 | 563 | self.key.set(server_hash=self.key['local_hash']) | 578 | self.key.set(server_hash=self.key['local_hash']) |
315 | 579 | self.key.sync() | ||
316 | 564 | self.m.action_q.query([ | 580 | self.m.action_q.query([ |
317 | 565 | (self.key["share_id"], self.key["node_id"], "")]) | 581 | (self.key["share_id"], self.key["node_id"], "")]) |
318 | 566 | 582 | ||
319 | @@ -574,6 +590,7 @@ | |||
320 | 574 | self.m.fs.create(path=path, share_id=share_id, is_dir=False) | 590 | self.m.fs.create(path=path, share_id=share_id, is_dir=False) |
321 | 575 | self.key.set(local_hash=empty_hash) | 591 | self.key.set(local_hash=empty_hash) |
322 | 576 | self.key.set(server_hash=empty_hash) | 592 | self.key.set(server_hash=empty_hash) |
323 | 593 | self.key.sync() | ||
324 | 577 | name = os.path.basename(path) | 594 | name = os.path.basename(path) |
325 | 578 | marker = MDMarker(self.key.get_mdid()) | 595 | marker = MDMarker(self.key.get_mdid()) |
326 | 579 | self.m.action_q.make_file(share_id, parent_id, name, marker) | 596 | self.m.action_q.make_file(share_id, parent_id, name, marker) |
327 | @@ -611,8 +628,9 @@ | |||
328 | 611 | def put_file(self, event, params, hash, crc32, size, stat): | 628 | def put_file(self, event, params, hash, crc32, size, stat): |
329 | 612 | """upload the file to the server.""" | 629 | """upload the file to the server.""" |
330 | 613 | previous_hash = self.key['server_hash'] | 630 | previous_hash = self.key['server_hash'] |
333 | 614 | self.key.set(local_hash=hash) | 631 | self.key.set(local_hash=hash, stat=stat) |
334 | 615 | self.m.fs.update_stat(self.key.get_mdid(), stat) | 632 | self.key.sync() |
335 | 633 | |||
336 | 616 | self.m.action_q.upload(share_id=self.key['share_id'], | 634 | self.m.action_q.upload(share_id=self.key['share_id'], |
337 | 617 | node_id=self.key['node_id'], previous_hash=previous_hash, | 635 | node_id=self.key['node_id'], previous_hash=previous_hash, |
338 | 618 | hash=hash, crc32=crc32, size=size, | 636 | hash=hash, crc32=crc32, size=size, |
339 | @@ -623,8 +641,8 @@ | |||
340 | 623 | self.m.action_q.cancel_download(share_id=self.key['share_id'], | 641 | self.m.action_q.cancel_download(share_id=self.key['share_id'], |
341 | 624 | node_id=self.key['node_id']) | 642 | node_id=self.key['node_id']) |
342 | 625 | self.key.remove_partial() | 643 | self.key.remove_partial() |
345 | 626 | self.key.set(local_hash=hash) | 644 | self.key.set(local_hash=hash, stat=stat) |
346 | 627 | self.m.fs.update_stat(self.key.get_mdid(), stat) | 645 | self.key.sync() |
347 | 628 | 646 | ||
348 | 629 | def reput_file_from_ok(self, event, param, hash): | 647 | def reput_file_from_ok(self, event, param, hash): |
349 | 630 | """put the file again, mark upload as ok""" | 648 | """put the file again, mark upload as ok""" |
350 | @@ -632,6 +650,7 @@ | |||
351 | 632 | node_id=self.key['node_id']) | 650 | node_id=self.key['node_id']) |
352 | 633 | self.key.set(local_hash=hash) | 651 | self.key.set(local_hash=hash) |
353 | 634 | self.key.set(server_hash=hash) | 652 | self.key.set(server_hash=hash) |
354 | 653 | self.key.sync() | ||
355 | 635 | self.m.hash_q.insert(self.key['path']) | 654 | self.m.hash_q.insert(self.key['path']) |
356 | 636 | 655 | ||
357 | 637 | 656 | ||
358 | @@ -641,8 +660,8 @@ | |||
359 | 641 | node_id=self.key['node_id']) | 660 | node_id=self.key['node_id']) |
360 | 642 | previous_hash = self.key['server_hash'] | 661 | previous_hash = self.key['server_hash'] |
361 | 643 | 662 | ||
364 | 644 | self.key.set(local_hash=hash) | 663 | self.key.set(local_hash=hash, stat=stat) |
365 | 645 | self.m.fs.update_stat(self.key.get_mdid(), stat) | 664 | self.key.sync() |
366 | 646 | self.m.action_q.upload(share_id=self.key['share_id'], | 665 | self.m.action_q.upload(share_id=self.key['share_id'], |
367 | 647 | node_id=self.key['node_id'], previous_hash=previous_hash, | 666 | node_id=self.key['node_id'], previous_hash=previous_hash, |
368 | 648 | hash=hash, crc32=crc32, size=size, | 667 | hash=hash, crc32=crc32, size=size, |
369 | @@ -653,6 +672,7 @@ | |||
370 | 653 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], | 672 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], |
371 | 654 | node_id=self.key['node_id']) | 673 | node_id=self.key['node_id']) |
372 | 655 | self.key.set(server_hash=hash) | 674 | self.key.set(server_hash=hash) |
373 | 675 | self.key.sync() | ||
374 | 656 | 676 | ||
375 | 657 | def commit_upload(self, event, params, hash): | 677 | def commit_upload(self, event, params, hash): |
376 | 658 | """Finish an upload.""" | 678 | """Finish an upload.""" |
377 | @@ -754,6 +774,7 @@ | |||
378 | 754 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], | 774 | self.m.action_q.cancel_upload(share_id=self.key['share_id'], |
379 | 755 | node_id=self.key['node_id']) | 775 | node_id=self.key['node_id']) |
380 | 756 | self.key.set(local_hash=self.key['server_hash']) | 776 | self.key.set(local_hash=self.key['server_hash']) |
381 | 777 | self.key.sync() | ||
382 | 757 | self.client_moved(event, params, path_from, path_to) | 778 | self.client_moved(event, params, path_from, path_to) |
383 | 758 | self.m.hash_q.insert(self.key['path']) | 779 | self.m.hash_q.insert(self.key['path']) |
384 | 759 | 780 | ||
385 | @@ -766,6 +787,7 @@ | |||
386 | 766 | node_id=self.key['node_id']) | 787 | node_id=self.key['node_id']) |
387 | 767 | self.key.remove_partial() | 788 | self.key.remove_partial() |
388 | 768 | self.key.set(server_hash=self.key['local_hash']) | 789 | self.key.set(server_hash=self.key['local_hash']) |
389 | 790 | self.key.sync() | ||
390 | 769 | self.m.action_q.query([(self.key['share_id'], | 791 | self.m.action_q.query([(self.key['share_id'], |
391 | 770 | self.key['node_id'], | 792 | self.key['node_id'], |
392 | 771 | self.key['local_hash'] or "")]) | 793 | self.key['local_hash'] or "")]) |
393 | @@ -778,7 +800,8 @@ | |||
394 | 778 | 800 | ||
395 | 779 | def save_stat(self, event, params, hash, crc32, size, stat): | 801 | def save_stat(self, event, params, hash, crc32, size, stat): |
396 | 780 | """Save the stat""" | 802 | """Save the stat""" |
398 | 781 | self.m.fs.update_stat(self.key.get_mdid(), stat) | 803 | self.key.set(stat=stat) |
399 | 804 | self.key.sync() | ||
400 | 782 | 805 | ||
401 | 783 | 806 | ||
402 | 784 | class Sync(object): | 807 | class Sync(object): |
This branch adds "transactions" to the Syncdaemon Sync class: almost all FileSystemManager operations are queued and executed only when FSKey.sync() is called. This allows us to update the metadata once instead of performing multiple writes (scanning a tree of ~9k files is ~15 seconds faster).