diff --git a/mercurial/pure/parsers.py b/mercurial/pure/parsers.py
--- a/mercurial/pure/parsers.py
+++ b/mercurial/pure/parsers.py
@@ -164,11 +164,13 @@
         """
         if self._nm_root is None:
             return None
+        docket = self._nm_docket
         changed, data = nodemaputil.update_persistent_data(
-            self, self._nm_root, self._nm_max_idx, self._nm_rev
+            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
         )
-        self._nm_root = self._nm_max_idx = self._nm_rev = None
-        return changed, data
+
+        self._nm_root = self._nm_max_idx = self._nm_docket = None
+        return docket, changed, data
 
     def update_nodemap_data(self, docket, nm_data):
         """provide full block of persisted binary data for a nodemap
@@ -178,9 +180,9 @@
         if nm_data is not None:
             self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
             if self._nm_root:
-                self._nm_rev = docket.tip_rev
+                self._nm_docket = docket
             else:
-                self._nm_root = self._nm_max_idx = self._nm_rev = None
+                self._nm_root = self._nm_max_idx = self._nm_docket = None
 
 
 class InlinedIndexObject(BaseIndexObject):
diff --git a/mercurial/revlogutils/nodemap.py b/mercurial/revlogutils/nodemap.py
--- a/mercurial/revlogutils/nodemap.py
+++ b/mercurial/revlogutils/nodemap.py
@@ -77,18 +77,27 @@
     can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
     ondisk_docket = revlog._nodemap_docket
 
+    data = None
     # first attemp an incremental update of the data
    if can_incremental and ondisk_docket is not None:
         target_docket = revlog._nodemap_docket.copy()
-        data_changed_count, data = revlog.index.nodemap_data_incremental()
-        datafile = _rawdata_filepath(revlog, target_docket)
-        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
-        # store vfs
-        with revlog.opener(datafile, b'a') as fd:
-            fd.write(data)
-        target_docket.data_length += len(data)
-        target_docket.data_unused += data_changed_count
-    else:
+        (
+            src_docket,
+            data_changed_count,
+            data,
+        ) = revlog.index.nodemap_data_incremental()
+        if src_docket != target_docket:
+            data = None
+        else:
+            datafile = _rawdata_filepath(revlog, target_docket)
+            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+            # store vfs
+            with revlog.opener(datafile, b'a') as fd:
+                fd.write(data)
+            target_docket.data_length += len(data)
+            target_docket.data_unused += data_changed_count
+
+    if data is None:
         # otherwise fallback to a full new export
         target_docket = NodeMapDocket()
         datafile = _rawdata_filepath(revlog, target_docket)
@@ -182,6 +191,20 @@
         new.data_unused = self.data_unused
         return new
 
+    def __cmp__(self, other):
+        if self.uid < other.uid:
+            return -1
+        if self.uid > other.uid:
+            return 1
+        elif self.data_length < other.data_length:
+            return -1
+        elif self.data_length > other.data_length:
+            return 1
+        return 0
+
+    def __eq__(self, other):
+        return self.uid == other.uid and self.data_length == other.data_length
+
     def serialize(self):
         """return serialized bytes for a docket using the passed uid"""
         data = []