comparison cmdline/org/tmatesoft/hg/console/Incoming.java @ 74:6f1b88693d48
Complete refactoring to org.tmatesoft
| | |
|---|---|
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
| date | Mon, 24 Jan 2011 03:14:45 +0100 |
| parents | src/com/tmate/hgkit/console/Incoming.java@565ce0835674 |
| children | ee2c750b036d |
Comparing 73:0d279bcc4442 with 74:6f1b88693d48: the file has no counterpart in the earlier revision (it was carried over from src/com/tmate/hgkit/console/Incoming.java as part of the refactoring), so its full content at 74:6f1b88693d48 is listed below.
```java
/*
 * Copyright (c) 2011 TMate Software Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * For information on how to redistribute this software under
 * the terms of a license other than GNU General Public License
 * contact TMate Software at support@svnkit.com
 */
package org.tmatesoft.hg.console;

import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;

import org.tmatesoft.hg.core.Nodeid;
import org.tmatesoft.hg.repo.Changelog;
import org.tmatesoft.hg.repo.HgRepository;


/**
 * WORK IN PROGRESS, DO NOT USE
 * hg in counterpart
 *
 * @author Artem Tikhomirov
 * @author TMate Software Ltd.
 */
public class Incoming {

    public static void main(String[] args) throws Exception {
        Options cmdLineOpts = Options.parse(args);
        HgRepository hgRepo = cmdLineOpts.findRepository();
        if (hgRepo.isInvalid()) {
            System.err.printf("Can't find repository in: %s\n", hgRepo.getLocation());
            return;
        }
        // in fact, all we need from changelog is set of all nodeids. However, since ParentWalker reuses same Nodeids, it's not too expensive
        // to reuse it here, XXX although later this may need to be refactored
        final Changelog.ParentWalker pw = hgRepo.getChangelog().new ParentWalker();
        pw.init();
        //
        HashSet<Nodeid> base = new HashSet<Nodeid>();
        HashSet<Nodeid> unknownRemoteHeads = new HashSet<Nodeid>();
        // imagine empty repository - any nodeid from remote heads would be unknown
        unknownRemoteHeads.add(Nodeid.fromAscii("382cfe9463db0484a14136e4b38407419525f0c0".getBytes(), 0, 40));
        //
        LinkedList<RemoteBranch> remoteBranches = new LinkedList<RemoteBranch>();
        remoteBranches(unknownRemoteHeads, remoteBranches);
        //
        HashSet<Nodeid> visited = new HashSet<Nodeid>();
        HashSet<RemoteBranch> processed = new HashSet<RemoteBranch>();
        LinkedList<Nodeid[]> toScan = new LinkedList<Nodeid[]>();
        LinkedHashSet<Nodeid> toFetch = new LinkedHashSet<Nodeid>();
        // next one seems to track heads we've asked (or plan to ask) remote.branches for
        HashSet<Nodeid> unknownHeads /*req*/ = new HashSet<Nodeid>(unknownRemoteHeads);
        while (!remoteBranches.isEmpty()) {
            LinkedList<Nodeid> toQueryRemote = new LinkedList<Nodeid>();
            while (!remoteBranches.isEmpty()) {
                RemoteBranch next = remoteBranches.removeFirst();
                if (visited.contains(next.head) || processed.contains(next)) {
                    continue;
                }
                if (Nodeid.NULL.equals(next.head)) {
                    // it's discovery.py that expects next.head to be nullid here, I can't imagine how this may happen, hence this exception
                    throw new IllegalStateException("I wonder if null if may ever get here with remote branches");
                } else if (pw.knownNode(next.root)) {
                    // root of the remote change is known locally, analyze to find exact missing changesets
                    toScan.addLast(new Nodeid[] { next.head, next.root });
                    processed.add(next);
                } else {
                    if (!visited.contains(next.root) && !toFetch.contains(next.root)) {
                        // if parents are locally known, this is new branch (sequence of changes) (sequence sprang out of known parents)
                        if ((next.p1 == null || pw.knownNode(next.p1)) && (next.p2 == null || pw.knownNode(next.p2))) {
                            toFetch.add(next.root);
                        }
                        // XXX perhaps, may combine this parent processing below (I don't understand what this code is exactly about)
                        if (pw.knownNode(next.p1)) {
                            base.add(next.p1);
                        }
                        if (pw.knownNode(next.p2)) {
                            base.add(next.p2);
                        }
                    }
                    if (next.p1 != null && !pw.knownNode(next.p1) && !unknownHeads.contains(next.p1)) {
                        toQueryRemote.add(next.p1);
                        unknownHeads.add(next.p1);
                    }
                    if (next.p2 != null && !pw.knownNode(next.p2) && !unknownHeads.contains(next.p2)) {
                        toQueryRemote.add(next.p2);
                        unknownHeads.add(next.p2);
                    }
                }
                visited.add(next.head);
            }
            if (!toQueryRemote.isEmpty()) {
                // discovery.py in fact does this in batches of 10 revisions a time.
                // however, this slicing may be done in remoteBranches call instead (if needed)
                remoteBranches(toQueryRemote, remoteBranches);
            }
        }
        while (!toScan.isEmpty()) {
            Nodeid[] head_root = toScan.removeFirst();
            List<Nodeid> nodesBetween = remoteBetween(head_root[0], head_root[1], new LinkedList<Nodeid>());
            nodesBetween.add(head_root[1]);
            int x = 1;
            Nodeid p = head_root[0];
            for (Nodeid i : nodesBetween) {
                System.out.println("narrowing " + x + ":" + nodesBetween.size() + " " + i.shortNotation());
                if (pw.knownNode(i)) {
                    if (x <= 2) {
                        toFetch.add(p);
                        base.add(i);
                    } else {
                        // XXX original discovery.py collects new elements to scan separately
                        // likely to "batch" calls to server
                        System.out.println("narrowed branch search to " + p.shortNotation() + ":" + i.shortNotation());
                        toScan.addLast(new Nodeid[] { p, i });
                    }
                    break;
                }
                x = x << 1;
                p = i;
            }
        }
        for (Nodeid n : toFetch) {
            if (pw.knownNode(n)) {
                System.out.println("Erroneous to fetch:" + n);
            } else {
                System.out.println(n);
            }
        }

    }

    static final class RemoteBranch {
        public Nodeid head, root, p1, p2;

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (false == obj instanceof RemoteBranch) {
                return false;
            }
            RemoteBranch o = (RemoteBranch) obj;
            return head.equals(o.head) && root.equals(o.root) && (p1 == null && o.p1 == null || p1.equals(o.p1)) && (p2 == null && o.p2 == null || p2.equals(o.p2));
        }
    }

    private static void remoteBranches(Collection<Nodeid> unknownRemoteHeads, List<RemoteBranch> remoteBranches) {
        // discovery.findcommonincoming:
        // unknown = remote.branches(remote.heads);
        // sent: cmd=branches&roots=d6d2a630f4a6d670c90a5ca909150f2b426ec88f+
        // received: d6d2a630f4a6d670c90a5ca909150f2b426ec88f dbd663faec1f0175619cf7668bddc6350548b8d6 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
        // head, root, first parent, second parent
        //
        // TODO implement this with remote access
        //
        RemoteBranch rb = new RemoteBranch();
        rb.head = unknownRemoteHeads.iterator().next();
        rb.root = Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6".getBytes(), 0, 40);
        remoteBranches.add(rb);
    }

    private static List<Nodeid> remoteBetween(Nodeid nodeid1, Nodeid nodeid2, List<Nodeid> list) {
        // sent: cmd=between&pairs=d6d2a630f4a6d670c90a5ca909150f2b426ec88f-dbd663faec1f0175619cf7668bddc6350548b8d6
        // received: a78c980749e3ccebb47138b547e9b644a22797a9 286d221f6c28cbfce25ea314e1f46a23b7f979d3 fc265ddeab262ff5c34b4cf4e2522d8d41f1f05b a3576694a4d1edaa681cab15b89d6b556b02aff4
        // 1st, 2nd, fourth and eights of total 8 changes between rev9 and rev0
        //
        //
        // a78c980749e3ccebb47138b547e9b644a22797a9 286d221f6c28cbfce25ea314e1f46a23b7f979d3 fc265ddeab262ff5c34b4cf4e2522d8d41f1f05b a3576694a4d1edaa681cab15b89d6b556b02aff4
        // d6d2a630f4a6d670c90a5ca909150f2b426ec88f a78c980749e3ccebb47138b547e9b644a22797a9 5abe5af181bd6a6d3e94c378376c901f0f80da50 08db726a0fb7914ac9d27ba26dc8bbf6385a0554

        // TODO implement with remote access
        String response = null;
        if (nodeid1.equals(Nodeid.fromAscii("382cfe9463db0484a14136e4b38407419525f0c0".getBytes(), 0, 40)) && nodeid2.equals(Nodeid.fromAscii("dbd663faec1f0175619cf7668bddc6350548b8d6".getBytes(), 0, 40))) {
            response = "d6d2a630f4a6d670c90a5ca909150f2b426ec88f a78c980749e3ccebb47138b547e9b644a22797a9 5abe5af181bd6a6d3e94c378376c901f0f80da50 08db726a0fb7914ac9d27ba26dc8bbf6385a0554";
        } else if (nodeid1.equals(Nodeid.fromAscii("a78c980749e3ccebb47138b547e9b644a22797a9".getBytes(), 0, 40)) && nodeid2.equals(Nodeid.fromAscii("5abe5af181bd6a6d3e94c378376c901f0f80da50".getBytes(), 0, 40))) {
            response = "286d221f6c28cbfce25ea314e1f46a23b7f979d3";
        }
        if (response == null) {
            throw HgRepository.notImplemented();
        }
        for (String s : response.split(" ")) {
            list.add(Nodeid.fromAscii(s.getBytes(), 0, 40));
        }
        return list;
    }

}
```
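
The narrowing loop in main() relies on the shape of the reply to the between wire command: as the inline comment notes, the server returns only the changesets at power-of-two distances from the head (1st, 2nd, 4th, 8th, ...), which is why the local counter doubles with x = x << 1 on every step. The sketch below is purely illustrative and not part of the changeset (the helper name and the String-based chain are assumptions); it shows how such a sample relates to the full chain of intermediate revisions.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BetweenSample {

    // Hypothetical helper: given the full chain of revisions walking down from the head
    // (head itself excluded), keep only the elements at distances 1, 2, 4, 8, ... -
    // the same sampling the "between" reply in remoteBetween() exhibits.
    static <T> List<T> sampleAtPowersOfTwo(List<T> chain) {
        List<T> sample = new ArrayList<T>();
        for (int distance = 1; distance <= chain.size(); distance <<= 1) {
            sample.add(chain.get(distance - 1)); // distance 1 is the first element below the head
        }
        return sample;
    }

    public static void main(String[] args) {
        // 8 intermediate revisions between head (rev9) and root (rev0), as in the comment above
        List<String> chain = Arrays.asList("rev8", "rev7", "rev6", "rev5", "rev4", "rev3", "rev2", "rev1");
        // prints [rev8, rev7, rev5, rev1] - the 1st, 2nd, 4th and 8th entries
        System.out.println(sampleAtPowersOfTwo(chain));
    }
}
```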
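A detail worth noting when reading main(): processed is a HashSet&lt;RemoteBranch&gt;, but RemoteBranch overrides only equals(), not hashCode(), so two separately constructed but equal branches may land in different hash buckets and processed.contains(next) is allowed to miss. The self-contained sketch below illustrates the equals/hashCode contract with a simplified stand-in class (String fields, Objects-based null handling); it is an illustration of the general rule, not code from the changeset.

```java
import java.util.HashSet;
import java.util.Objects;

// Illustrative stand-in for RemoteBranch showing why a hashCode() consistent with
// equals() matters once instances are stored in a HashSet, as "processed" is in main().
final class BranchKey {
    final String head, root, p1, p2;

    BranchKey(String head, String root, String p1, String p2) {
        this.head = head; this.root = root; this.p1 = p1; this.p2 = p2;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (!(obj instanceof BranchKey)) return false;
        BranchKey o = (BranchKey) obj;
        // same fields as RemoteBranch.equals(); Objects.equals tolerates null parents
        return head.equals(o.head) && root.equals(o.root)
                && Objects.equals(p1, o.p1) && Objects.equals(p2, o.p2);
    }

    @Override
    public int hashCode() {
        // hashes exactly the fields compared in equals(); null parents hash to 0
        return Objects.hash(head, root, p1, p2);
    }

    public static void main(String[] args) {
        HashSet<BranchKey> processed = new HashSet<BranchKey>();
        processed.add(new BranchKey("head", "root", null, null));
        // true only because hashCode() agrees with equals(); with the default identity
        // hashCode() this lookup on an equal-but-distinct instance could miss
        System.out.println(processed.contains(new BranchKey("head", "root", null, null)));
    }
}
```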
