343 help="list tests instead of running them") |
343 help="list tests instead of running them") |
344 harness.add_argument("--loop", action="store_true", |
344 harness.add_argument("--loop", action="store_true", |
345 help="loop tests repeatedly") |
345 help="loop tests repeatedly") |
346 harness.add_argument('--random', action="store_true", |
346 harness.add_argument('--random', action="store_true", |
347 help='run tests in random order') |
347 help='run tests in random order') |
|
348 harness.add_argument('--order-by-runtime', action="store_true", |
|
349 help='run slowest tests first, according to .testtimes') |
348 harness.add_argument("-p", "--port", type=int, |
350 harness.add_argument("-p", "--port", type=int, |
349 help="port on which servers should listen" |
351 help="port on which servers should listen" |
350 " (default: $%s or %d)" % defaults['port']) |
352 " (default: $%s or %d)" % defaults['port']) |
351 harness.add_argument('--profile-runner', action='store_true', |
353 harness.add_argument('--profile-runner', action='store_true', |
352 help='run statprof on run-tests') |
354 help='run statprof on run-tests') |
2305 outcome[tc.name] = tres |
2307 outcome[tc.name] = tres |
2306 jsonout = json.dumps(outcome, sort_keys=True, indent=4, |
2308 jsonout = json.dumps(outcome, sort_keys=True, indent=4, |
2307 separators=(',', ': ')) |
2309 separators=(',', ': ')) |
2308 outf.writelines(("testreport =", jsonout)) |
2310 outf.writelines(("testreport =", jsonout)) |
2309 |
2311 |
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests.

    testdescs: list of dicts, each with a b'path' under key 'path' (bytes).
    previoustimes: mapping of test path -> list of recorded runtimes; when
        non-empty, tests are ordered slowest-first using the most recent
        recorded time as the estimate (new tests default to 1 second).
    shuffle: when True, randomize the order instead of sorting.

    When no previous times are available, falls back to a size-based
    heuristic (larger test files first), scaled by keyword multipliers
    for known-slow test categories.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:
        def sortkey(f):
            f = f['path']
            if f in previoustimes:
                # Use most recent time as estimate
                return -previoustimes[f][-1]
            else:
                # Default to a rather arbitrary value of 1 second for new
                # tests
                return -1.0
    else:
        # keywords for slow tests
        slow = {b'svn': 10,
                b'cvs': 10,
                b'hghave': 10,
                b'largefiles-update': 10,
                b'run-tests': 10,
                b'corruption': 10,
                b'race': 10,
                b'i18n': 10,
                b'check': 100,
                b'gendoc': 100,
                b'contrib-perf': 200,
                }
        # memoization cache: path -> computed sort key
        perf = {}

        def sortkey(f):
            # run largest tests first, as they tend to take the longest
            f = f['path']
            try:
                return perf[f]
            except KeyError:
                try:
                    val = -os.stat(f).st_size
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                    perf[f] = -1e9  # file does not exist, tell early
                    return -1e9
                for kw, mul in slow.items():
                    if kw in f:
                        val *= mul
                if f.endswith(b'.py'):
                    val /= 10.0
                perf[f] = val / 1000.0
                return perf[f]

    testdescs.sort(key=sortkey)
2353 |
2365 |
2354 class TestRunner(object): |
2366 class TestRunner(object): |
2355 """Holds context for executing tests. |
2367 """Holds context for executing tests. |
2416 |
2428 |
2417 finally: |
2429 finally: |
2418 os.umask(oldmask) |
2430 os.umask(oldmask) |
2419 |
2431 |
2420 def _run(self, testdescs): |
2432 def _run(self, testdescs): |
2421 sorttests(testdescs, shuffle=self.options.random) |
|
2422 |
|
2423 self._testdir = osenvironb[b'TESTDIR'] = getattr( |
2433 self._testdir = osenvironb[b'TESTDIR'] = getattr( |
2424 os, 'getcwdb', os.getcwd)() |
2434 os, 'getcwdb', os.getcwd)() |
2425 # assume all tests in same folder for now |
2435 # assume all tests in same folder for now |
2426 if testdescs: |
2436 if testdescs: |
2427 pathname = os.path.dirname(testdescs[0]['path']) |
2437 pathname = os.path.dirname(testdescs[0]['path']) |
2432 self._outputdir = canonpath(_bytespath(self.options.outputdir)) |
2442 self._outputdir = canonpath(_bytespath(self.options.outputdir)) |
2433 else: |
2443 else: |
2434 self._outputdir = self._testdir |
2444 self._outputdir = self._testdir |
2435 if testdescs and pathname: |
2445 if testdescs and pathname: |
2436 self._outputdir = os.path.join(self._outputdir, pathname) |
2446 self._outputdir = os.path.join(self._outputdir, pathname) |
|
2447 previoustimes = {} |
|
2448 if self.options.order_by_runtime: |
|
2449 previoustimes = dict(loadtimes(self._outputdir)) |
|
2450 sorttests(testdescs, previoustimes, shuffle=self.options.random) |
2437 |
2451 |
2438 if 'PYTHONHASHSEED' not in os.environ: |
2452 if 'PYTHONHASHSEED' not in os.environ: |
2439 # use a random python hash seed all the time |
2453 # use a random python hash seed all the time |
2440 # we do the randomness ourself to know what seed is used |
2454 # we do the randomness ourself to know what seed is used |
2441 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32)) |
2455 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32)) |