# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import logging
import os
import time
import warnings

import mysql.connector

from . import indexapi

def _convertbookmarkpattern(pattern):
    """Translate a trailing-glob bookmark pattern into a SQL LIKE pattern.

    Literal ``_`` and ``%`` characters are escaped, and a trailing ``*``
    becomes the LIKE wildcard ``%``.
    """
    pattern = pattern.replace('_', '\\_')
    pattern = pattern.replace('%', '\\%')
    if pattern.endswith('*'):
        pattern = pattern[:-1] + '%'
    return pattern
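
# Illustrative conversions (example inputs, not from the source):
#   'scratch/*' -> 'scratch/%'   (trailing glob becomes the LIKE wildcard)
#   'my_stack'  -> 'my\_stack'   (literal underscore escaped for LIKE)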

class sqlindexapi(indexapi.indexapi):
    '''
    SQL backend for the infinitepush index. See schema.sql
    '''
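
    # Tables touched below (schema.sql is the authoritative definition):
    # bundles, nodestobundle, nodesmetadata and bookmarkstonode, as can be
    # seen from the INSERT/SELECT statements in the methods that follow.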

    def __init__(self, reponame, host, port,
                 database, user, password, logfile, loglevel,
                 waittimeout=300, locktimeout=120):
        super(sqlindexapi, self).__init__()
        self.reponame = reponame
        self.sqlargs = {
            'host': host,
            'port': port,
            'database': database,
            'user': user,
            'password': password,
        }
        self.sqlconn = None
        self.sqlcursor = None
        if not logfile:
            logfile = os.devnull
        logging.basicConfig(filename=logfile)
        self.log = logging.getLogger()
        self.log.setLevel(loglevel)
        self._connected = False
        self._waittimeout = waittimeout
        self._locktimeout = locktimeout

    def sqlconnect(self):
        if self.sqlconn:
            raise indexapi.indexexception("SQL connection already open")
        if self.sqlcursor:
            raise indexapi.indexexception("SQL cursor already open without"
                                          " connection")
        retry = 3
        while True:
            try:
                self.sqlconn = mysql.connector.connect(**self.sqlargs)

                # Code is copy-pasted from hgsql. Bug fixes need to be
                # back-ported!
                # The default behavior is to return byte arrays, when we
                # need strings. This custom convert returns strings.
                self.sqlconn.set_converter_class(CustomConverter)
                self.sqlconn.autocommit = False
                break
            except mysql.connector.errors.Error:
                # mysql can be flaky occasionally, so do some minimal
                # retrying.
                retry -= 1
                if retry == 0:
                    raise
                time.sleep(0.2)

        waittimeout = self.sqlconn.converter.escape('%s' % self._waittimeout)

        self.sqlcursor = self.sqlconn.cursor()
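        # Both values are interpreted by MySQL in seconds: wait_timeout
        # bounds how long an idle connection is kept open, and
        # innodb_lock_wait_timeout bounds how long a statement waits on a
        # row lock before failing.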
        self.sqlcursor.execute("SET wait_timeout=%s" % waittimeout)
        self.sqlcursor.execute("SET innodb_lock_wait_timeout=%s" %
                               self._locktimeout)
        self._connected = True

    def close(self):
        """Cleans up the metadata store connection."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.sqlcursor.close()
            self.sqlconn.close()
        self.sqlcursor = None
        self.sqlconn = None

    def __enter__(self):
        if not self._connected:
            self.sqlconnect()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.sqlconn.commit()
        else:
            self.sqlconn.rollback()
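
    # Usage sketch (hypothetical connection values): the context manager
    # commits on success and rolls back on any exception, e.g.
    #
    #   index = sqlindexapi('myrepo', 'localhost', 3306, 'infinitepush_db',
    #                       'user', 'secret', None, logging.DEBUG)
    #   with index as idx:
    #       idx.addbookmark('scratch/feature', somehex)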

    def addbundle(self, bundleid, nodesctx):
        if not self._connected:
            self.sqlconnect()
        self.log.info("ADD BUNDLE %r %r" % (self.reponame, bundleid))
        self.sqlcursor.execute(
            "INSERT INTO bundles(bundle, reponame) VALUES "
            "(%s, %s)", params=(bundleid, self.reponame))
        for ctx in nodesctx:
            self.sqlcursor.execute(
                "INSERT INTO nodestobundle(node, bundle, reponame) "
                "VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
                "bundle=VALUES(bundle)",
                params=(ctx.hex(), bundleid, self.reponame))

            extra = ctx.extra()
            author_name = ctx.user()
            committer_name = extra.get('committer', ctx.user())
            author_date = int(ctx.date()[0])
            committer_date = int(extra.get('committer_date', author_date))
            self.sqlcursor.execute(
                "INSERT IGNORE INTO nodesmetadata(node, message, p1, p2, "
                "author, committer, author_date, committer_date, "
                "reponame) VALUES "
                "(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
                params=(ctx.hex(), ctx.description(),
                        ctx.p1().hex(), ctx.p2().hex(), author_name,
                        committer_name, author_date, committer_date,
                        self.reponame)
            )

    def addbookmark(self, bookmark, node):
        """Takes a bookmark name and hash, and records mapping in the metadata
        store."""
        if not self._connected:
            self.sqlconnect()
        self.log.info(
            "ADD BOOKMARKS %r bookmark: %r node: %r" %
            (self.reponame, bookmark, node))
        self.sqlcursor.execute(
            "INSERT INTO bookmarkstonode(bookmark, node, reponame) "
            "VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
            params=(bookmark, node, self.reponame))

    def addmanybookmarks(self, bookmarks):
        if not self._connected:
            self.sqlconnect()
        args = []
        values = []
        for bookmark, node in bookmarks.iteritems():
            args.append('(%s, %s, %s)')
            values.extend((bookmark, node, self.reponame))
        args = ','.join(args)

        self.sqlcursor.execute(
            "INSERT INTO bookmarkstonode(bookmark, node, reponame) "
            "VALUES %s ON DUPLICATE KEY UPDATE node=VALUES(node)" % args,
            params=values)
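
    # For two bookmarks (hypothetical input) the statement above expands to:
    #   INSERT INTO bookmarkstonode(bookmark, node, reponame)
    #   VALUES (%s, %s, %s),(%s, %s, %s)
    #   ON DUPLICATE KEY UPDATE node=VALUES(node)
    # with the six parameters bound from `values`.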

    def deletebookmarks(self, patterns):
        """Accepts list of bookmark patterns and deletes them.
        If `commit` is set then bookmark will actually be deleted. Otherwise
        deletion will be delayed until the end of transaction.
        """
        if not self._connected:
            self.sqlconnect()
        self.log.info("DELETE BOOKMARKS: %s" % patterns)
        for pattern in patterns:
            pattern = _convertbookmarkpattern(pattern)
            self.sqlcursor.execute(
                "DELETE from bookmarkstonode WHERE bookmark LIKE (%s) "
                "and reponame = %s",
                params=(pattern, self.reponame))

    def getbundle(self, node):
        """Returns the bundleid for the bundle that contains the given node."""
        if not self._connected:
            self.sqlconnect()
        self.log.info("GET BUNDLE %r %r" % (self.reponame, node))
        self.sqlcursor.execute(
            "SELECT bundle from nodestobundle "
            "WHERE node = %s AND reponame = %s", params=(node, self.reponame))
        result = self.sqlcursor.fetchall()
        if len(result) != 1 or len(result[0]) != 1:
            self.log.info("No matching node")
            return None
        bundle = result[0][0]
        self.log.info("Found bundle %r" % bundle)
        return bundle

    def getnode(self, bookmark):
        """Returns the node for the given bookmark. None if it doesn't exist."""
        if not self._connected:
            self.sqlconnect()
        self.log.info(
            "GET NODE reponame: %r bookmark: %r" % (self.reponame, bookmark))
        self.sqlcursor.execute(
            "SELECT node from bookmarkstonode WHERE "
            "bookmark = %s AND reponame = %s", params=(bookmark, self.reponame))
        result = self.sqlcursor.fetchall()
        if len(result) != 1 or len(result[0]) != 1:
            self.log.info("No matching bookmark")
            return None
        node = result[0][0]
        self.log.info("Found node %r" % node)
        return node

    def getbookmarks(self, query):
        if not self._connected:
            self.sqlconnect()
        self.log.info(
            "QUERY BOOKMARKS reponame: %r query: %r" % (self.reponame, query))
        query = _convertbookmarkpattern(query)
        self.sqlcursor.execute(
            "SELECT bookmark, node from bookmarkstonode WHERE "
            "reponame = %s AND bookmark LIKE %s",
            params=(self.reponame, query))
        result = self.sqlcursor.fetchall()
        bookmarks = {}
        for row in result:
            if len(row) != 2:
                self.log.info("Bad row returned: %s" % row)
                continue
            bookmarks[row[0]] = row[1]
        return bookmarks

    def saveoptionaljsonmetadata(self, node, jsonmetadata):
        if not self._connected:
            self.sqlconnect()
        self.log.info(
            "INSERT METADATA reponame: %r node: %r, jsonmetadata: %s" %
            (self.reponame, node, jsonmetadata))

        self.sqlcursor.execute(
            "UPDATE nodesmetadata SET optional_json_metadata=%s WHERE "
            "reponame=%s AND node=%s",
            params=(jsonmetadata, self.reponame, node))

class CustomConverter(mysql.connector.conversion.MySQLConverter):
    """Ensure that all values being returned are returned as python string
    (versus the default byte arrays)."""
    def _STRING_to_python(self, value, dsc=None):
        return str(value)

    def _VAR_STRING_to_python(self, value, dsc=None):
        return str(value)

    def _BLOB_to_python(self, value, dsc=None):
        return str(value)