mkpkglists improvements

Merged Kevin Morris requested to merge kevr/aurweb:teapot into master
All threads resolved!
3 files changed: +241 −74
#!/usr/bin/env python3
"""
Produces package, package base and user archives for the AUR
database.

Archives:

    packages.gz               | A line-separated list of package names
    packages-meta-v1.json     | A type=search RPC-formatted JSON dataset
    packages-meta-ext-v1.json | An --extended or --all archive
    pkgbase.gz                | A line-separated list of package base names
    users.gz                  | A line-separated list of user names

This script takes an optional argument: --extended or --all. Based
on the given option, the following right-hand side fields are added
to each item:

    --extended | License, Keywords, Groups
    --all      | License, Keywords, Groups, relations and dependencies
"""
import datetime
import gzip
import json
import os
import sys
from collections import defaultdict
from decimal import Decimal
from typing import Tuple
import orjson
import aurweb.config
import aurweb.db

def state_path(archive: str) -> str:
    # A hard-coded /tmp state directory.
    # TODO: Use the Redis cache to store this state after we merge
    # FastAPI into master and remove PHP from the tree.
    return os.path.join("/tmp", os.path.basename(archive) + ".state")
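
# For example (illustrative input; the real paths come from configuration):
#
#   state_path("/srv/http/archives/packages.gz")  # -> "/tmp/packages.gz.state"
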
packagesfile = aurweb.config.get('mkpkglists', 'packagesfile')
packagesmetafile = aurweb.config.get('mkpkglists', 'packagesmetafile')
packagesmetaextfile = aurweb.config.get('mkpkglists', 'packagesmetaextfile')
packages_state = state_path(packagesfile)
pkgbasefile = aurweb.config.get('mkpkglists', 'pkgbasefile')
pkgbases_state = state_path(pkgbasefile)
userfile = aurweb.config.get('mkpkglists', 'userfile')
users_state = state_path(userfile)

def should_update(state: str, tablename: str) -> Tuple[bool, int]:
    if aurweb.config.get("database", "backend") != "mysql":
        return (False, 0)

    db_name = aurweb.config.get("database", "name")
    conn = aurweb.db.Connection()
    cur = conn.execute("SELECT auto_increment FROM information_schema.tables "
                       "WHERE table_schema = ? AND table_name = ?",
                       (db_name, tablename,))
    update_time = cur.fetchone()[0]

    saved_update_time = 0
    if os.path.exists(state):
        with open(state) as f:
            saved_update_time = int(f.read().strip())

    return (saved_update_time == update_time, update_time)
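
# Note on the mechanism above: MySQL's information_schema AUTO_INCREMENT
# counter is used as a cheap change indicator. Any INSERT into the table
# bumps the counter, so an unchanged value means no new rows since the last
# run. UPDATEs and DELETEs do not bump it, so this is a heuristic, not a
# guarantee of freshness.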

def update_state(state: str, update_time: int) -> None:
    with open(state, "w") as f:
        f.write(str(update_time))

TYPE_MAP = {
    "depends": "Depends",
    "makedepends": "MakeDepends",
    "checkdepends": "CheckDepends",
    "optdepends": "OptDepends",
    "conflicts": "Conflicts",
    "provides": "Provides",
    "replaces": "Replaces",
}

def get_extended_dict(query: str):
    """
    Produce data in the following form via a single bulk SQL query:

        {
            <integer_package_id>: {
                "Depends": [...],
                "Conflicts": [...],
                "License": [...]
            }
        }

    The caller can then use this data to populate a dataset of packages:

        output = produce_base_output_data()
        data = get_extended_dict(query)
        for i in range(len(output)):
            package_id = output[i].get("ID")
            output[i].update(data.get(package_id))
    """
    conn = aurweb.db.Connection()
    cursor = conn.execute(query)

    data = defaultdict(lambda: defaultdict(list))
    for result in cursor.fetchall():
        pkgid = result[0]
        key = TYPE_MAP.get(result[1])
        output = result[2]
        if result[3]:
            output += result[3]

        # In all cases, we have at least an empty License list.
        if "License" not in data[pkgid]:
            data[pkgid]["License"] = []

        # In all cases, we have at least an empty Keywords list.
        if "Keywords" not in data[pkgid]:
            data[pkgid]["Keywords"] = []

        # Likewise, at least an empty Groups list.
        if "Groups" not in data[pkgid]:
            data[pkgid]["Groups"] = []

        data[pkgid][key].append(output)

    conn.close()
    return data

def get_limited_extended_fields():
    # Returns: [ID, Type, Name, Cond]
    query = """
    SELECT PackageGroups.PackageID AS ID, 'Groups' AS Type,
           Groups.Name, '' AS Cond
    FROM Groups INNER JOIN PackageGroups
        ON PackageGroups.GroupID = Groups.ID
    UNION
    SELECT PackageLicenses.PackageID AS ID, 'License' AS Type,
           Licenses.Name, '' AS Cond
    FROM Licenses INNER JOIN PackageLicenses
        ON PackageLicenses.LicenseID = Licenses.ID
    UNION
    SELECT Packages.ID AS ID, 'Keywords' AS Type,
           PackageKeywords.Keyword AS Name, '' AS Cond
    FROM PackageKeywords
    INNER JOIN Packages
        ON Packages.PackageBaseID = PackageKeywords.PackageBaseID
    """
    return get_extended_dict(query)

def get_extended_fields():
    # Returns: [ID, Type, Name, Cond]
    query = """
    SELECT PackageDepends.PackageID AS ID, DependencyTypes.Name AS Type,
           PackageDepends.DepName AS Name, PackageDepends.DepCondition AS Cond
    FROM PackageDepends
    LEFT JOIN DependencyTypes
        ON DependencyTypes.ID = PackageDepends.DepTypeID
    UNION
    SELECT PackageRelations.PackageID AS ID, RelationTypes.Name AS Type,
           PackageRelations.RelName AS Name,
           PackageRelations.RelCondition AS Cond
    FROM PackageRelations
    LEFT JOIN RelationTypes
        ON RelationTypes.ID = PackageRelations.RelTypeID
    UNION
    SELECT PackageGroups.PackageID AS ID, 'Groups' AS Type,
           Groups.Name, '' AS Cond
    FROM Groups
    INNER JOIN PackageGroups ON PackageGroups.GroupID = Groups.ID
    UNION
    SELECT PackageLicenses.PackageID AS ID, 'License' AS Type,
           Licenses.Name, '' AS Cond
    FROM Licenses
    INNER JOIN PackageLicenses ON PackageLicenses.LicenseID = Licenses.ID
    UNION
    SELECT Packages.ID AS ID, 'Keywords' AS Type,
           PackageKeywords.Keyword AS Name, '' AS Cond
    FROM PackageKeywords
    INNER JOIN Packages
        ON Packages.PackageBaseID = PackageKeywords.PackageBaseID
    """
    return get_extended_dict(query)

EXTENDED_FIELD_HANDLERS = {
    "--extended": get_limited_extended_fields,
    "--all": get_extended_fields
}
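
# Dispatch sketch: main() looks up sys.argv[1] in this dict, so e.g. running
# the script with "--all" selects get_extended_fields (see main() below).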

def is_decimal(column):
    """ Convert an SQL column of decimal.Decimal type to float; other
    values are returned unchanged. """
    if isinstance(column, Decimal):
        return float(column)
    return column
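
# For example, is_decimal(Decimal("1.23")) returns the float 1.23, while
# non-Decimal values (ints, strings, None) pass through unchanged.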

def write_archive(archive: str, output: list):
    with gzip.open(archive, "wb") as f:
        f.write(b"[\n")
        for i, item in enumerate(output):
            f.write(orjson.dumps(item))
            if i < len(output) - 1:
                f.write(b",")
            f.write(b"\n")
        f.write(b"]")

def main():
    conn = aurweb.db.Connection()

    # ... (unchanged context elided in the diff: datestr and the
    # pkglist_header string used below are defined here) ...

    pkgbaselist_header = "# AUR package base list, generated on " + datestr
    userlist_header = "# AUR user name list, generated on " + datestr

    updated, update_time = should_update(packages_state, "Packages")
    if not updated:
        print("Updating Packages...")

        # Query columns; copied from RPC.
        columns = ("Packages.ID, Packages.Name, "
                   "PackageBases.ID AS PackageBaseID, "
                   "PackageBases.Name AS PackageBase, "
                   "Version, Description, URL, NumVotes, "
                   "Popularity, OutOfDateTS AS OutOfDate, "
                   "Users.UserName AS Maintainer, "
                   "SubmittedTS AS FirstSubmitted, "
                   "ModifiedTS AS LastModified")

        # Perform query.
        cur = conn.execute(f"SELECT {columns} FROM Packages "
                           "LEFT JOIN PackageBases "
                           "ON PackageBases.ID = Packages.PackageBaseID "
                           "LEFT JOIN Users "
                           "ON PackageBases.MaintainerUID = Users.ID "
                           "WHERE PackageBases.PackagerUID IS NOT NULL")
        # Produce packages-meta-v1.json.gz
        output = list()
        snapshot_uri = aurweb.config.get("options", "snapshot_uri")
        for result in cur.fetchall():
            item = {
                column[0]: is_decimal(result[i])
                for i, column in enumerate(cur.description)
            }
            item["URLPath"] = snapshot_uri % item.get("Name")
            output.append(item)
        write_archive(packagesmetafile, output)

        # Produce packages-meta-ext-v1.json.gz
        if len(sys.argv) > 1 and sys.argv[1] in EXTENDED_FIELD_HANDLERS:
            f = EXTENDED_FIELD_HANDLERS.get(sys.argv[1])
            data = f()

            default_ = {"Groups": [], "License": [], "Keywords": []}
            for i in range(len(output)):
                data_ = data.get(output[i].get("ID"), default_)
                output[i].update(data_)

            write_archive(packagesmetaextfile, output)

        # Produce packages.gz
        with gzip.open(packagesfile, "wb") as f:
            f.write(bytes(pkglist_header + "\n", "UTF-8"))
            f.writelines([
                bytes(x.get("Name") + "\n", "UTF-8")
                for x in output
            ])

        update_state(packages_state, update_time)
    else:
        print("Packages have not been updated; skipping.")

    updated, update_time = should_update(pkgbases_state, "PackageBases")
    if not updated:
        print("Updating PackageBases...")

        # Produce pkgbase.gz
        with gzip.open(pkgbasefile, "w") as f:
            f.write(bytes(pkgbaselist_header + "\n", "UTF-8"))
            cur = conn.execute("SELECT Name FROM PackageBases "
                               "WHERE PackagerUID IS NOT NULL")
            f.writelines([bytes(x[0] + "\n", "UTF-8")
                          for x in cur.fetchall()])

        update_state(pkgbases_state, update_time)
    else:
        print("PackageBases have not been updated; skipping.")

    updated, update_time = should_update(users_state, "Users")
    if not updated:
        print("Updating Users...")

        # Produce users.gz
        with gzip.open(userfile, "w") as f:
            f.write(bytes(userlist_header + "\n", "UTF-8"))
            cur = conn.execute("SELECT UserName FROM Users")
            f.writelines([bytes(x[0] + "\n", "UTF-8")
                          for x in cur.fetchall()])

        update_state(users_state, update_time)
    else:
        print("Users have not been updated; skipping.")