WIP: Python part

pull/3491/head
matejcik 5 months ago
parent 6166c318bc
commit db8e019711

@@ -0,0 +1,343 @@
from __future__ import annotations

import json
import typing as t
import unicodedata
from hashlib import sha256
from pathlib import Path

import construct as c
from construct_classes import Struct, subcon
from typing_extensions import Self, TypedDict

from ..firmware.models import Model
from ..models import TrezorModel
from ..tools import EnumAdapter, TupleAdapter

# All sections need to be aligned to 2 bytes for the offset tables using u16 to work properly
ALIGNMENT = 2

JsonTranslationData = t.Dict[str, t.Dict[str, str]]
TranslatedStrings = t.Dict[str, str]
JsonFontInfo = t.Dict[str, str]
Order = t.Dict[int, str]


class JsonHeader(TypedDict):
    language: str
    version: str
    change_language_title: str
    change_language_prompt: str


class JsonDef(TypedDict):
    header: JsonHeader
    translations: JsonTranslationData
    fonts: dict[str, JsonFontInfo]


def _normalize(what: str) -> str:
    return unicodedata.normalize("NFKC", what)


def offsets_seq(data: t.Iterable[bytes]) -> t.Iterator[int]:
    offset = 0
    for item in data:
        yield offset
        offset += len(item)
    yield offset
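
# A quick sanity check (illustrative, not part of the original file):
#   list(offsets_seq([b"ab", b"cde"])) == [0, 2, 5]
# i.e. N items yield N + 1 offsets, the last one being the total length.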


def _version_to_tuple(version: str) -> tuple[int, int, int, int]:
    items = [int(n) for n in version.split(".")]
    assert len(items) == 3
    return (*items, 0)


class TranslationsHeader(Struct):
    language: str
    model: Model
    firmware_version: tuple[int, int, int, int]
    data_len: int
    data_hash: bytes
    change_language_title: str
    change_language_prompt: str

    # fmt: off
    SUBCON = c.Struct(
        "magic" / c.Const(b"TR"),
        "language" / c.PaddedString(4, "ascii"),  # locale specifier without dash: enUS, esES, deDE, etc.
        "model" / EnumAdapter(c.Bytes(4), Model),
        "firmware_version" / TupleAdapter(c.Int8ul, c.Int8ul, c.Int8ul, c.Int8ul),
        "data_len" / c.Int16ul,
        "data_hash" / c.Bytes(32),
        "change_language_title" / c.PascalString(c.Int8ul, "utf8"),
        "change_language_prompt" / c.PascalString(c.Int8ul, "utf8"),
        c.Aligned(ALIGNMENT, c.Pass),
        c.Terminated,
    )
    # fmt: on


class TranslationsProof(Struct):
    merkle_proof: list[bytes]
    sigmask: int
    signature: bytes

    # fmt: off
    SUBCON = c.Struct(
        "merkle_proof" / c.PrefixedArray(c.Int8ul, c.Bytes(32)),
        "sigmask" / c.Byte,
        "signature" / c.Bytes(64),
        c.Terminated,
    )
    # fmt: on


class BlobTable(Struct):
    offsets: list[tuple[int, int]]
    data: bytes

    SENTINEL: t.ClassVar[int] = 0xFFFF

    # fmt: off
    SUBCON = c.Struct(
        "_length" / c.Rebuild(c.Int16ul, c.len_(c.this.offsets) - 1),
        "offsets" / c.Array(c.this._length + 1, TupleAdapter(c.Int16ul, c.Int16ul)),
        "data" / c.GreedyBytes,
        c.Aligned(ALIGNMENT, c.Pass),
        c.Terminated,
    )
    # fmt: on

    @classmethod
    def from_items(cls, items: dict[int, bytes]) -> Self:
        assert not any(key >= cls.SENTINEL for key in items.keys())
        keys = sorted(items.keys())
        items_sorted = [items[key] for key in keys]
        offsets = list(offsets_seq(items_sorted))
        keys.append(cls.SENTINEL)
        assert len(keys) == len(offsets)
        return cls(
            offsets=list(zip(keys, offsets)),
            data=b"".join(items_sorted),
        )

    def __len__(self) -> int:
        return len(self.offsets) - 1
    def get(self, id: int) -> bytes | None:
        if id == self.SENTINEL:
            return None
        # Note: index the offsets table by list position, not by key value --
        # keys are arbitrary IDs and are generally not consecutive list indices.
        for i, (key, offset) in enumerate(self.offsets):
            if key == id:
                # the item ends where the next table entry begins
                return self.data[offset : self.offsets[i + 1][1]]
        return None
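
# Illustrative round-trip for BlobTable (values are made up):
#   table = BlobTable.from_items({1: b"abc", 5: b"de"})
#   table.offsets == [(1, 0), (5, 3), (0xFFFF, 5)]
#   BlobTable.parse(table.build()).get(5) == b"de"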


class TranslationsData(Struct):
    offsets: list[int]
    strings: bytes

    # fmt: off
    SUBCON = c.Struct(
        "_length" / c.Rebuild(c.Int16ul, c.len_(c.this.offsets) - 1),
        "offsets" / c.Array(c.this._length + 1, c.Int16ul),
        "strings" / c.Aligned(ALIGNMENT, c.GreedyBytes),
        c.Terminated,
    )
    # fmt: on

    @classmethod
    def from_items(cls, items: list[str]) -> Self:
        item_bytes = [_normalize(item).encode("utf-8") for item in items]
        offsets = list(offsets_seq(item_bytes))
        return cls(offsets=offsets, strings=b"".join(item_bytes))

    def __len__(self) -> int:
        return len(self.offsets) - 1

    def get(self, idx: int) -> str | None:
        if idx >= len(self.offsets) - 1:
            return None
        return self.strings[self.offsets[idx] : self.offsets[idx + 1]].decode("utf-8")
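
# Illustrative use of TranslationsData (values are made up):
#   tr = TranslationsData.from_items(["ab", "cde"])
#   tr.offsets == [0, 2, 5] and tr.get(1) == "cde" and tr.get(2) is None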


# ===========


class Font(BlobTable):
    @classmethod
    def from_file(cls, file: Path) -> Self:
        json_content = json.loads(file.read_text())
        assert all(len(codepoint) == 1 for codepoint in json_content)
        raw_content = {
            ord(codepoint): bytes.fromhex(data)
            for codepoint, data in json_content.items()
        }
        return cls.from_items(raw_content)
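
# The per-font JSON consumed by Font.from_file is assumed to map single
# characters to hex-encoded glyph data, e.g. {"A": "0b0c...", "B": "ff01..."}.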


class FontsTable(BlobTable):
    @classmethod
    def from_dir(cls, model_fonts: dict[str, str], font_dir: Path) -> Self:
        """Load the fonts for one model from a directory.

        Example structure of the font dict (the leading number corresponds
        to the C representation of each font):
        {
            "1_FONT_NORMAL": "font_tthoves_regular_21_cs.json",
            "2_FONT_BOLD": "font_tthoves_bold_17_cs.json",
            "3_FONT_MONO": "font_robotomono_medium_20_cs.json",
            "4_FONT_BIG": null,
            "5_FONT_DEMIBOLD": "font_tthoves_demibold_21_cs.json"
        }
        """
        fonts = {}
        for font_name, file_name in model_fonts.items():
            if not file_name:
                continue
            file_path = font_dir / file_name
            font_num = int(font_name.split("_")[0])
            try:
                fonts[font_num] = Font.from_file(file_path).build()
            except Exception as e:
                raise ValueError(f"Failed to load font {file_name}") from e
        return cls.from_items(fonts)

    def get_font(self, font_id: int) -> Font | None:
        font_bytes = self.get(font_id)
        if font_bytes is None:
            return None
        return Font.parse(font_bytes)
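
# Sketch of loading and querying a fonts table (file names are hypothetical):
#   table = FontsTable.from_dir({"1_FONT_NORMAL": "font_normal.json"}, Path("fonts"))
#   font = FontsTable.parse(table.build()).get_font(1)  # None if the ID is missing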


# =========


class TranslationsPayload(Struct):
    translations_bytes: bytes
    fonts_bytes: bytes

    # fmt: off
    SUBCON = c.Struct(
        "translations_bytes" / c.Prefixed(c.Int16ul, c.GreedyBytes),
        "fonts_bytes" / c.Prefixed(c.Int16ul, c.GreedyBytes),
        c.Terminated,
    )
    # fmt: on


class TranslationsBlob(Struct):
    header_bytes: bytes
    proof_bytes: bytes
    payload: TranslationsPayload = subcon(TranslationsPayload)

    # fmt: off
    SUBCON = c.Struct(
        "magic" / c.Const(b"TRTR00"),
        "total_length" / c.Rebuild(
            c.Int16ul,
            (
                c.len_(c.this.header_bytes)
                + c.len_(c.this.proof_bytes)
                + c.len_(c.this.payload.translations_bytes)
                + c.len_(c.this.payload.fonts_bytes)
                + 2 * 4  # sizeof(u16) * number of fields
            )
        ),
        "_start_offset" / c.Tell,
        "header_bytes" / c.Prefixed(c.Int16ul, c.GreedyBytes),
        "proof_bytes" / c.Prefixed(c.Int16ul, c.GreedyBytes),
        "payload" / TranslationsPayload.SUBCON,
        "_end_offset" / c.Tell,
        c.Terminated,
        c.Check(c.this.total_length == c.this._end_offset - c.this._start_offset),
    )
    # fmt: on

    @property
    def header(self):
        return TranslationsHeader.parse(self.header_bytes)

    @property
    def proof(self):
        return TranslationsProof.parse(self.proof_bytes)

    @proof.setter
    def proof(self, proof: TranslationsProof):
        self.proof_bytes = proof.build()

    @property
    def translations(self):
        return TranslationsData.parse(self.payload.translations_bytes)

    @property
    def fonts(self):
        return FontsTable.parse(self.payload.fonts_bytes)

    def verify(self) -> None:
        header = self.header
        data = self.payload.build()
        assert header.data_len == len(data)
        assert header.data_hash == sha256(data).digest()
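
# Typical check on a received blob (illustrative; raw_bytes is hypothetical):
#   blob = TranslationsBlob.parse(raw_bytes)
#   blob.verify()  # raises AssertionError on a length or hash mismatch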


# ====================


def make_blob(dir: Path, lang: str, model: TrezorModel) -> TranslationsBlob:
    lang_file = dir / f"{lang}.json"
    fonts_dir = dir / "fonts"

    lang_data: JsonDef = json.loads(lang_file.read_text())
    json_header: JsonHeader = lang_data["header"]

    json_order = json.loads((dir / "order.json").read_text())
    order: Order = {int(k): v for k, v in json_order.items()}

    # flatten translations
    translations_flattened = {
        f"{section}__{key}": value
        for section, section_data in lang_data["translations"].items()
        for key, value in section_data.items()
    }
    # order translations -- python dicts keep insertion order
    translations_ordered = [
        translations_flattened.get(key, "") for _, key in sorted(order.items())
    ]
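
    # e.g. {"words": {"cancel": "Zrušit"}} flattens to {"words__cancel": "Zrušit"};
    # order.json is assumed to map numeric IDs to those flattened names, so
    # translations_ordered[i] holds the string whose ID is i (illustrative values).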
    translations = TranslationsData.from_items(translations_ordered)

    if model.internal_name not in lang_data["fonts"]:
        raise ValueError(f"Model {model.internal_name} not found in {lang_file}")
    model_fonts = lang_data["fonts"][model.internal_name]
    fonts = FontsTable.from_dir(model_fonts, fonts_dir)

    translations_bytes = translations.build()
    assert len(translations_bytes) % ALIGNMENT == 0
    fonts_bytes = fonts.build()
    assert len(fonts_bytes) % ALIGNMENT == 0

    payload = TranslationsPayload(
        translations_bytes=translations_bytes,
        fonts_bytes=fonts_bytes,
    )
    data = payload.build()

    header = TranslationsHeader(
        language=json_header["language"],
        model=Model.from_trezor_model(model),
        firmware_version=_version_to_tuple(json_header["version"]),
        data_len=len(data),
        data_hash=sha256(data).digest(),
        change_language_title=json_header["change_language_title"],
        change_language_prompt=json_header["change_language_prompt"],
    )
    return TranslationsBlob(
        header_bytes=header.build(),
        proof_bytes=b"",
        payload=payload,
    )
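
# Usage sketch (paths and language code are hypothetical):
#   blob = make_blob(Path("translations"), "cs", some_model)  # some_model: TrezorModel
#   Path("translations-cs.bin").write_bytes(blob.build())
# The proof is left empty at this point; a Merkle proof and signature are
# expected to be attached later via the `proof` setter.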

@@ -16,11 +16,12 @@
import io
from pathlib import Path
from typing import TYPE_CHECKING, Optional, cast
from typing import TYPE_CHECKING, BinaryIO, Optional, cast
import click
import requests
from .. import device, messages, toif, translations
from .. import device, messages, toif
from . import AliasedGroup, ChoiceType, with_client
if TYPE_CHECKING:
@@ -204,26 +205,31 @@ def label(client: "TrezorClient", label: str) -> str:
@cli.command()
@click.option("-f", "--file", type=str, help="Language JSON file with translations.")
@click.option("-u", "--url", help="Link to already created and signed blob.")
@click.option("-r", "--remove", is_flag=True, help="Switch back to English.")
@click.argument("path_or_url", required=False)
@click.option("-r", "--remove", is_flag=True, default=False, help="Switch back to English.")
@with_client
def language(client: "TrezorClient", file: str, url: str, remove: bool) -> str:
def language(client: "TrezorClient", path_or_url: Optional[str], remove: bool) -> str:
    """Set new language with translations.

    PATH_OR_URL is either a local path to, or a URL of, a signed translations blob.
    """
    if file and url:
        raise click.ClickException("Please provide only one of -f or -u")
    if remove != (path_or_url is None):
        raise click.ClickException("Either provide a path or URL, or use --remove")
    if remove:
        language_data = b""
    else:
        if file:
            model = client.features.model
            assert model is not None
            language_data = translations.blob_from_file(Path(file), model)
        elif url:
            language_data = translations.blob_from_url(url)
        else:
            raise click.ClickException("Please provide either -f or -u")
        assert path_or_url is not None
        try:
            language_data = Path(path_or_url).read_bytes()
        except Exception:
            try:
                language_data = requests.get(path_or_url).content
            except Exception:
                raise click.ClickException(
                    f"Failed to load translations from {path_or_url}"
                ) from None
    return device.change_language(client, language_data=language_data)
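
# Example invocations (illustrative; assumes this command group is mounted as
# `trezorctl set`):
#   trezorctl set language translations-cs.bin
#   trezorctl set language https://example.com/translations-cs.bin
#   trezorctl set language --remove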

@@ -49,7 +49,6 @@ from . import (
    solana,
    stellar,
    tezos,
    utils,
    with_client,
)
@@ -416,7 +415,6 @@ cli.add_command(settings.cli)
cli.add_command(solana.cli)
cli.add_command(stellar.cli)
cli.add_command(tezos.cli)
cli.add_command(utils.cli)
cli.add_command(firmware.cli)
cli.add_command(debug.cli)

@@ -1,81 +0,0 @@
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2022 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
from pathlib import Path

import click

from .. import translations


@click.group(name="utils")
def cli() -> None:
    """Utils commands."""


@cli.command()
@click.option(
    "-f",
    "--file",
    type=str,
    help="Language JSON file with translations.",
    required=True,
)
@click.option("-m", "--model", required=True)
def sign_translations(file: str, model: str) -> None:
    """Sign translations blob."""
    file_path = Path(file)
    if not file_path.exists():
        raise click.ClickException(f"File {file_path} does not exist.")

    file_info = translations.get_file_info(file_path)
    language = file_info["language"]
    version = file_info["version"]
    supported_models = file_info["supported_models"]
    if model not in supported_models:
        raise click.ClickException(
            "Fonts for model {} not found in file. Available models: {}".format(
                model, supported_models
            )
        )

    click.echo(f"Creating blob for language {language} version {version}")
    unsigned_blob = translations.blob_from_file(file_path, model, sign_dev=False)
    signing = translations.Signing(unsigned_blob)
    hash_to_sign = signing.hash_to_sign()
    click.echo("Hash to sign: {}".format(hash_to_sign.hex()))
    click.echo("Please sign this hash and paste the signature below.")
    signature: str = click.prompt("Signature", type=str)
    signature_bytes = bytes.fromhex(signature)
    signed_blob = signing.apply_signature(signature_bytes)

    # TODO: this is a pain point of model name "Safe 3" with a space
    model = model.replace(" ", "")
    output_file_name = f"translations_signed_{model}_{version}.dat"
    output_file_path = file_path.parent / output_file_name
    if output_file_path.exists():
        overwrite = click.confirm(
            f"WARNING: File {output_file_path} already exists. Overwrite?"
        )
        if overwrite:
            click.echo("Overwriting file.")
        else:
            click.echo("Aborting and not overwriting file.")
            return

    with output_file_path.open("wb") as f:
        f.write(signed_blob)
    click.echo(f"Signed blob saved to {output_file_path}")

@@ -24,6 +24,8 @@ from .util import FirmwareHashParameters
if t.TYPE_CHECKING:
    from typing_extensions import Self

    from ..models import TrezorModel


class Model(Enum):
    T1B1 = b"T1B1"
@@ -45,6 +47,10 @@ class Model(Enum):
            return cls.T2T1
        raise ValueError(f"Unknown hardware model: {hw_model}")

    @classmethod
    def from_trezor_model(cls, trezor_model: "TrezorModel") -> "Self":
        return cls(trezor_model.internal_name.encode("ascii"))
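
    # e.g. Model.from_trezor_model(m) == Model.T2T1 when m.internal_name == "T2T1"
    # (illustrative; any TrezorModel with a matching internal name works)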

    def model_keys(self, dev_keys: bool = False) -> "ModelKeys":
        if dev_keys:
            model_map = MODEL_MAP_DEV

@@ -1,351 +0,0 @@
from __future__ import annotations

import json
import struct
import typing as t
import unicodedata
from hashlib import sha256
from pathlib import Path

from typing_extensions import Self

import construct as c
import requests
from construct_classes import Struct, subcon
from typing_extensions import TypedDict

from .translations_dev_sign import sign_with_dev_keys
from .tools import TupleAdapter

# All sections need to be aligned to 2 bytes for the offset tables using u16 to work properly
ALIGNMENT = 2
HEADER_MAX_LEN = 128
HEADER_WLEN = 256
SIG_LEN = 65
# TODO: why is the sigmask used/useful?
SIGMASK = (0b111).to_bytes(1, "little")

JsonTranslationData = dict[str, dict[str, str]]
JsonHeaderData = dict[str, str]
JsonFontData = dict[str, str]
JsonOrderData = dict[int, str]


def _normalize(what: str) -> str:
    return unicodedata.normalize("NFKC", what)


def offsets_seq(data: t.Iterable[bytes]) -> t.Iterator[int]:
    offset = 0
    for item in data:
        yield offset
        offset += len(item)
    yield offset


def _version_to_tuple(version: str) -> tuple[int, int, int, int]:
    items = [int(n) for n in version.split(".")]
    assert len(items) == 3
    return (*items, 0)  # type: ignore


class TranslationsHeader(Struct):
    language: str
    version: tuple[int, int, int, int]
    data_len: int
    translations_count: int
    fonts_offset: int
    data_hash: bytes
    change_language_title: str
    change_language_prompt: str

    # fmt: off
    SUBCON = c.Struct(
        "_start_offset" / c.Tell,
        "magic" / c.Const(b"TRTR"),
        "language" / c.PaddedString(2, "ascii"),
        "version" / TupleAdapter(c.Int8ul, c.Int8ul, c.Int8ul, c.Int8ul),
        "data_len" / c.Int16ul,
        "translations_count" / c.Int16ul,
        "fonts_offset" / c.Int16ul,
        "data_hash" / c.Bytes(32),
        "change_language_title" / c.PascalString(c.Int8ul, "utf8"),
        "change_language_prompt" / c.PascalString(c.Int8ul, "utf8"),
        "_end_offset" / c.Tell,
        "_maxlen" / c.Check(c.this._end_offset - c.this._start_offset <= HEADER_MAX_LEN),
    )
    # fmt: on


class TranslationsSignedHeader(Struct):
    header_bytes: bytes
    merkle_proof: list[bytes]
    sigmask: int
    signature: bytes

    header: TranslationsHeader = subcon(TranslationsHeader)

    # fmt: off
    SUBCON = c.Struct(
        "header_bytes" / c.Padded(c.Bytes(HEADER_MAX_LEN), HEADER_MAX_LEN),
        "merkle_proof" / c.PrefixedArray(c.Int8ul, c.Bytes(32)),
        "sigmask" / c.Int8ul,
        "signature" / c.Bytes(64),
        "header" / c.RestreamData(c.this.header_bytes, TranslationsHeader.SUBCON),
    )
    # fmt: on

    def __setattr__(self, name: str, value: t.Any) -> None:
        if name == "header_bytes":
            raise AttributeError("Cannot set header_bytes directly")
        super().__setattr__(name, value)
        if name == "header":
            super().__setattr__("header_bytes", value.build())


class BlobTable(Struct):
    length: int
    offsets: list[tuple[int, int]]
    data: bytes

    # fmt: off
    SUBCON = c.Struct(
        "length" / c.Int16ul,
        "offsets" / c.Array(c.this.length + 1, TupleAdapter(c.Int16ul, c.Int16ul)),
        "data" / c.GreedyBytes,
    )
    # fmt: on

    SENTINEL: t.ClassVar[int] = 0xFFFF

    @classmethod
    def from_items(cls, items: dict[int, bytes]) -> Self:
        keys = sorted(items.keys()) + [cls.SENTINEL]
        items_sorted = [items[key] for key in keys]
        offsets = list(offsets_seq(items_sorted))
        assert len(keys) == len(offsets)
        return cls(
            length=len(items),
            offsets=list(zip(keys, offsets)),
            data=b"".join(items_sorted),
        )

    def get(self, id: int) -> bytes | None:
        if id == self.SENTINEL:
            return None
        for key, offset in self.offsets:
            if key == id:
                return self.data[offset : self.offsets[key + 1][1]]
        return None


class TranslationData(Struct):
    offsets: list[int]
    strings: bytes

    # fmt: off
    SUBCON = c.Struct(
        "offsets" / c.GreedyRange(c.Int16ul),
        "strings" / c.GreedyBytes,
    )
    # fmt: on

    @classmethod
    def from_items(cls, items: list[str]) -> Self:
        item_bytes = [_normalize(item).encode("utf-8") for item in items]
        offsets = list(offsets_seq(item_bytes))
        return cls(offsets=offsets, strings=b"".join(item_bytes))

    def __len__(self) -> int:
        return len(self.offsets) - 1

    def get(self, idx: int) -> str | None:
        if idx >= len(self.offsets) - 1:
            return None
        return self.strings[self.offsets[idx] : self.offsets[idx + 1]].decode("utf-8")


class FileInfo(TypedDict):
    language: str
    version: str
    supported_models: list[str]


# TODO: might create some tests for reading the resulting blob
# TODO: try to apply some compression of the blob


class Signing:
    def __init__(self, data_without_sig: bytes) -> None:
        self.data_without_sig = data_without_sig

    def perform_dev_signing(self) -> bytes:
        """Signs the appropriate data and returns the blob with the signature"""
        signature = self._get_dev_signature()
        return self.apply_signature(signature)

    def _get_dev_signature(self) -> bytes:
        """Returns the development signature of the data."""
        to_sign = self.hash_to_sign()
        return sign_with_dev_keys(to_sign)

    def hash_to_sign(self) -> bytes:
        """Returns the data that should be signed - hash of header with empty signature."""
        data_to_sign = self.data_without_sig[:HEADER_LEN]
        for byte in data_to_sign[-SIG_LEN:]:
            assert byte == 0, "Signature should be empty"
        return sha256(data_to_sign).digest()

    def apply_signature(self, signature: bytes) -> bytes:
        """Put signature data at the right location into the header."""
        assert len(signature) == SIG_LEN - 1, "Signature should be 64 bytes long"
        to_write = SIGMASK + signature
        assert len(to_write) == SIG_LEN, "Signature and sigmask should be 65 bytes long"
        return (
            self.data_without_sig[: HEADER_LEN - SIG_LEN]
            + to_write
            + self.data_without_sig[HEADER_LEN:]
        )


def get_file_info(json_file: Path) -> FileInfo:
    with open(json_file, "r") as f:
        data = json.load(f)
    header: JsonHeaderData = data["header"]
    font = data["font"]
    supported_models = list(font.keys())
    return {
        "language": header["language"],
        "version": header["version"],
        "supported_models": supported_models,
    }


def blob_from_file(json_file: Path, model: str, sign_dev: bool = True) -> bytes:
    with open(json_file, "r") as f:
        data = json.load(f)
    file_dir = json_file.parent
    font_dir = file_dir / "fonts"
    order_json_file = file_dir / "order.json"
    return blob_from_dict(data, font_dir, order_json_file, model, sign_dev)


def blob_from_url(url: str) -> bytes:
    r = requests.get(url)
    r.raise_for_status()
    return r.content


def blob_from_dict(
    data: dict[str, t.Any],
    font_dir: Path,
    order_json_file: Path,
    model: str,
    sign_dev: bool = True,
) -> bytes:
    header: JsonHeaderData = data["header"]
    translations: JsonTranslationData = data["translations"]
    font = data["font"]
    if model not in font:
        raise ValueError(
            f"Font for model {model} not found --- use one of {list(font.keys())}"
        )
    model_font: JsonFontData = font[model]
    order_raw: dict[str, str] = json.loads(order_json_file.read_text())
    order: JsonOrderData = {int(k): v for k, v in order_raw.items()}
    blob = _blob_from_data(header, translations, model_font, font_dir, order)
    if sign_dev:
        blob = Signing(blob).perform_dev_signing()
    return blob


def _blob_from_data(
    header: JsonHeaderData,
    translations: JsonTranslationData,
    font: JsonFontData,
    font_dir: Path,
    order: JsonOrderData,
) -> bytes:
    translation_data = _create_translation_data(translations, order)
    font_data = _create_font_data(font, font_dir)

    translation_data_padded = c.Aligned(ALIGNMENT, c.GreedyBytes).build(
        translation_data.build()
    )
    data_blob = translation_data_padded + font_data.build()

    header_struct = TranslationsHeader(
        language=header["language"],
        version=_version_to_tuple(header["version"]),
        data_len=len(data_blob),
        translations_count=len(translation_data),
        fonts_offset=len(translation_data_padded),
        data_hash=sha256(data_blob).digest(),
        change_language_title=header["change_language_title"],
        change_language_prompt=header["change_language_prompt"],
    )
    header_blob = _create_header_blob(
        magic=MAGIC,
        lang=header["language"],
        version=header["version"],
        data_len=len(data_blob),
        translations_length=len(translations_blob),
        translations_num=translations_num,
        data_hash=sha256(data_blob).digest(),
        change_language_title=header["change_language_title"],
        change_language_prompt=header["change_language_prompt"],
    )
    assert len(header_blob) == HEADER_LEN, "Header should be 256 bytes long"

    final_blob = header_blob + data_blob
    assert len(final_blob) % 2 == 0, "Final blob should be aligned to 2 bytes"

    return final_blob


def _create_font_data(font: JsonFontData, font_dir: Path) -> BlobTable:
    """Example structure of the font dict:
    (The leading number corresponds to the C representation of each font)
    {
        "1_FONT_NORMAL": "font_tthoves_regular_21_cs.json",
        "2_FONT_BOLD": "font_tthoves_bold_17_cs.json",
        "3_FONT_MONO": "font_robotomono_medium_20_cs.json",
        "4_FONT_BIG": null,
        "5_FONT_DEMIBOLD": "font_tthoves_demibold_21_cs.json"
    }
    """
    fonts = {}
    for font_name, file_name in font.items():
        if not file_name:
            continue
        file_path = font_dir / file_name
        font_num = int(font_name.split("_")[0])
        fonts[font_num] = _font_blob_from_file(file_path)
    return BlobTable.from_items(fonts)


def _font_blob_from_file(json_file: Path) -> bytes:
    json_content = json.loads(json_file.read_text())
    assert all(len(codepoint) == 1 for codepoint in json_content)
    raw_content = {
        ord(codepoint): bytes.fromhex(data) for codepoint, data in json_content.items()
    }
    table = BlobTable.from_items(raw_content).build()
    return c.Aligned(ALIGNMENT, c.GreedyBytes).build(table)


def _create_translation_data(
    translations: JsonTranslationData, order: JsonOrderData
) -> TranslationData:
    items_to_write: dict[str, str] = {}
    for section_name, section in translations.items():
        for k, v in section.items():
            name = f"{section_name}__{k}"
            items_to_write[name] = v

    sorted_order = sorted(order.items())
    sorted_items = [items_to_write[name] for _, name in sorted_order]
    return TranslationData.from_items(sorted_items)