summaryrefslogtreecommitdiff
path: root/repo/www/helpers.py
blob: 12d9a4142e4a9525d5613c277b47e526e3cdc1d8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
from collections import defaultdict
from dataclasses import dataclass, field
from itertools import chain
from os import environ, path
from pathlib import Path
from subprocess import CalledProcessError, run
from tempfile import NamedTemporaryFile
from typing import Dict, Iterable, Iterator, List, Set, Union


@dataclass
class Directory:
    """One directory of the site tree: its file names and child folder names.

    The fields were previously annotated ``Iterator[str]``, but they are
    mutable containers — ``files`` is appended to and ``subfolders`` is
    added to by callers — so ``List``/``Set`` is the accurate contract.
    """

    # Bare file names (no directory component) living directly in this dir.
    files: List[str] = field(default_factory=list)
    # Names of immediate child directories; a set to deduplicate.
    subfolders: Set[str] = field(default_factory=set)

    def serialize(self):
        """Return a JSON-friendly dict with deterministically sorted lists."""
        return {
            'files': sorted(self.files),
            'subfolders': sorted(self.subfolders)
        }

    @classmethod
    def deserialize(cls, d):
        """Rebuild a Directory from a dict produced by serialize().

        Note: ``subfolders`` stays whatever container ``d`` carries
        (a list after a serialize() round-trip).
        """
        return cls(**d)


def _find_files(extensions, repository):
    patterns = (f'**.{ext}' for ext in extensions)
    zero = '\x00'
    return repository.git.ls_files('-z', *patterns).strip(zero).split(zero)


def _fill_directories(files, top_dir):
    """Group file names by directory and register each directory's children.

    Returns a ``defaultdict`` mapping directory path -> Directory, where
    the repository root is the empty-string key.

    NOTE(review): ``top_dir`` is accepted for interface compatibility but
    is not used by this implementation.
    """
    directories = defaultdict(Directory)

    for file_path in files:
        parent_dir, file_name = path.split(file_path)
        directories[parent_dir].files.append(file_name)

        # Walk upward to the root, recording each folder as a subfolder
        # of its parent so intermediate directories always exist.
        current = parent_dir
        while current:
            above, name = path.split(current)
            directories[above].subfolders.add(name)
            current = above

    return directories


def compute_directories(extensions, repository):
    """Scan *repository* for files matching *extensions* and build the tree.

    Combines _find_files and _fill_directories; ``top_dir`` is the
    repository working directory relative to the current directory.
    """
    top_dir = path.relpath(repository.working_dir, path.curdir)
    matched = _find_files(extensions, repository)
    return _fill_directories(matched, top_dir)


def deserialize_directories(directories):
    """Rebuild a path -> Directory mapping from serialized dicts."""
    rebuilt = {}
    for dirname, payload in directories.items():
        rebuilt[dirname] = Directory.deserialize(payload)
    return rebuilt


class _NullPreprocessor:
    def __init__(self, source_path):
        self._source_path = source_path

    def __enter__(self):
        self.output = self._source_path
        return self

    def __exit__(self, *args):
        pass

class _OrgPreprocessor:
    """Preprocess an Org file through Emacs into a temporary ``.org`` file.

    On __enter__, runs ``emacs --batch`` with ``preprocess-org.el`` and
    captures its stdout into a NamedTemporaryFile; ``self.output`` is that
    file's path. The temp file is deleted on __exit__.
    """

    def __init__(self, source_path):
        self._source_path = source_path

    def __enter__(self):
        self._output = NamedTemporaryFile(mode='w+', suffix='.org')
        try:
            run((
                'emacs', '-Q', '--batch', '--load', 'preprocess-org.el',
                '--eval', f'(preprocess-org "{self._source_path}")'
            ), check=True, stdout=self._output)
        except BaseException:
            # Fix: previously only CalledProcessError triggered cleanup,
            # so e.g. FileNotFoundError (emacs not installed) or a
            # KeyboardInterrupt leaked the temp file. Close (and thereby
            # delete) it on any failure, then re-raise unchanged.
            self._output.close()
            raise

        self.output = self._output.name
        return self

    def __exit__(self, *args):
        # Closing a NamedTemporaryFile removes it from disk.
        self._output.close()

# Map a file extension (without the leading dot) to its preprocessor class.
# Anything not listed falls back to the pass-through _NullPreprocessor.
_PREPROCESSORS = defaultdict(lambda: _NullPreprocessor,
                             (('org', _OrgPreprocessor),))


_PathArg = Union[Path, str, bytes]

@dataclass
class PandocRunner:
    """Builds and executes a pandoc command line for a single page.

    Attributes:
        output: path of the file pandoc writes (``-o``).
        template: pandoc template path (``--template``).
        filters: Lua filter paths, each passed as ``--lua-filter``.
        stylesheets: CSS paths, each passed as ``--css``.
        variables: template variables, each passed as ``-V key=value``.
    """

    output: _PathArg
    template: _PathArg
    # Fix: previously annotated Iterator[_PathArg], but callers pass lists
    # and run() iterates these each call — an actual iterator would be
    # exhausted after the first run(). Iterable is the accurate contract.
    filters: Iterable[_PathArg]
    stylesheets: Iterable[_PathArg]
    variables: Dict[str, str] = field(default_factory=dict)

    def run(self, page, include_after=(), metadata=None):
        """Preprocess *page* (selected by its extension) and run pandoc on it.

        Args:
            page: source file path; the extension picks the preprocessor.
            include_after: extra files passed via ``--include-after-body``.
            metadata: optional mapping passed via ``-M key=value``.

        Raises:
            CalledProcessError: if the preprocessor or pandoc exits non-zero.
        """
        cmd = (
            'pandoc', '-s', '-o', self.output, '--template', self.template,
            *chain(*(('--lua-filter', f) for f in self.filters)),
            *chain(*(('--css', s) for s in self.stylesheets)),
            *chain(*(('--include-after-body', f) for f in include_after))
        )

        cmd += tuple(chain(
            *(('-V', f'{k}={v}') for k, v in self.variables.items())
        ))
        if metadata is not None:
            cmd += tuple(chain(
                *(('-M', f'{k}={v}') for k, v in metadata.items())
            ))

        # NOTE(review): mutates the whole process environment so the Lua
        # filters can require modules from .cache/ — this leaks into every
        # later subprocess, not just this pandoc run.
        environ['LUA_PATH'] = '.cache/?.lua;;'

        # splitext gives '.org' etc.; strip the dot to index the registry.
        _, ext = path.splitext(page)
        preprocessor = _PREPROCESSORS[ext[1:]]

        with preprocessor(page) as preproc:
            # The (possibly preprocessed) input file is the last argument.
            cmd = cmd + (preproc.output,)
            run(cmd, check=True)


def generate_crumbs(target):
    """Render an HTML breadcrumb trail of ``<li>`` items for *target*.

    *target* is a path-like object (site-relative); a trailing ``index``
    component is folded away so the parent directory becomes the current
    crumb. Links are computed relative to *target*'s directory.
    """
    parts = ('(top)', *target.parts)

    if parts[-1] == 'index':
        # Index pages: the containing directory itself is the current page.
        *crumbs, current = parts[:-1]
    else:
        crumbs = parts[:-1]
        current, _ = path.splitext(parts[-1])

    items = []
    for depth, crumb in enumerate(crumbs, start=1):
        # crumbs[1:depth] skips the synthetic '(top)' entry and keeps the
        # directory components above this crumb level.
        href = path.relpath(
            path.join(*crumbs[1:depth], 'index.html'),
            start=target.parent
        )
        items.append(f'<li><a href="{href}">{crumb}</a></li>')

    items.append(f'<li aria-current="page">{current}</li>')
    return '\n'.join(items)