Version 1.0.0

commit 4b51d678bb
.gitignore (vendored, new file, 164 lines, 3.0 KiB)
@@ -0,0 +1,164 @@
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

LICENSE (new file, 9 lines)
@@ -0,0 +1,9 @@
MIT License

Copyright (c) 2023 Error

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

README.md (new file, 67 lines)
@@ -0,0 +1,67 @@
# RFCartography

Visualize relations between RFCs.

## Set Up

Clone this repository and install the dependencies, e.g. using `pip install -r requirements.txt`.
Place the index file (it can be downloaded [here](https://www.rfc-editor.org/rfc-index.xml)) in the instance folder and create a configuration in that location (see `Configuration`).

Afterwards, RFCartography can be run by executing

    flask --app rfcartography run

Note: Use `flask run` only for local development.
If you want to deploy it to a production server, use a WSGI server instead, as explained in the [Flask documentation](https://flask.palletsprojects.com/en/latest/deploying/).
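
For example, with gunicorn (just one possible WSGI server; it is not listed in `requirements.txt`), the application factory can be served like this:

    pip install gunicorn
    gunicorn --workers 4 --bind 0.0.0.0:8000 'rfcartography:create_app()'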

Hint: Generating large graphs takes a lot of time.
It is highly recommended to cache responses to improve response times.
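
One way to do this (an illustration only; neither the package nor the wiring below is part of this repository) is a caching reverse proxy in front of the WSGI server, or [Flask-Caching](https://flask-caching.readthedocs.io/) with a memoized map view, roughly:

    # hypothetical sketch using Flask-Caching; not shipped with RFCartography
    from flask_caching import Cache

    cache = Cache(config={'CACHE_TYPE': 'SimpleCache', 'CACHE_DEFAULT_TIMEOUT': 3600})
    # inside create_app():        cache.init_app(app)
    # on the expensive /map view: @cache.cached(timeout=3600, query_string=True)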

## Configuration

RFCartography (or rather Flask, the framework it is based on) searches for the configuration in the `instance` directory.
To configure your instance, create a `config.py` file in this directory.

### Flask Configuration Parameters

- `SERVER_NAME`: Sets the server's name, e.g. its URL or address. RFCartography requires this value to be set correctly. If unset, it defaults to `localhost`.

For further generic configuration parameters, please refer to the [Flask documentation](https://flask.palletsprojects.com/en/latest/config/).

### RFCartography-Specific Configuration Parameters

- `INDEX_FILE`: XML file containing the RFC index, usually downloaded from [here](https://www.rfc-editor.org/rfc-index.xml)
- `NAMESPACE`: XML namespace of the RFC index, defaults to `http://www.rfc-editor.org/rfc-index`
- `DEPTH_DEFAULT`: Depth limit used for requests without a specified depth limit, defaults to 8
- `IMPRINT`: Content that shall be displayed on the imprint page, see `Custom Content` for the syntax
- `PRIVACY`: Content that shall be displayed on the privacy page, see `Custom Content` for the syntax
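
A minimal `instance/config.py` combining these parameters could look as follows (all values are placeholders, not shipped defaults):

    # instance/config.py -- example values only
    SERVER_NAME = 'rfcartography.example.org'
    INDEX_FILE = 'rfc-index.xml'    # resolved relative to the instance folder
    NAMESPACE = 'http://www.rfc-editor.org/rfc-index'
    DEPTH_DEFAULT = 8
    IMPRINT = [('Contact', 'Operator of this instance: example[at]example.org')]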

### Custom Content

The content of the imprint and the privacy pages can be configured in the config file.
The corresponding parameters take a list of tuples of strings.
The first item of each tuple is displayed as a headline, all following items as paragraphs.

Example:

    PRIVACY = [('Privacy Statement', 'This website only processes data that is necessary in order to fulfill the user\'s request, e.g. the user\'s IP address. It does not generate access logs. Personal data is discarded once the request was served.')]

Additional line breaks within a paragraph can be inserted by using a tuple of multiple strings instead of a single string as a paragraph, e.g.:

    IMPRINT = [('E-Mail', 'My mail addresses are', ('example[at]not-a-real-mail-address.net', 'another-example[at]not-a-real-mail-address.net', 'one-more.example[at]not-a-real-mail-address.net'))]

## Maintenance

Remember to update the RFC index file regularly.

## Tests

Unit tests are placed in the `tests` directory.
Run all tests by executing:

    python -m unittest discover tests

If you have coverage.py installed, you can check the coverage by executing:

    python -m coverage run -m unittest discover tests
    python -m coverage report

requirements.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
Flask
defusedxml
networkx[default]

rfcartography/__init__.py (new file, 48 lines)
@@ -0,0 +1,48 @@
from flask import Flask
from rfcartography.index_parser import IndexParser
from rfcartography.rfcartographer import RFCartographer
from rfcartography.routing import register_routers
from rfcartography.errors import register_errorhandlers


META = {'NAME': "RFCartography",
        'VERSION': "1.0.0",
        'SOURCE': "https://git.undefinedbehavior.de/undef/RFCartography"}

def create_app(test_config: dict = None) -> Flask:
    """set up the flask application"""
    # create app
    app = Flask(import_name=__name__,
                instance_relative_config=True)

    # load configuration
    if test_config is None:
        app.config.from_pyfile(filename='config.py')
    else:
        app.config.from_mapping(mapping=test_config)

    # apply default config values where missing
    if app.config['SERVER_NAME'] is None:
        app.config['SERVER_NAME'] = 'localhost'
    if 'NAMESPACE' not in app.config:
        app.config['NAMESPACE'] = 'http://www.rfc-editor.org/rfc-index'
    if 'DEPTH_DEFAULT' not in app.config:
        app.config['DEPTH_DEFAULT'] = 8
    app.config['META'] = META

    # import rfc index data
    try:
        with app.open_instance_resource(app.config['INDEX_FILE']) as rfc_index:
            xml: str = rfc_index.read()
    except Exception:
        print('Error: INDEX_FILE could not be opened.\nExiting...')
        exit(1)

    # set up the RFCartographer
    parser: IndexParser = IndexParser(xml, app.config['NAMESPACE'])
    app.cartographer: RFCartographer = RFCartographer(parser.get_index())

    # register request handlers
    register_errorhandlers(app)
    register_routers(app)
    return app
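
Usage sketch (illustrative only, not part of this commit): besides `flask --app rfcartography run`, the factory can be called directly, e.g. from a test, with a `test_config` mapping; the index file is still read from the instance folder.

    # hypothetical test snippet
    from rfcartography import create_app

    app = create_app(test_config={'SERVER_NAME': 'localhost',
                                  'INDEX_FILE': 'rfc-index.xml'})
    client = app.test_client()
    assert client.get('/').status_code == 200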

rfcartography/details.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from flask import Blueprint, render_template, current_app, abort
from rfcartography.index_parser import Document, NotIssued, DocType, Month


details: Blueprint = Blueprint('details', __name__)

@details.route('/RFC<int:num>', methods=['GET'])
def show_rfc(num: int) -> tuple[str, int]:
    """handle requests for the details page for RFCs"""
    rfc: Document = current_app.cartographer.get_document(DocType.RFC, num)
    if rfc is None:
        abort(404)
    elif isinstance(rfc, NotIssued):
        content: dict = {'title': rfc.docID(),
                         'content': [('Not Issued', 'This RFC number was retained as a placeholder, but never issued.')]}
        return render_template('generic.html', **content), 200
    else:
        url: str = "http://" + current_app.config['SERVER_NAME']
        if url[-1] != '/':
            url = url + '/'
        if rfc.pub_date is not None:
            date: str = f"{Month(rfc.pub_date.month).name} {rfc.pub_date.year}"
        else:
            date: str = ""
        context: dict = {'rfc': rfc,
                         'url': url,
                         'date': date}
        return render_template('rfc.html', **context), 200

@details.route('/STD<int:num>', methods=['GET'], defaults={'doctype': DocType.STD})
@details.route('/BCP<int:num>', methods=['GET'], defaults={'doctype': DocType.BCP})
@details.route('/FYI<int:num>', methods=['GET'], defaults={'doctype': DocType.FYI})
def show_details(num: int, doctype: DocType) -> tuple[str, int]:
    """handle requests for the details page for STDs, BCPs and FYIs"""
    doc: Document = current_app.cartographer.get_document(doctype, num)
    if doc is None:
        abort(404)
    url: str = "http://" + current_app.config['SERVER_NAME']
    if url[-1] != '/':
        url = url + '/'
    context: dict = {'doc': doc,
                     'url': url}
    return render_template('details.html', **context), 200

rfcartography/errors.py (new file, 29 lines)
@@ -0,0 +1,29 @@
from flask import Flask, render_template


def register_errorhandlers(app: Flask) -> None:
    @app.errorhandler(400)
    def bad_request(e) -> tuple:
        content: dict = {'title': 'Bad Request',
                         'content': [('HTTP Status 400', 'The request is malformed and cannot be processed.')]}
        return render_template('generic.html', **content), 400

    @app.errorhandler(404)
    def not_found(e) -> tuple:
        content: dict = {'title': 'Not Found',
                         'content': [('HTTP Status 404', 'The requested resource was not found.')]}
        return render_template('generic.html', **content), 404

    @app.errorhandler(405)
    def method_not_allowed(e) -> tuple:
        content: dict = {'title': 'Method Not Allowed',
                         'content': [('HTTP Status 405', 'The requested method is not allowed for this resource.')]}
        return render_template('generic.html', **content), 405

    @app.errorhandler(500)
    def internal_server_error(e) -> tuple:
        content: dict = {'title': 'Internal Server Error',
                         'content': [('HTTP Status 500', 'The request cannot be answered due to an internal server error.')]}
        return render_template('generic.html', **content), 500

    return

rfcartography/index_parser.py (new file, 514 lines)
@@ -0,0 +1,514 @@
from enum import Enum, auto
from abc import ABC, abstractmethod
from datetime import date
from xml.etree.ElementTree import Element
from defusedxml.ElementTree import fromstring


class DocType(Enum):
    RFC = 1
    STD = 2
    BCP = 3
    FYI = 4
    NIC = 5
    IEN = 6
    RTR = 7

    def docID(self,
              num: int) -> str:
        if self.value < 5: # RFC, STD, BCP, FYI
            return f"{self.name}{str(num).rjust(4, '0')}"
        else: # NIC, IEN, RTR
            return f"{self.name}{num}"


class Status(Enum):
    INTERNET_STANDARD = auto()
    DRAFT_STANDARD = auto()
    PROPOSED_STANDARD = auto()
    UNKNOWN = auto()
    BEST_CURRENT_PRACTICE = auto()
    FOR_YOUR_INFORMATION = auto()
    EXPERIMENTAL = auto()
    HISTORIC = auto()
    INFORMATIONAL = auto()


class FileFormat(Enum):
    ASCII = auto()
    PS = auto()
    PDF = auto()
    TGZ = auto()
    HTML = auto()
    XML = auto()
    TEXT = auto()


class Stream(Enum):
    IETF = auto()
    IAB = auto()
    IRTF = auto()
    INDEPENDENT = auto()
    Editorial = auto()
    Legacy = auto()


class Month(Enum):
    January = 1
    February = 2
    March = 3
    April = 4
    May = 5
    June = 6
    July = 7
    August = 8
    September = 9
    October = 10
    November = 11
    December = 12


class Author:
    def __init__(self,
                 name: str,
                 title: str = "",
                 organization: str = "",
                 org_abbrev: str = ""):
        self.name: str = name
        self.title: str = title
        self.organization: str = organization
        self.org_abbrev: str = org_abbrev
        return


class Document(ABC):
    def __init__(self,
                 type: DocType,
                 number: int,
                 title: str = "",
                 is_also: list['Document'] = []):
        self.type: DocType = type
        self.number: int = number
        self.title: str = title
        self.is_also: list['Document'] = is_also
        return

    def docID(self) -> str:
        return self.type.docID(self.number)

    @abstractmethod
    def update(self, **kwargs) -> 'Document':
        pass

    @abstractmethod
    def get_references(self) -> list[tuple[str, 'Document']]:
        pass


class RFC(Document):
    def __init__(self,
                 number: int,
                 title: str = "",
                 authors: list[Author] = [],
                 pub_date: date = None,
                 current_status: Status = Status.UNKNOWN,
                 pub_status: Status = Status.UNKNOWN,
                 format: list[FileFormat] = [],
                 page_count: int = None,
                 keywords: list[str] = [],
                 abstract: list[str] = [],
                 draft: str = "",
                 notes: str = "",
                 obsoletes: list[Document] = [],
                 obsoleted_by: list[Document] = [],
                 updates: list[Document] = [],
                 updated_by: list[Document] = [],
                 is_also: list[Document] = [],
                 see_also: list[Document] = [],
                 stream: Stream = None,
                 area: str = "",
                 wg_acronym: str = "",
                 errata_url: str = "",
                 doi: str = ""):
        super().__init__(DocType.RFC, number, title, is_also)
        self.authors: list[Author] = authors
        self.pub_date: date = pub_date
        self.format: list[FileFormat] = format
        self.page_count: int = page_count
        self.keywords: list[str] = keywords
        self.abstract: list[str] = abstract
        self.draft: str = draft
        self.notes: str = notes
        self.obsoletes: list[Document] = obsoletes
        self.obsoleted_by: list[Document] = obsoleted_by
        self.updates: list[Document] = updates
        self.updated_by: list[Document] = updated_by
        self.see_also: list[Document] = see_also
        self.current_status: Status = current_status
        self.pub_status: Status = pub_status
        self.stream: Stream = stream
        self.area: str = area
        self.wg_acronym: str = wg_acronym
        self.errata_url: str = errata_url
        self.doi: str = doi
        return

    def update(self, **kwargs) -> Document:
        if 'title' in kwargs:
            self.title = kwargs['title']
        if 'authors' in kwargs:
            self.authors = kwargs['authors']
        if 'pub_date' in kwargs:
            self.pub_date = kwargs["pub_date"]
        if 'current_status' in kwargs:
            self.current_status = kwargs["current_status"]
        if 'pub_status' in kwargs:
            self.pub_status = kwargs["pub_status"]
        if 'format' in kwargs:
            self.format = kwargs["format"]
        if 'page_count' in kwargs:
            self.page_count = kwargs["page_count"]
        if 'keywords' in kwargs:
            self.keywords = kwargs["keywords"]
        if 'abstract' in kwargs:
            self.abstract = kwargs["abstract"]
        if 'draft' in kwargs:
            self.draft = kwargs["draft"]
        if 'notes' in kwargs:
            self.notes = kwargs["notes"]
        if 'obsoletes' in kwargs:
            self.obsoletes = kwargs["obsoletes"]
        if 'obsoleted_by' in kwargs:
            self.obsoleted_by = kwargs["obsoleted_by"]
        if 'updates' in kwargs:
            self.updates = kwargs["updates"]
        if 'updated_by' in kwargs:
            self.updated_by = kwargs["updated_by"]
        if 'is_also' in kwargs:
            self.is_also = kwargs["is_also"]
        if 'see_also' in kwargs:
            self.see_also = kwargs["see_also"]
        if 'stream' in kwargs:
            self.stream = kwargs["stream"]
        if 'area' in kwargs:
            self.area = kwargs["area"]
        if 'wg_acronym' in kwargs:
            self.wg_acronym = kwargs["wg_acronym"]
        if 'errata_url' in kwargs:
            self.errata_url = kwargs["errata_url"]
        if 'doi' in kwargs:
            self.doi = kwargs["doi"]
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        reftypes: list[str] = ["obsoletes"]*len(self.obsoletes)\
                              + ["obsoleted by"]*len(self.obsoleted_by)\
                              + ["updates"]*len(self.updates)\
                              + ["updated by"]*len(self.updated_by)\
                              + ["is also"]*len(self.is_also)\
                              + ["see also"]*len(self.see_also)
        refs: list[Document] = self.obsoletes \
                               + self.obsoleted_by \
                               + self.updates \
                               + self.updated_by \
                               + self.is_also \
                               + self.see_also
        return list(zip(reftypes, refs))


class NotIssued(Document):
    def __init__(self,
                 number: int):
        super().__init__(DocType.RFC, number)
        return

    def update(self, **kwargs) -> Document:
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return []


class STD(Document):
    def __init__(self,
                 number: int,
                 title: str = "",
                 is_also: list[Document] = []):
        super().__init__(DocType.STD, number, title, is_also)
        return

    def update(self, **kwargs) -> Document:
        if 'title' in kwargs:
            self.title = kwargs['title']
        if 'is_also' in kwargs:
            self.is_also = kwargs['is_also']
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return list(zip(["is also"]*len(self.is_also), self.is_also))


class BCP(Document):
    def __init__(self,
                 number: int,
                 title: str = "",
                 is_also: list[Document] = []):
        super().__init__(DocType.BCP, number, title, is_also)
        return

    def update(self, **kwargs) -> Document:
        if 'title' in kwargs:
            self.title = kwargs['title']
        if 'is_also' in kwargs:
            self.is_also = kwargs['is_also']
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return list(zip(["is also"]*len(self.is_also), self.is_also))


class FYI(Document):
    def __init__(self,
                 number: int,
                 title: str = "",
                 is_also: list[Document] = []):
        super().__init__(DocType.FYI, number, title, is_also)
        return

    def update(self, **kwargs) -> Document:
        if 'title' in kwargs:
            self.title = kwargs['title']
        if 'is_also' in kwargs:
            self.is_also = kwargs['is_also']
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return list(zip(["is also"]*len(self.is_also), self.is_also))


class NIC(Document):
    def __init__(self,
                 number: int):
        super().__init__(DocType.NIC, number)
        return

    def update(self, **kwargs) -> Document:
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return []


class IEN(Document):
    def __init__(self,
                 number: int):
        super().__init__(DocType.IEN, number)
        return

    def update(self, **kwargs) -> Document:
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return []


class RTR(Document):
    def __init__(self,
                 number: int):
        super().__init__(DocType.RTR, number)
        return

    def update(self, **kwargs) -> Document:
        return self

    def get_references(self) -> list[tuple[str, Document]]:
        return []


class IndexParser:
    def __init__(self,
                 xml: str,
                 namespace: str = "http://www.rfc-editor.org/rfc-index"):
        def _get_reflist(container: Element | None) -> list[Document]:
            reflist: list[Document] = []
            if container is not None:
                for ref in container.findall(f"{{{namespace}}}doc-id"):
                    ref_type: DocType = DocType[ref.text[:3]]
                    ref_num: int = int(ref.text[3:])
                    if ref_num not in self.index[ref_type]:
                        if ref_type == DocType.RFC:
                            self.index[DocType.RFC][ref_num] = RFC(ref_num)
                        elif ref_type == DocType.STD:
                            self.index[DocType.STD][ref_num] = STD(ref_num)
                        elif ref_type == DocType.BCP:
                            self.index[DocType.BCP][ref_num] = BCP(ref_num)
                        elif ref_type == DocType.FYI:
                            self.index[DocType.FYI][ref_num] = FYI(ref_num)
                        elif ref_type == DocType.NIC:
                            self.index[DocType.NIC][ref_num] = NIC(ref_num)
                        elif ref_type == DocType.IEN:
                            self.index[DocType.IEN][ref_num] = IEN(ref_num)
                        else: # ref_type == DocType.RTR
                            self.index[DocType.RTR][ref_num] = RTR(ref_num)
                    reflist.append(self.index[ref_type][ref_num])
            return reflist

        self.index: dict[DocType, dict[int, Document]] = {DocType.RFC: {},
                                                          DocType.STD: {},
                                                          DocType.BCP: {},
                                                          DocType.FYI: {},
                                                          DocType.NIC: {},
                                                          DocType.IEN: {},
                                                          DocType.RTR: {}}

        root: Element = fromstring(xml)
        for child in root:
            if child.tag == f"{{{namespace}}}rfc-entry":
                docID: str = child.findtext(f"{{{namespace}}}doc-id")
                number: int = int(docID[3:])
                title: str = child.findtext(f"{{{namespace}}}title")
                authors: list[Author] = []
                for author in child.findall(f"{{{namespace}}}author"):
                    name: str = author.findtext(f"{{{namespace}}}name")
                    auth_title: str = author.findtext(f"{{{namespace}}}title", "")
                    org: str = author.findtext(f"{{{namespace}}}organization", "")
                    org_abbrev: str = author.findtext(f"{{{namespace}}}org-abbrev", "")
                    authors.append(Author(name, auth_title, org, org_abbrev))
                tmp: Element | None = child.find(f"{{{namespace}}}date")
                pub_year: int = int(tmp.findtext(f"{{{namespace}}}year"))
                pub_month: int = Month[tmp.findtext(f"{{{namespace}}}month")].value
                pub_day: int = int(tmp.findtext(f"{{{namespace}}}day", "1"))
                pub_date: date = date(pub_year, pub_month, pub_day)
                format: list[FileFormat] = []
                tmp = child.find(f"{{{namespace}}}format")
                if tmp is not None:
                    for file_format in tmp.findall(f"{{{namespace}}}file-format"):
                        format.append(FileFormat[file_format.text])
                page_count: int = int(child.findtext(f"{{{namespace}}}page-count", "-1"))
                if page_count < 0:
                    page_count = None
                keywords: list[str] = []
                tmp = child.find(f"{{{namespace}}}keywords")
                if tmp is not None:
                    for kw in tmp.findall(f"{{{namespace}}}kw"):
                        keywords.append(kw.text)
                abstract: list[str] = []
                tmp = child.find(f"{{{namespace}}}abstract")
                if tmp is not None:
                    for p in tmp.findall(f"{{{namespace}}}p"):
                        abstract.append(p.text)
                draft: str = child.findtext(f"{{{namespace}}}draft", "")
                notes: str = child.findtext(f"{{{namespace}}}notes", "")
                tmp = child.find(f"{{{namespace}}}obsoletes")
                obsoletes: list[Document] = _get_reflist(tmp)
                tmp = child.find(f"{{{namespace}}}obsoleted-by")
                obsoleted_by: list[Document] = _get_reflist(tmp)
                tmp = child.find(f"{{{namespace}}}updates")
                updates: list[Document] = _get_reflist(tmp)
                tmp = child.find(f"{{{namespace}}}updated-by")
                updated_by: list[Document] = _get_reflist(tmp)
                tmp = child.find(f"{{{namespace}}}is-also")
                is_also: list[Document] = _get_reflist(tmp)
                tmp = child.find(f"{{{namespace}}}see-also")
                see_also: list[Document] = _get_reflist(tmp)
                current_status: Status = Status[child.findtext(f"{{{namespace}}}current-status").replace(" ", "_")]
                pub_status: Status = Status[child.findtext(f"{{{namespace}}}publication-status").replace(" ", "_")]
                stream: Stream = None
                tmp = child.find(f"{{{namespace}}}stream")
                if tmp is not None:
                    stream = Stream[tmp.text]
                area: str = child.findtext(f"{{{namespace}}}area", "")
                wg_acronym: str = child.findtext(f"{{{namespace}}}wg_acronym", "")
                errata_url: str = child.findtext(f"{{{namespace}}}errata-url", "")
                doi: str = child.findtext(f"{{{namespace}}}doi", "")
                if number in self.index[DocType.RFC]:
                    self.index[DocType.RFC][number].update(title=title,
                                                           authors=authors,
                                                           pub_date=pub_date,
                                                           current_status=current_status,
                                                           pub_status=pub_status,
                                                           format=format,
                                                           page_count=page_count,
                                                           keywords=keywords,
                                                           abstract=abstract,
                                                           draft=draft,
                                                           notes=notes,
                                                           obsoletes=obsoletes,
                                                           obsoleted_by=obsoleted_by,
                                                           updates=updates,
                                                           updated_by=updated_by,
                                                           is_also=is_also,
                                                           see_also=see_also,
                                                           stream=stream,
                                                           area=area,
                                                           wg_acronym=wg_acronym,
                                                           errata_url=errata_url,
                                                           doi=doi)
                else:
                    self.index[DocType.RFC][number] = RFC(number,
                                                          title,
                                                          authors,
                                                          pub_date,
                                                          current_status,
                                                          pub_status,
                                                          format,
                                                          page_count,
                                                          keywords,
                                                          abstract,
                                                          draft,
                                                          notes,
                                                          obsoletes,
                                                          obsoleted_by,
                                                          updates,
                                                          updated_by,
                                                          is_also,
                                                          see_also,
                                                          stream,
                                                          area,
                                                          wg_acronym,
                                                          errata_url,
                                                          doi)
                continue
            elif child.tag == f"{{{namespace}}}rfc-not-issued-entry":
                docID: str = child.findtext(f"{{{namespace}}}doc-id")
                number: int = int(docID[3:])
                if number not in self.index[DocType.RFC]:
                    self.index[DocType.RFC][number] = NotIssued(number)
                continue
            elif child.tag == f"{{{namespace}}}std-entry":
                docID: str = child.findtext(f"{{{namespace}}}doc-id")
                number: int = int(docID[3:])
                title: str = child.findtext(f"{{{namespace}}}title")
                alias: Element = child.find(f"{{{namespace}}}is-also")
                is_also: list[Document] = _get_reflist(alias)
                if number in self.index[DocType.STD]:
                    self.index[DocType.STD][number].update(title=title, is_also=is_also)
                else:
                    self.index[DocType.STD][number] = STD(number, title, is_also)
                continue
            elif child.tag == f"{{{namespace}}}bcp-entry":
                docID: str = child.findtext(f"{{{namespace}}}doc-id")
                number: int = int(docID[3:])
                title: str = child.findtext(f"{{{namespace}}}title", "")
                alias: Element = child.find(f"{{{namespace}}}is-also")
                is_also: list[Document] = _get_reflist(alias)
                if number in self.index[DocType.BCP]:
                    self.index[DocType.BCP][number].update(title=title, is_also=is_also)
                else:
                    self.index[DocType.BCP][number] = BCP(number, title, is_also)
                continue
            elif child.tag == f"{{{namespace}}}fyi-entry":
                docID: str = child.findtext(f"{{{namespace}}}doc-id")
                number: int = int(docID[3:])
                title: str = child.findtext(f"{{{namespace}}}title", "")
                alias: Element = child.find(f"{{{namespace}}}is-also")
                is_also: list[Document] = _get_reflist(alias)
                if number in self.index[DocType.FYI]:
                    self.index[DocType.FYI][number].update(title=title, is_also=is_also)
                else:
                    self.index[DocType.FYI][number] = FYI(number, title, is_also)
                continue
        return

    def get_index(self) -> dict[DocType, dict[int, Document]]:
        return self.index
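
Usage sketch for the parser (illustrative only, not part of this commit; path and RFC number are placeholders):

    # hypothetical standalone use of IndexParser
    from rfcartography.index_parser import IndexParser, DocType

    with open('instance/rfc-index.xml', 'rb') as f:
        index = IndexParser(f.read()).get_index()

    rfc = index[DocType.RFC].get(8446)
    if rfc is not None:
        print(rfc.docID(), rfc.title)
        for reftype, target in rfc.get_references():
            print(f"  {reftype} {target.docID()}")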

rfcartography/rfcartographer.py (new file, 172 lines)
@@ -0,0 +1,172 @@
from networkx import MultiDiGraph, kamada_kawai_layout, \
                     draw_networkx_nodes, draw_networkx_edges
from matplotlib.pyplot import figure, close
from matplotlib.figure import Figure
from matplotlib.layout_engine import TightLayoutEngine
from io import StringIO
from math import sqrt
from rfcartography.index_parser import Document, DocType


class RFCMap:
    def __init__(self,
                 graph: MultiDiGraph,
                 nodes: dict[DocType, list[Document]],
                 edges: dict[str, list[tuple[Document, Document]]],
                 url_base: str,
                 node_color: dict[DocType, str] = {DocType.RFC: '#2072b1',
                                                   DocType.STD: '#c21a7e',
                                                   DocType.BCP: '#6d388d',
                                                   DocType.FYI: '#8bbd3e',
                                                   DocType.NIC: '#efe50b',
                                                   DocType.IEN: '#f28f20',
                                                   DocType.RTR: '#e32326'},
                 edge_style: dict[str, tuple[str, str]] = {'obsoletes': ('dashed', '#607d8d'),
                                                           'obsoleted by': ('dashed', '#303c50'),
                                                           'updates': ('dashdot', '#607d8d'),
                                                           'updated by': ('dashdot', '#303c50'),
                                                           'is also': ('solid', '#132e41'),
                                                           'see also': ('dotted', '#008e90')}):
        self.graph: MultiDiGraph = graph
        self.nodes: dict[DocType, list[Document]] = nodes
        self.edges: dict[str, list[tuple[Document, Document]]] = edges
        self.node_color: dict[DocType, str] = node_color
        self.edge_style: dict[str, tuple[str, str]] = edge_style
        self.url_base: str = url_base
        self.position: dict = kamada_kawai_layout(self.graph)
        return

    def set_node_color(self,
                       doctype: DocType,
                       color: str) -> None:
        self.node_color[doctype] = color
        return

    def set_edge_style(self,
                       reftype: str,
                       style: tuple[str, str]) -> None:
        self.edge_style[reftype] = style
        return

    def set_url_base(self,
                     url_base: str) -> None:
        self.url_base = url_base
        return

    def get_node_colors(self) -> dict[DocType, str]:
        return self.node_color

    def get_edge_styles(self) -> dict[str, tuple[str, str]]:
        return self.edge_style

    def get_url_base(self) -> str:
        return self.url_base

    def get_node_count(self) -> int:
        return self.graph.number_of_nodes()

    def get_edge_count(self) -> int:
        return self.graph.size()

    def draw(self) -> str:
        nodes: list[Document] = []
        node_colors: list[str] = []
        urls: list[str] = []

        for doctype in self.nodes:
            nodes = nodes + self.nodes[doctype]
            node_colors = node_colors + [self.node_color[doctype]]*len(self.nodes[doctype])
            for node in self.nodes[doctype]:
                urls.append(f"{self.url_base}{node.docID()}")

        edge_rad: dict[str, float] = {'obsoletes': 0.1 if len(self.edges['obsoleted by']) > 0 else 0,
                                      'obsoleted by': 0.1 if len(self.edges['obsoletes']) > 0 else 0,
                                      'updates': 0.1 if len(self.edges['updated by']) > 0 else 0,
                                      'updated by': 0.1 if len(self.edges['updates']) > 0 else 0,
                                      'is also': 0,
                                      'see also': 0}
        size: float = sqrt(len(nodes))
        fig: Figure = figure(figsize=(size, size), layout=TightLayoutEngine(pad=0.2))
        fig.clear()

        draw_networkx_nodes(self.graph, self.position,
                            nodelist=nodes,
                            node_size=128,
                            node_color=node_colors,
                            node_shape="o").set_urls(urls)

        for reftype in self.edges:
            draw_networkx_edges(self.graph, self.position,
                                edgelist=self.edges[reftype],
                                connectionstyle=f"arc3,rad={edge_rad[reftype]}",
                                style=self.edge_style[reftype][0],
                                edge_color=self.edge_style[reftype][1])

        svg: StringIO = StringIO()
        fig.savefig(svg, format='svg')
        svg.seek(0)
        close(fig)
        return svg.read().removeprefix('<?xml version="1.0" encoding="utf-8" standalone="no"?>\n<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"\n "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')


class RFCartographer:
    def __init__(self,
                 index: dict[DocType, dict[int, Document]]):
        self.index: dict[DocType, dict[int, Document]] = index
        return

    def get_document(self,
                     doctype: DocType,
                     number: int) -> Document | None:
        return self.index[doctype].get(number, None)

    def map_subnet(self,
                   core: Document,
                   url: str,
                   max_depth: int = 0,
                   node_color: dict[DocType, str] = None,
                   edge_style: dict[str, tuple[str, str]] = None,
                   node_types: list[DocType] = []) -> RFCMap:
        """generate a map for the subnet core belongs to"""
        if node_types == []:
            node_types = [DocType.RFC, DocType.STD, DocType.BCP,
                          DocType.FYI, DocType.NIC, DocType.IEN, DocType.RTR]
        nodes: dict = {DocType.RFC: [],
                       DocType.STD: [],
                       DocType.BCP: [],
                       DocType.FYI: [],
                       DocType.NIC: [],
                       DocType.IEN: [],
                       DocType.RTR: []}
        edges: dict = {'obsoletes': [],
                       'obsoleted by': [],
                       'updates': [],
                       'updated by': [],
                       'is also': [],
                       'see also': []}
        params: dict[str, dict] = {}
        if node_color is not None:
            params['node_color'] = node_color
        if edge_style is not None:
            params['edge_style'] = edge_style
        todo: list[tuple[Document, int]] = [(core, 0)]
        done: list[Document] = []
        graph: MultiDiGraph = MultiDiGraph()
        graph.add_node(core)
        nodes[core.type].append(core)

        while len(todo) > 0:
            node: tuple[Document, int] = todo.pop(0)
            if node[0] not in done:
                done.append(node[0])
                if node[1] < max_depth or max_depth <= 0:
                    for neighbor in node[0].get_references():
                        if neighbor[1].type not in node_types:
                            continue
                        if not graph.has_node(neighbor[1]):
                            graph.add_node(neighbor[1])
                            nodes[neighbor[1].type].append(neighbor[1])
                        graph.add_edge(node[0], neighbor[1], reftype=neighbor[0])
                        edges[neighbor[0]].append((node[0], neighbor[1]))
                        todo.append((neighbor[1], node[1]+1))
        return RFCMap(graph, nodes, edges, url, **params)
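
Usage sketch for the cartographer (illustrative only, not part of this commit; the document number and URL are placeholders):

    # hypothetical standalone use of RFCartographer
    from rfcartography.index_parser import IndexParser, DocType
    from rfcartography.rfcartographer import RFCartographer

    with open('instance/rfc-index.xml', 'rb') as f:
        cartographer = RFCartographer(IndexParser(f.read()).get_index())

    core = cartographer.get_document(DocType.RFC, 8446)
    rfc_map = cartographer.map_subnet(core, url='http://localhost/', max_depth=2)
    svg_fragment = rfc_map.draw()    # SVG markup to embed in a template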

rfcartography/routing.py (new file, 35 lines)
@@ -0,0 +1,35 @@
from flask import Flask, Blueprint, redirect, url_for, render_template, abort
from werkzeug.wrappers import Response
from rfcartography.search import search
from rfcartography.details import details


def register_routers(app: Flask) -> None:
    @app.route('/imprint')
    def imprint() -> tuple[str, int]:
        if 'IMPRINT' not in app.config:
            abort(404)
        content: dict = {'title': 'Imprint',
                         'content': app.config['IMPRINT']}
        return render_template('generic.html', **content), 200

    @app.route('/privacy')
    def privacy() -> tuple[str, int]:
        if 'PRIVACY' not in app.config:
            abort(404)
        content: dict = {'title': 'Privacy',
                         'content': app.config['PRIVACY']}
        return render_template('generic.html', **content), 200

    # answer favicon requests
    @app.route('/favicon.ico')
    def favicon() -> Response:
        return redirect(url_for('static', filename='favicon.svg'))

    static = Blueprint('static',
                       __name__,
                       static_folder='static')
    app.register_blueprint(static)
    app.register_blueprint(search)
    app.register_blueprint(details)
    return

rfcartography/search.py (new file, 119 lines)
@@ -0,0 +1,119 @@
from flask import Blueprint, abort, request, current_app, render_template
from rfcartography.index_parser import DocType, Document
from rfcartography.rfcartographer import RFCMap


def validate_type(user_input: str | None) -> DocType:
    """check if the given input is a DocType
    return the DocType if it is valid
    abort with HTTP Status 400 if it isn't valid"""
    if user_input is None:
        abort(400)
    try:
        doctype: DocType = DocType[user_input.upper()]
    except KeyError:
        abort(400)
    return doctype

def validate_int(user_input: str | None) -> int:
    """check if the given input is an integer
    return the int if it is valid
    abort with HTTP Status 400 if it isn't valid"""
    if user_input is None:
        abort(400)
    try:
        i: int = int(user_input)
    except ValueError:
        abort(400)
    return i

def validate_color(user_input: str) -> str:
    """check if the given user input is a valid color
    return the string if it is valid
    abort with HTTP Status 400 if it isn't valid"""
    if user_input[0] != '#' or len(user_input) != 7:
        abort(400)
    for i in range(1, 7):
        if user_input[i] not in '0123456789abcdefABCDEF':
            abort(400)
    return user_input

def validate_linestyle(user_input: str) -> str:
    """check if the given user input is a valid linestyle
    return the string if it is valid
    abort with HTTP Status 400 if it isn't valid"""
    if user_input in ['solid', 'dashed', 'dashdot', 'dotted', 'none']:
        return user_input
    else:
        abort(400)

search: Blueprint = Blueprint('search', __name__)

@search.route('/', methods=['GET'])
def provide_searchform() -> tuple[str, int]:
    """handle requests for the search form"""
    return render_template('search.html'), 200

@search.route('/map', methods=['GET'])
def handle_search_request() -> tuple[str, int]:
    """handle search requests"""
    params: dict = {}

    doctype: DocType = validate_type(request.args.get('type', None))
    num: int = validate_int(request.args.get('num', None))
    depth: int = validate_int(request.args.get('depth', current_app.config['DEPTH_DEFAULT']))
    nodes: list[DocType] = request.args.getlist('nodes_enabled', validate_type)
    if nodes == []:
        nodes = [DocType.RFC, DocType.STD, DocType.BCP, DocType.FYI,
                 DocType.NIC, DocType.IEN, DocType.RTR]
    params['node_types'] = nodes

    node_colors: dict[DocType, str | None] = {DocType.RFC: request.args.get('rfc_color', None),
                                              DocType.STD: request.args.get('std_color', None),
                                              DocType.BCP: request.args.get('bcp_color', None),
                                              DocType.FYI: request.args.get('fyi_color', None),
                                              DocType.NIC: request.args.get('nic_color', None),
                                              DocType.IEN: request.args.get('ien_color', None),
                                              DocType.RTR: request.args.get('rtr_color', None)}
    if not all(color is None for color in node_colors.values()):
        for nodetype in node_colors:
            node_colors[nodetype] = validate_color(node_colors[nodetype])
        params['node_color'] = node_colors

    edge_style: dict[str, tuple[str, str]] = \
        {'obsoletes': (request.args.get('obsoletes_style', None),
                       request.args.get('obsoletes_color', None)),
         'obsoleted by': (request.args.get('obsoleted_by_style', None),
                          request.args.get('obsoleted_by_color', None)),
         'updates': (request.args.get('updates_style', None),
                     request.args.get('updates_color', None)),
         'updated by': (request.args.get('updated_by_style', None),
                        request.args.get('updated_by_color', None)),
         'is also': (request.args.get('is_also_style', None),
                     request.args.get('is_also_color', None)),
         'see also': (request.args.get('see_also_style', None),
                      request.args.get('see_also_color', None))}
    if not all(arg is None for edge_type in edge_style.values() for arg in edge_type):
        for edge_type in edge_style:
            edge_style[edge_type] = (validate_linestyle(edge_style[edge_type][0]),
                                     validate_color(edge_style[edge_type][1]))
        params['edge_style'] = edge_style

    url: str = "http://" + current_app.config['SERVER_NAME']
    if url[-1] != '/':
        url = url + '/'

    doc: Document = current_app.cartographer.get_document(doctype, num)
    if doc is None:
        abort(404)
    else:
        rfc_map: RFCMap = current_app.cartographer.map_subnet(doc, url, depth, **params)
        content: dict = {'core_node_id': doc.docID(),
                         'map': rfc_map.draw(),
                         'nodes': nodes,
                         'node_colors': rfc_map.get_node_colors(),
                         'edge_style': rfc_map.get_edge_styles()}
        if doctype not in nodes:
            content['nodes'].append(doctype)
        return render_template('map.html', **content), 200
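
Example request handled by this endpoint (hypothetical host, document number and depth):

    http://localhost:5000/map?type=RFC&num=8446&depth=2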

rfcartography/static/css/map.css (new file, 47 lines)
@@ -0,0 +1,47 @@
main h1 + svg {
    width: 70%;
    height: auto;
}

main h1 + svg + div {
    width: 30%;
    float: right;
    padding: 1rem;
}

main div table {
    margin-bottom: 1rem;
}

main div table tr td:first-of-type {
    height: 2rem;
    width: 3rem;
    padding-right: 1rem;
    vertical-align: middle;
}

main div table tr td:nth-of-type(2) {
    height: 2rem;
}

.node {
    width: 1.5rem;
    height: auto;
}

.edge {
    width: 3rem;
    height: auto;
    vertical-align: text-top;
}

@media (max-width: 860px) {
    main h1 + svg {
        width: 100%;
    }

    main h1 + svg + div {
        width: 100%;
        float: none;
    }
}

rfcartography/static/css/reset.css (new file, 48 lines)
@@ -0,0 +1,48 @@
/* http://meyerweb.com/eric/tools/css/reset/
   v2.0 | 20110126
   License: none (public domain)
*/

html, body, div, span, applet, object, iframe,
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
a, abbr, acronym, address, big, cite, code,
del, dfn, em, img, ins, kbd, q, s, samp,
small, strike, strong, sub, sup, tt, var,
b, u, i, center,
dl, dt, dd, ol, ul, li,
fieldset, form, label, legend,
table, caption, tbody, tfoot, thead, tr, th, td,
article, aside, canvas, details, embed,
figure, figcaption, footer, header, hgroup,
menu, nav, output, ruby, section, summary,
time, mark, audio, video {
    margin: 0;
    padding: 0;
    border: 0;
    font-size: 100%;
    font: inherit;
    vertical-align: baseline;
}
/* HTML5 display-role reset for older browsers */
article, aside, details, figcaption, figure,
footer, header, hgroup, menu, nav, section {
    display: block;
}
body {
    line-height: 1;
}
ol, ul {
    list-style: none;
}
blockquote, q {
    quotes: none;
}
blockquote:before, blockquote:after,
q:before, q:after {
    content: '';
    content: none;
}
table {
    border-collapse: collapse;
    border-spacing: 0;
}

rfcartography/static/css/rfcartography.css (new file, 290 lines)
@@ -0,0 +1,290 @@
@font-face {
    font-family: 'Courier Prime';
    font-style: normal;
    font-weight: normal;
    font-stretch: normal;
    font-display: swap;
    src: url('/static/fonts/CourierPrime-Regular.ttf') format('truetype'),
         local('Courier Prime');
}

@font-face {
    font-family: 'Courier Prime';
    font-style: normal;
    font-weight: bold;
    font-stretch: normal;
    font-display: swap;
    src: url('/static/fonts/CourierPrime-Bold.ttf') format('truetype'),
         local('Courier Prime');
}

@font-face {
    font-family: 'Courier Prime';
    font-style: italic;
    font-weight: normal;
    font-stretch: normal;
    font-display: swap;
    src: url('/static/fonts/CourierPrime-Italic.ttf') format('truetype'),
         local('Courier Prime');
}

@font-face {
    font-family: 'Courier Prime';
    font-style: italic;
    font-weight: bold;
    font-stretch: normal;
    font-display: swap;
    src: url('/static/fonts/CourierPrime-BoldItalic.ttf') format('truetype'),
         local('Courier Prime');
}

:root {
    --background-body: #FAFAFA;
    --background-item: #FFFFFF;
    --accent-blue: #2072b1;
    --accent-red: #c21a7e;
    --text: #000000;
    --gradient: linear-gradient(45deg, #2072b1 10%, #c21a7e 90%);
    --body-width: 80vw;
}

@media (max-width: 860px) {
    :root {
        --body-width: 100vw;
    }
}

/* consider padding and borders for width and height */
*,
*::before,
*::after {
    box-sizing: border-box;
}

@media (prefers-reduced-motion: no-preference) {
    html {
        scroll-behavior: smooth;
    }
}

html {
    font-size: 1rem;
}

body {
    font-family: 'Courier Prime', monospace;
    line-height: 1.8;
    max-width: var(--body-width);
    min-height: 100vh;
    overflow-x: hidden;
    background-color: var(--background-body);
    text-rendering: optimizeLegibility;
    margin: auto;