1
0
Fork 0
mirror of https://github.com/ganelson/inform.git synced 2024-06-26 04:00:43 +03:00

Turned I7 extension IDs into inbuild works

This commit is contained in:
Graham Nelson 2020-02-01 23:28:37 +00:00
parent 5faf2f8557
commit 70280f6be0
24 changed files with 655 additions and 622 deletions

View file

@ -43,7 +43,7 @@ int main(int argc, char **argv) {
text_stream *T;
LOOP_OVER_LINKED_LIST(T, text_stream, find_list) {
linked_list *L = NEW_LINKED_LIST(inbuild_search_result);
inbuild_work *work = Model::work(kit_genre, T, I"");
inbuild_work *work = Works::new(kit_genre, T, I"");
inbuild_requirement *req = Model::requirement(work,
VersionNumbers::null(), VersionNumbers::null());
Nests::locate(req, nest_list, L);
@ -139,7 +139,7 @@ void Main::option(int id, int val, text_stream *arg, void *state) {
}
void Main::bareword(int id, text_stream *arg, void *state) {
Main::load_one(arg);
Main::load_one(arg, TRUE);
}
void Main::load_many(pathname *P) {
@ -148,38 +148,18 @@ void Main::load_many(pathname *P) {
while (Directories::next(D, LEAFNAME)) {
TEMPORARY_TEXT(FILENAME);
WRITE_TO(FILENAME, "%p%c%S", P, FOLDER_SEPARATOR, LEAFNAME);
Main::load_one(FILENAME);
Main::load_one(FILENAME, FALSE);
DISCARD_TEXT(FILENAME);
}
DISCARD_TEXT(LEAFNAME);
Directories::close(D);
}
void Main::load_one(text_stream *arg) {
int pos = Str::len(arg) - 1, dotpos = -1;
while (pos >= 0) {
wchar_t c = Str::get_at(arg, pos);
if (c == FOLDER_SEPARATOR) break;
if (c == '.') dotpos = pos;
pos--;
}
if (dotpos >= 0) {
TEMPORARY_TEXT(extension);
Str::substr(extension, Str::at(arg, dotpos+1), Str::end(arg));
if (Str::eq(extension, I"i7x")) {
;
}
DISCARD_TEXT(extension);
void Main::load_one(text_stream *arg, int throwing_error) {
inbuild_copy *C = Model::claim(arg);
if (C == NULL) {
if (throwing_error) Errors::with_text("unable to identify '%S'", arg);
return;
}
if (Str::get_last_char(arg) == FOLDER_SEPARATOR)
Str::delete_last_character(arg);
int kitpos = Str::len(arg) - 3;
if ((kitpos >= 0) && (Str::get_at(arg, kitpos) == 'K') &&
(Str::get_at(arg, kitpos+1) == 'i') &&
(Str::get_at(arg, kitpos+2) == 't')) {
pathname *P = Pathnames::from_text(arg);
inform_kit *K = Kits::load_at(Pathnames::directory_name(P), P);
ADD_TO_LINKED_LIST(K->as_copy, inbuild_copy, targets);
}
ADD_TO_LINKED_LIST(C, inbuild_copy, targets);
}

View file

@ -22,6 +22,7 @@ Setting up the use of this module.
@e build_step_MT
@e inbuild_nest_MT
@e inbuild_search_result_MT
@e inbuild_work_database_entry_array_MT
=
ALLOCATE_INDIVIDUALLY(inform_kit)
@ -39,6 +40,8 @@ ALLOCATE_INDIVIDUALLY(build_step)
ALLOCATE_INDIVIDUALLY(inbuild_nest)
ALLOCATE_INDIVIDUALLY(inbuild_search_result)
ALLOCATE_IN_ARRAYS(inbuild_work_database_entry, 100)
@h The beginning.
=
@ -49,6 +52,7 @@ void InbuildModule::start(void) {
@<Register this module's debugging log writers@>;
@<Register this module's command line switches@>;
Kits::start();
Extensions::start();
}
@
@ -57,7 +61,7 @@ void InbuildModule::start(void) {
;
@<Register this module's stream writers@> =
;
Writers::register_writer('X', &Works::writer);
@

View file

@ -33,31 +33,6 @@ inbuild_genre *Model::genre(text_stream *name) {
return gen;
}
@h Works.
A "work" is a single creative work; for example, Bronze by Emily Short might
be a work. Many versions of this IF story may exist over time, but they will
all be versions of the same "work".
=
typedef struct inbuild_work {
struct inbuild_genre *genre;
struct text_stream *name;
struct text_stream *author;
MEMORY_MANAGEMENT
} inbuild_work;
inbuild_work *Model::work(inbuild_genre *genre, text_stream *name, text_stream *author) {
inbuild_work *work = CREATE(inbuild_work);
work->genre = genre;
work->name = Str::duplicate(name);
work->author = Str::duplicate(author);
return work;
}
void Model::write_work(OUTPUT_STREAM, inbuild_work *work) {
VMETHOD_CALL(work->genre, GENRE_WRITE_WORK_MTID, OUT, work);
}
@h Editions.
An "edition" of a work is a particular version numbered form of it. For
example, release 7 of Bronze by Emily Short would be an edition of Bronze.
@ -134,9 +109,31 @@ inbuild_copy *Model::copy_in_directory(inbuild_edition *edition, pathname *P, ge
}
void Model::write_copy(OUTPUT_STREAM, inbuild_copy *C) {
Model::write_work(OUT, C->edition->work);
Works::write(OUT, C->edition->work);
inbuild_version_number N = C->edition->version;
if (VersionNumbers::is_null(N) == FALSE) {
WRITE(" v"); VersionNumbers::to_text(OUT, N);
}
}
/* Tries to identify the command-line token |arg| as some copy of a known
genre. Returns the claimed copy, or NULL if no genre recognises it.
NOTE: |arg| is mutated here — a trailing folder separator is removed. */
inbuild_copy *Model::claim(text_stream *arg) {
TEMPORARY_TEXT(ext);
/* scan backwards to the last '.' in the leafname, to extract a filename
extension (the scan stops at the first folder separator found) */
int pos = Str::len(arg) - 1, dotpos = -1;
while (pos >= 0) {
wchar_t c = Str::get_at(arg, pos);
if (c == FOLDER_SEPARATOR) break;
if (c == '.') dotpos = pos;
pos--;
}
if (dotpos >= 0)
Str::substr(ext, Str::at(arg, dotpos+1), Str::end(arg));
/* NOT_APPLICABLE means "could be a file or a directory, we cannot tell";
only a trailing separator proves |arg| names a directory */
int directory_status = NOT_APPLICABLE;
if (Str::get_last_char(arg) == FOLDER_SEPARATOR) {
Str::delete_last_character(arg);
directory_status = TRUE;
}
/* chain of per-genre claimers; the redundant-looking NULL test keeps the
shape uniform so further genres can be appended below */
inbuild_copy *C = NULL;
if (C == NULL) C = Kits::claim(arg, ext, directory_status);
DISCARD_TEXT(ext);
return C;
}

View file

@ -20,9 +20,12 @@ Instead they are used to represent the absence of a version number.
=
/* Returns the null version number, with every component set to -1.
The pragmas silence a spurious clang -Wconditional-uninitialized warning:
the loop in fact fills in every element of |V| before it is returned. */
inbuild_version_number VersionNumbers::null(void) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wconditional-uninitialized"
inbuild_version_number V;
for (int i=0; i<VERSION_NUMBER_DEPTH; i++) V.version_numbers[i] = -1;
return V;
#pragma clang diagnostic pop
}
int VersionNumbers::is_null(inbuild_version_number V) {

View file

@ -0,0 +1,451 @@
[Works::] Works.
To store, hash code and compare title/author pairs used to identify works.
@h Works.
A "work" is a single artistic or programming creation; for example, the IF
story Bronze by Emily Short might be a work. Many versions of this IF story
may exist over time, but they will all be versions of the same "work".
Extensions are also works: for example, Epistemology by Eric Eve is a work.
Works are identified by the pair of title and author name, each of which is an
ISO Latin-1 string limited in length, with certain bad-news characters
excluded (such as |/| and |:|) so that they can be used directly in filenames.
However, we will not want to compare these by string comparison: so we
hash-code the combination for speed. The following structure holds a
combination of the textual names and the hash code:
=
typedef struct inbuild_work {
struct inbuild_genre *genre; /* what kind of thing this is a work of: kit, extension, ... */
struct text_stream *author_name; /* casing-normalised author name, used for matching */
struct text_stream *raw_author_name; /* author name exactly as supplied */
struct text_stream *title; /* casing-normalised title, used for matching */
struct text_stream *raw_title; /* title exactly as supplied */
int inbuild_work_hash_code; /* hash code derived from the above */
MEMORY_MANAGEMENT
} inbuild_work;
@ Each work structure is written only once, and its title and author name are
not subsequently altered. We therefore hash-code on arrival. As when
hashing vocabulary, we apply the X 30011 algorithm, this time with 499
(coprime to 30011) as base, to the text of the Unix-style pathname
|Author/Title|.
Though it is probably the case that the author name and title supplied are
already of normalised casing, we do not want to rely on that. Works intending
to represent (e.g.) the same extension but named with different casing
conventions would fail to match: and this could happen if a new build of
Inform were published which made a subtle change to the casing conventions,
but which continued to use an extension dictionary file first written by
previous builds under the previous conventions.
The hash code is an integer between 0 and the following constant minus 1,
derived from its title and author name.
@d WORK_HASH_CODING_BASE 499
=
/* Creates a work of the given genre from title |ti| and author name |an|.
Both raw and casing-normalised forms are kept, and since works are never
altered once made, the hash code is computed here, once, on arrival. */
inbuild_work *Works::new(inbuild_genre *genre, text_stream *ti, text_stream *an) {
inbuild_work *work = CREATE(inbuild_work);
work->genre = genre;
work->raw_author_name = Str::duplicate(an); /* private copies: callers may reuse their text */
work->author_name = Str::duplicate(an);
work->raw_title = Str::duplicate(ti);
work->title = Str::duplicate(ti);
Works::normalise_casing(work->author_name); /* so matching is effectively case-insensitive */
Works::normalise_casing(work->title);
/* hash the normalised pathname "Author/Title" with multiplier 30011,
then reduce mod WORK_HASH_CODING_BASE (499) to index the hash table */
unsigned int hc = 0;
LOOP_THROUGH_TEXT(pos, work->author_name)
hc = hc*30011 + (unsigned int) Str::get(pos);
hc = hc*30011 + (unsigned int) '/';
LOOP_THROUGH_TEXT(pos, work->title)
hc = hc*30011 + (unsigned int) Str::get(pos);
hc = hc % WORK_HASH_CODING_BASE;
work->inbuild_work_hash_code = (int) hc;
return work;
}
void Works::set_raw(inbuild_work *work, text_stream *raw_an, text_stream *raw_ti) {
work->raw_author_name = Str::duplicate(raw_an);
work->raw_title = Str::duplicate(raw_ti);
}
void Works::write(OUTPUT_STREAM, inbuild_work *work) {
VMETHOD_CALL(work->genre, GENRE_WRITE_WORK_MTID, OUT, work);
}
void Works::write_to_HTML_file(OUTPUT_STREAM, inbuild_work *work, int fancy) {
WRITE("%S", work->raw_title);
if (fancy) HTML::begin_colour(OUT, I"404040");
WRITE(" by ");
if (fancy) HTML::end_colour(OUT);
WRITE("%S", work->raw_author_name);
}
void Works::write_link_to_HTML_file(OUTPUT_STREAM, inbuild_work *work) {
HTML_OPEN_WITH("a", "href='Extensions/%S/%S.html' style=\"text-decoration: none\"",
work->author_name, work->title);
HTML::begin_colour(OUT, I"404040");
if (Works::is_standard_rules(work)) WRITE("%S", work->title);
else Works::write_to_HTML_file(OUT, work, FALSE);
HTML::end_colour(OUT);
HTML_CLOSE("a");
}
/* The stream-writer registered for the %X escape (and its %<X variant).
A NULL work stands for the main source text rather than any extension. */
void Works::writer(OUTPUT_STREAM, char *format_string, void *vE) {
inbuild_work *work = (inbuild_work *) vE;
switch (format_string[0]) {
case '<': /* %<X: title only, suppressing "by Author" for the Standard Rules */
if (work == NULL) WRITE("source text");
else {
WRITE("%S", work->raw_title);
if (Works::is_standard_rules(work) == FALSE)
WRITE(" by %S", work->raw_author_name);
}
break;
case 'X': /* plain %X: always "Title by Author" */
if (work == NULL) WRITE("<no extension>");
else WRITE("%S by %S", work->raw_title, work->raw_author_name);
break;
default:
internal_error("bad %X extension");
}
}
@ Two works with different hash codes definitely identify different extensions;
if the code is the same, we must use |strcmp| on the actual title and author
name. This is in effect case insensitive, since we normalised casing when
the works were created.
(Note that this is not a lexicographic function suitable for sorting
works into alphabetical order: it cannot be, since the hash code is not
order-preserving. To emphasise this we return true or false rather than a
|strcmp|-style delta value. For |Works::compare|, see below...)
=
/* Equality test on works: TRUE or FALSE, not trichotomous. The hash codes
are compared first as a cheap early-out; string comparison is needed only
on a hash collision. Case-insensitivity comes for free, because the
|author_name| and |title| fields were casing-normalised on creation. */
int Works::match(inbuild_work *eid1, inbuild_work *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad work match");
if (eid1->inbuild_work_hash_code != eid2->inbuild_work_hash_code) return FALSE;
if (Str::eq(eid1->author_name, eid2->author_name) == FALSE) return FALSE;
if (Str::eq(eid1->title, eid2->title) == FALSE) return FALSE;
return TRUE;
}
@ These are quite a deal slower, but trichotomous.
=
/* Trichotomous comparison, strcmp-style: author name first, then title.
Suitable for sorting, unlike Works::match above. */
int Works::compare(inbuild_work *eid1, inbuild_work *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad work match");
int d = Str::cmp(eid1->author_name, eid2->author_name);
if (d != 0) return d;
return Str::cmp(eid1->title, eid2->title);
}
/* As above, but title takes precedence over author name. */
int Works::compare_by_title(inbuild_work *eid1, inbuild_work *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad work match");
int d = Str::cmp(eid1->title, eid2->title);
if (d != 0) return d;
return Str::cmp(eid1->author_name, eid2->author_name);
}
/* Sort date first (eid2 before eid1, i.e. most recent first), then
title, then author name, as tiebreakers. */
int Works::compare_by_date(inbuild_work *eid1, inbuild_work *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad work match");
int d = Str::cmp(Works::get_sort_date(eid2), Works::get_sort_date(eid1));
if (d != 0) return d;
d = Str::cmp(eid1->title, eid2->title);
if (d != 0) return d;
return Str::cmp(eid1->author_name, eid2->author_name);
}
/* Word count first (again with eid1 and eid2 exchanged, so larger counts
sort earlier given the zero-padded count text), then title, then author. */
int Works::compare_by_length(inbuild_work *eid1, inbuild_work *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad work match");
int d = Str::cmp(Works::get_sort_word_count(eid2), Works::get_sort_word_count(eid1));
if (d != 0) return d;
d = Str::cmp(eid1->title, eid2->title);
if (d != 0) return d;
return Str::cmp(eid1->author_name, eid2->author_name);
}
@ Because the Standard Rules are treated slightly differently by the
documentation, and so forth, it's convenient to provide a single function
testing if a work refers to them.
=
/* Cached singleton: the work for the Standard Rules, made on first use
and entered into the database as hypothetical (it may never be loaded). */
inbuild_work *a_work_for_standard_rules = NULL;
int Works::is_standard_rules(inbuild_work *work) {
if (a_work_for_standard_rules == NULL) {
a_work_for_standard_rules =
Works::new(extension_genre, I"Standard Rules", I"Graham Nelson");
Works::add_to_database(a_work_for_standard_rules, HYPOTHETICAL_WDBC);
}
return Works::match(work, a_work_for_standard_rules);
}
/* And the same for Basic Inform. */
inbuild_work *a_work_for_basic_inform = NULL;
int Works::is_basic_inform(inbuild_work *work) {
if (a_work_for_basic_inform == NULL) {
a_work_for_basic_inform =
Works::new(extension_genre, I"Basic Inform", I"Graham Nelson");
Works::add_to_database(a_work_for_basic_inform, HYPOTHETICAL_WDBC);
}
return Works::match(work, a_work_for_basic_inform);
}
@h The database of known works.
We will need to be able to give rapid answers to questions like "is there
an installed extension with this work?" and "does any entry in the dictionary
relate to this work?": there may be many extensions and very many dictionary
entries, so we keep an incidence count of each work and in what context it
has been used, and store that in a hash table. Note that each distinct work
is recorded only once in the table: this is important, as although an
individual extension can only be loaded or installed once, it could be
referred to in the dictionary dozens or even hundreds of times.
The table is unsorted and is intended for rapid searching. Typically there
will be only a handful of works in the list of those with a given hash code:
indeed, the typical number will be 0 or 1.
Works are entered into the database with one of the following contexts:
@d NO_WDB_CONTEXTS 5
@d LOADED_WDBC 0
@d INSTALLED_WDBC 1
@d DICTIONARY_REFERRED_WDBC 2
@d HYPOTHETICAL_WDBC 3
@d USEWITH_WDBC 4
=
typedef struct inbuild_work_database_entry {
struct inbuild_work *work; /* the work this entry records usage of */
struct inbuild_work_database_entry *hash_next; /* next one in hashed work database */
int incidence_count[NO_WDB_CONTEXTS]; /* how often seen, per context (LOADED_WDBC, ...) */
struct text_stream *last_usage_date; /* as set by Works::set_usage_date; empty if never */
struct text_stream *sort_usage_date; /* zero-padded form used for date sorting */
struct text_stream *word_count_text; /* "%08d words" form used for length sorting */
int word_count_number; /* raw word count, set by Works::set_word_count */
} inbuild_work_database_entry;
int work_database_created = FALSE; /* the hash table below is initialised lazily */
inbuild_work_database_entry *hash_of_works[WORK_HASH_CODING_BASE];
/* Records one more sighting of |work| in the given context. If the work is
already in the database, only its incidence count is bumped; otherwise a
fresh entry is created and chained at the head of its hash bucket, so each
distinct work appears exactly once however often it is referred to. */
void Works::add_to_database(inbuild_work *work, int context) {
if (work_database_created == FALSE) { /* lazy one-time initialisation */
work_database_created = TRUE;
for (int i=0; i<WORK_HASH_CODING_BASE; i++) hash_of_works[i] = NULL;
}
int hc = work->inbuild_work_hash_code;
inbuild_work_database_entry *iwde;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
iwde->incidence_count[context]++;
return;
}
iwde = CREATE(inbuild_work_database_entry);
iwde->hash_next = hash_of_works[hc]; hash_of_works[hc] = iwde; /* prepend to bucket */
iwde->work = work;
for (int i=0; i<NO_WDB_CONTEXTS; i++) iwde->incidence_count[i] = 0;
iwde->incidence_count[context] = 1;
iwde->last_usage_date = Str::new();
iwde->sort_usage_date = Str::new();
iwde->word_count_text = Str::new();
iwde->word_count_number = 0; /* fix: was uninitialized, but Works::get_word_count reads it */
}
@ This gives us reasonably rapid access to a shared date:
=
/* Stores the last-usage date against the database entry for |work|.
Silently does nothing if the work is not in the database — NOTE(review):
callers are presumably expected to have added it first; confirm. */
void Works::set_usage_date(inbuild_work *work, text_stream *date) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
Str::copy(iwde->last_usage_date, date);
return;
}
}
/* As above, but for the zero-padded date form used when sorting. */
void Works::set_sort_date(inbuild_work *work, text_stream *date) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
Str::copy(iwde->sort_usage_date, date);
return;
}
}
/* Returns a human-readable usage date: the stored date if one was set,
or a placeholder depending on whether the dictionary has ever referred
to the work. "---" means the work is not in the database at all. */
text_stream *Works::get_usage_date(inbuild_work *work) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
if (Str::len(iwde->last_usage_date) > 0)
return iwde->last_usage_date;
if (iwde->incidence_count[DICTIONARY_REFERRED_WDBC] > 0)
return I"Once upon a time";
return I"Never";
}
return I"---";
}
/* The sortable equivalent: the placeholders are zero-padded so that they
collate after any genuine 14-digit date stamp. */
text_stream *Works::get_sort_date(inbuild_work *work) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
if (Str::len(iwde->sort_usage_date) > 0)
return iwde->sort_usage_date;
if (iwde->incidence_count[DICTIONARY_REFERRED_WDBC] > 0)
return I"00000000000000Once upon a time";
return I"00000000000000Never";
}
return I"000000000000000";
}
/* Stores a word count for |work|, both as a number and as the zero-padded
text "%08d words" used for sorting. Does nothing if the work has no
database entry. */
void Works::set_word_count(inbuild_work *work, int wc) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
WRITE_TO(iwde->word_count_text, "%08d words", wc);
iwde->word_count_number = wc;
return;
}
}
/* Returns the sortable word-count text, or a zero-padded placeholder
distinguishing "referred to in the dictionary but never counted" from
"never read at all". "---" means no database entry exists. */
text_stream *Works::get_sort_word_count(inbuild_work *work) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
if (Str::len(iwde->word_count_text) > 0)
return iwde->word_count_text;
if (iwde->incidence_count[DICTIONARY_REFERRED_WDBC] > 0)
return I"00000000I did read this, but forgot";
return I"00000000I've never read this";
}
return I"---";
}
/* TRUE only if the work was once referred to in the dictionary but has
no recorded word count now. */
int Works::forgot(inbuild_work *work) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
if (Str::len(iwde->word_count_text) > 0)
return FALSE;
if (iwde->incidence_count[DICTIONARY_REFERRED_WDBC] > 0)
return TRUE;
return FALSE;
}
return FALSE;
}
/* TRUE only if the work is in the database yet has neither a word count
nor any dictionary reference. */
int Works::never(inbuild_work *work) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) {
if (Str::len(iwde->word_count_text) > 0)
return FALSE;
if (iwde->incidence_count[DICTIONARY_REFERRED_WDBC] > 0)
return FALSE;
return TRUE;
}
return FALSE;
}
/* The numeric word count, or 0 if the work is unknown. NOTE(review): for
an entry whose count was never set, this reads |word_count_number|, which
Works::add_to_database does not initialise — see fix proposed there. */
int Works::get_word_count(inbuild_work *work) {
inbuild_work_database_entry *iwde;
int hc = work->inbuild_work_hash_code;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work))
return iwde->word_count_number;
return 0;
}
@ The purpose of the hash table is to enable us to reply quickly when asked
for one of the following usage counts:
=
/* Returns how many times |work| has been recorded in the given context
(LOADED_WDBC, INSTALLED_WDBC, ...); 0 if it is not in the database. */
int Works::no_times_used_in_context(inbuild_work *work, int context) {
inbuild_work_database_entry *iwde;
for (iwde = hash_of_works[work->inbuild_work_hash_code]; iwde; iwde = iwde->hash_next)
if (Works::match(work, iwde->work)) return iwde->incidence_count[context];
return 0;
}
@ The work hash table makes quite interesting reading, so:
=
/* Dumps the whole work hash table to the debugging log: one line per entry,
giving the bucket number, the incidence count in each of the
NO_WDB_CONTEXTS contexts, and the work itself.
Fix: the log previously printed only contexts 0-3, silently omitting
USEWITH_WDBC (4) even though NO_WDB_CONTEXTS is 5. */
void Works::log_work_hash_table(void) {
int hc, total = 0;
LOG("Work identifier hash table:\n");
for (hc=0; hc<WORK_HASH_CODING_BASE; hc++) {
inbuild_work_database_entry *iwde;
for (iwde = hash_of_works[hc]; iwde; iwde = iwde->hash_next) {
total++;
LOG("%03d %3d %3d %3d %3d %3d %X\n",
hc, iwde->incidence_count[0], iwde->incidence_count[1],
iwde->incidence_count[2], iwde->incidence_count[3],
iwde->incidence_count[4], iwde->work);
}
}
LOG("%d entries in all\n", total);
}
@h How casing is normalised.
Every word is capitalised, where a word begins at the start of the text,
after a hyphen, or after a bracket. Thus "Every Word Counts", "Even
Double-Barrelled Ones (And Even Parenthetically)".
=
/* Rewrites |p| in place so that each word starts with a capital and
continues in lower case, where words begin at the start of the text or
after a space, hyphen or open bracket. Note that the boundary test reads
the character as it was before normalisation (the same characters, since
space/hyphen/bracket are unaffected by case mapping). */
void Works::normalise_casing(text_stream *p) {
int boundary = TRUE;
LOOP_THROUGH_TEXT(pos, p) {
wchar_t c = Str::get(pos);
if (boundary) Str::put(pos, Characters::toupper(c));
else Str::put(pos, Characters::tolower(c));
boundary = FALSE;
if (c == ' ') boundary = TRUE;
if (c == '-') boundary = TRUE;
if (c == '(') boundary = TRUE;
}
}
@h Documentation links.
This is where HTML links to extension documentation are created; the URL for
each extension's page is generated from its |inbuild_work|.
=
/* Opens an HTML anchor linking to the documentation page for |work|,
under the inform:// scheme. The tooltip is |rubric| if one is supplied,
and otherwise "Title by Author" via the %X escape. The matching close
tag is written by Works::end_extension_link below. */
void Works::begin_extension_link(OUTPUT_STREAM, inbuild_work *work, text_stream *rubric) {
TEMPORARY_TEXT(link);
WRITE_TO(link, "href='inform://Extensions/Extensions/");
Works::escape_apostrophes(link, work->author_name);
WRITE_TO(link, "/");
Works::escape_apostrophes(link, work->title);
WRITE_TO(link, ".html' ");
if (Str::len(rubric) > 0) WRITE_TO(link, "title=\"%S\" ", rubric);
else WRITE_TO(link, "title=\"%X\" ", work);
WRITE_TO(link, "style=\"text-decoration: none\"");
HTML_OPEN_WITH("a", "%S", link);
DISCARD_TEXT(link);
}
/* Despite the name, this percent-encodes several URL-unsafe characters,
not just apostrophes: each is written as '%' followed by its hex code. */
void Works::escape_apostrophes(OUTPUT_STREAM, text_stream *S) {
LOOP_THROUGH_TEXT(pos, S) {
wchar_t c = Str::get(pos);
if ((c == '\'') || (c == '\"') || (c == ' ') || (c == '&') ||
(c == '<') || (c == '>') || (c == '%'))
WRITE("%%%x", (int) c);
else
PUT(c);
}
}
/* Closes the anchor opened by Works::begin_extension_link; |work| is
unused but kept for symmetry with the opening call. */
void Works::end_extension_link(OUTPUT_STREAM, inbuild_work *work) {
HTML_CLOSE("a");
}

View file

@ -0,0 +1,37 @@
[Extensions::] Extensions.
An Inform 7 extension.
@h Extensions.
= (early code)
inbuild_genre *extension_genre = NULL;
@ =
/* Creates the "extension" genre and attaches its method functions. */
void Extensions::start(void) {
extension_genre = Model::genre(I"extension");
METHOD_ADD(extension_genre, GENRE_WRITE_WORK_MTID, Extensions::write_work);
METHOD_ADD(extension_genre, GENRE_LOCATION_IN_NEST_MTID, Extensions::location_in_nest);
METHOD_ADD(extension_genre, GENRE_COPY_TO_NEST_MTID, Extensions::copy_to_nest);
}
/* Claims command-line arguments which look like extensions: single files
(not directories) with the ".i7x" extension. For now this is a stub which
recognises such files but always returns NULL, i.e., declines the claim. */
inbuild_copy *Extensions::claim(text_stream *arg, text_stream *ext, int directory_status) {
if (directory_status == TRUE) return NULL;
if (Str::eq_insensitive(ext, I"i7x")) {
// eventually load into a copy here
return NULL;
}
return NULL;
}
/* GENRE_WRITE_WORK_MTID: writes "Title by Author" via the %X escape. */
void Extensions::write_work(inbuild_genre *gen, OUTPUT_STREAM, inbuild_work *work) {
WRITE("%X", work);
}
/* GENRE_LOCATION_IN_NEST_MTID: not yet implemented — adds no results. */
void Extensions::location_in_nest(inbuild_genre *gen, inbuild_nest *N, inbuild_requirement *req, linked_list *search_results) {
;
}
/* GENRE_COPY_TO_NEST_MTID: deliberately halts — copying extensions into
a nest is not yet supported. */
void Extensions::copy_to_nest(inbuild_genre *gen, inbuild_copy *C, inbuild_nest *N, int syncing) {
internal_error("unimplemented");
}

View file

@ -13,13 +13,26 @@ void Kits::start(void) {
METHOD_ADD(kit_genre, GENRE_COPY_TO_NEST_MTID, Kits::copy_to_nest);
}
/* Claims command-line arguments which look like kits: directories whose
names end in "Kit". A |directory_status| of FALSE (definitely a file)
rules this out; TRUE or NOT_APPLICABLE (unknown) are both acceptable.
On a match the kit is loaded at once and its copy returned. */
inbuild_copy *Kits::claim(text_stream *arg, text_stream *ext, int directory_status) {
if (directory_status == FALSE) return NULL;
/* test whether the last three characters of the name are "Kit" */
int kitpos = Str::len(arg) - 3;
if ((kitpos >= 0) && (Str::get_at(arg, kitpos) == 'K') &&
(Str::get_at(arg, kitpos+1) == 'i') &&
(Str::get_at(arg, kitpos+2) == 't')) {
pathname *P = Pathnames::from_text(arg);
inform_kit *K = Kits::load_at(Pathnames::directory_name(P), P);
return K->as_copy;
}
return NULL;
}
void Kits::write_copy(inbuild_genre *gen, OUTPUT_STREAM, inbuild_work *work) {
WRITE("Kit %S", work->name);
WRITE("Kit %S", work->title);
}
void Kits::location_in_nest(inbuild_genre *gen, inbuild_nest *N, inbuild_requirement *req, linked_list *search_results) {
pathname *P = Pathnames::subfolder(N->location, I"Inter");
P = Pathnames::subfolder(P, req->work->name);
P = Pathnames::subfolder(P, req->work->title);
filename *canary = Filenames::in_folder(P, I"kit_metadata.txt");
if (TextFiles::exists(canary)) {
inform_kit *K = Kits::load_at(Pathnames::directory_name(P), P);
@ -32,13 +45,13 @@ void Kits::location_in_nest(inbuild_genre *gen, inbuild_nest *N, inbuild_require
void Kits::copy_to_nest(inbuild_genre *gen, inbuild_copy *C, inbuild_nest *N, int syncing) {
// Model::write_copy(STDOUT, C); PRINT(" --> %p %S\n", N->location, syncing?I"syncing":I"copying");
pathname *dest_kit = Pathnames::subfolder(N->location, I"Inter");
dest_kit = Pathnames::subfolder(dest_kit, C->edition->work->name);
dest_kit = Pathnames::subfolder(dest_kit, C->edition->work->title);
filename *dest_kit_metadata = Filenames::in_folder(dest_kit, I"kit_metadata.txt");
if (TextFiles::exists(dest_kit_metadata)) {
if (syncing == FALSE) {
Errors::with_text("already present in nest (use -sync-to not -copy-to to overwrite)",
C->edition->work->name);
C->edition->work->title);
return;
}
} else {
@ -110,7 +123,7 @@ inform_kit *Kits::load_at(text_stream *name, pathname *P) {
TextFiles::read(F, FALSE,
NULL, FALSE, Kits::read_metadata, NULL, (void *) K);
inbuild_work *work = Model::work(kit_genre, name, NULL);
inbuild_work *work = Works::new(kit_genre, name, NULL);
inbuild_edition *edition = Model::edition(work, K->version);
K->as_copy = Model::copy_in_directory(edition, P, STORE_POINTER_inform_kit(K));

View file

@ -8,11 +8,13 @@ Chapter 1: Setting Up
Inbuild Module
Chapter 2: Conceptual Framework
Version Numbers
Conceptual Model
Works
Version Numbers
Build Graphs
Build Steps
Nests
Chapter 3: The Genres
Kits
Extensions

View file

@ -58,6 +58,7 @@ int Main::core_inform_main(int argc, char *argv[]) {
InterModule::start();
BuildingModule::start();
CodegenModule::start();
InbuildModule::start();
int rv = CoreMain::main(argc, argv);
@ -74,6 +75,7 @@ int Main::core_inform_main(int argc, char *argv[]) {
InterModule::end();
BuildingModule::end();
CodegenModule::end();
InbuildModule::end();
Foundation::end(); /* must be ended last */
return rv;
}

View file

@ -53,7 +53,6 @@ We need to itemise the structures we'll want to allocate:
@e ph_stack_frame_box_MT
@e i6_inclusion_matter_MT
@e literal_list_MT
@e extension_identifier_database_entry_array_MT
@e control_structure_phrase_MT
@e adjective_meaning_MT
@e adjective_meaning_block_MT
@ -178,7 +177,6 @@ ALLOCATE_INDIVIDUALLY(label_namespace)
ALLOCATE_IN_ARRAYS(activity_crossref, 100)
ALLOCATE_IN_ARRAYS(activity_list, 1000)
ALLOCATE_IN_ARRAYS(application, 100)
ALLOCATE_IN_ARRAYS(extension_identifier_database_entry, 100)
ALLOCATE_IN_ARRAYS(i6_schema, 100)
ALLOCATE_IN_ARRAYS(instance_usage, 200)
ALLOCATE_IN_ARRAYS(invocation_options, 100)
@ -290,7 +288,6 @@ we need to use the equivalent of traditional |malloc| and |calloc| routines.
Writers::register_writer_I('B', &CoreModule::writer);
Writers::register_writer('I', &Instances::writer);
Writers::register_writer('L', &LocalVariables::writer);
Writers::register_writer('X', &Extensions::IDs::writer);
@

View file

@ -404,7 +404,7 @@ divided up by the extensions containing the rules which produce them.
contiguous_match = TRUE;
if (no_cms++ == 0) {
TEMPORARY_TEXT(QT);
WRITE_TO(QT, "%<X", Extensions::Files::get_eid(ef));
WRITE_TO(QT, "%<X", Extensions::Files::get_work(ef));
Emit::array_text_entry(QT);
DISCARD_TEXT(QT);
} else

View file

@ -79,11 +79,11 @@ void Problems::quote_invocation(int t, parse_node *p) {
void Problems::expand_invocation(OUTPUT_STREAM, void *p) {
Phrases::TypeData::Textual::inv_write_HTML_representation(OUT, (parse_node *) p);
}
void Problems::quote_extension_id(int t, extension_identifier *p) {
void Problems::quote_extension_id(int t, inbuild_work *p) {
Problems::problem_quote(t, (void *) p, Problems::expand_extension_id);
}
void Problems::expand_extension_id(OUTPUT_STREAM, void *p) {
Extensions::IDs::write_to_HTML_file(OUT, (extension_identifier *) p, FALSE);
Works::write_to_HTML_file(OUT, (inbuild_work *) p, FALSE);
}
void Problems::quote_property(int t, property *p) { Problems::quote_wording(t, p->name); }
void Problems::quote_table(int t, table *tab) {

View file

@ -1113,7 +1113,7 @@ void Rulebooks::index_page(OUTPUT_STREAM, int n) {
@<Index the segment for the rulebooks in this extension@> =
HTML_OPEN("p"); WRITE("<b>From the extension ");
Extensions::IDs::write_to_HTML_file(OUT, Extensions::Files::get_eid(ef), FALSE);
Works::write_to_HTML_file(OUT, Extensions::Files::get_work(ef), FALSE);
WRITE("</b>"); HTML_CLOSE("p");
@<Index rulebooks occurring in this part of the source text@>;

View file

@ -58,8 +58,8 @@ compilation_module *Modules::new(parse_node *from) {
DISCARD_TEXT(pname);
if (owner) {
Hierarchy::markup(C->inter_presence->the_package, EXT_AUTHOR_HMD, owner->ef_id.raw_author_name);
Hierarchy::markup(C->inter_presence->the_package, EXT_TITLE_HMD, owner->ef_id.raw_title);
Hierarchy::markup(C->inter_presence->the_package, EXT_AUTHOR_HMD, owner->ef_work->raw_author_name);
Hierarchy::markup(C->inter_presence->the_package, EXT_TITLE_HMD, owner->ef_work->raw_title);
TEMPORARY_TEXT(V);
WRITE_TO(V, "%+W", Wordings::one_word(owner->version_loaded));
Hierarchy::markup(C->inter_presence->the_package, EXT_VERSION_HMD, V);
@ -78,7 +78,7 @@ compiled from the compilation module will go into a package of that name.
if (owner == standard_rules_extension) WRITE_TO(pname, "standard_rules");
else if (owner == NULL) WRITE_TO(pname, "source_text");
else {
WRITE_TO(pname, "%X", Extensions::Files::get_eid(owner));
WRITE_TO(pname, "%X", Extensions::Files::get_work(owner));
LOOP_THROUGH_TEXT(pos, pname)
if (Str::get(pos) == ' ')
Str::put(pos, '_');

View file

@ -53,7 +53,7 @@ typedef struct heading {
int for_release; /* include this material in a release version? */
int omit_material; /* if set, simply ignore all of this */
int use_with_or_without; /* if TRUE, use with the extension; if FALSE, without */
struct extension_identifier for_use_with; /* e.g. "for use with ... by ..." */
struct inbuild_work *for_use_with; /* e.g. "for use with ... by ..." */
struct wording in_place_of_text; /* e.g. "in place of ... in ... by ..." */
struct wording heading_text; /* once provisos have been stripped away */
struct noun *list_of_contents; /* tagged names defined under this */
@ -189,8 +189,7 @@ and cannot contain information about releasing or about virtual machines.
=
int last_indentation_above_level[NO_HEADING_LEVELS], lial_made = FALSE;
extension_identifier *grammar_eid = NULL;
inbuild_work *work_identified = NULL;
heading *Sentences::Headings::declare(parse_node *PN) {
heading *h = CREATE(heading);
@ -201,6 +200,7 @@ heading *Sentences::Headings::declare(parse_node *PN) {
h->index_definitions_made_under_this = TRUE;
h->use_with_or_without = NOT_APPLICABLE;
h->in_place_of_text = EMPTY_WORDING;
h->for_use_with = NULL;
if ((PN == NULL) || (Wordings::empty(ParseTree::get_text(PN))))
internal_error("heading at textless node");
@ -245,7 +245,6 @@ heading *Sentences::Headings::declare(parse_node *PN) {
@d IN_PLACE_OF_HQ 7
@<Parse heading text for release or other stipulations@> =
grammar_eid = &(h->for_use_with);
current_sentence = PN;
wording W = ParseTree::get_text(PN);
@ -266,6 +265,7 @@ heading *Sentences::Headings::declare(parse_node *PN) {
W = GET_RW(<heading-qualifier>, 1);
}
h->heading_text = W;
h->for_use_with = work_identified;
@ When a heading has been found, we repeatedly try to match it against
<heading-qualifier> to see if it ends with text telling us what to do with
@ -334,7 +334,8 @@ allowed; they should probably be withdrawn.
wording AW = GET_RW(<extension-identifier>, 2);
WRITE_TO(exft, "%+W", TW);
WRITE_TO(exfa, "%+W", AW);
Extensions::IDs::new(grammar_eid, exfa, exft, USEWITH_EIDBC);
work_identified = Works::new(extension_genre, exft, exfa);
Works::add_to_database(work_identified, USEWITH_WDBC);
DISCARD_TEXT(exft);
DISCARD_TEXT(exfa);
@ -565,9 +566,9 @@ the parse tree on quite a large scale, and that's just what we do.
=
void Sentences::Headings::satisfy_individual_heading_dependency(heading *h) {
if (h->level < 1) return;
extension_identifier *eid = &(h->for_use_with);
inbuild_work *work = h->for_use_with;
int loaded = FALSE;
if (Extensions::IDs::no_times_used_in_context(eid, LOADED_EIDBC) != 0) loaded = TRUE;
if (Works::no_times_used_in_context(work, LOADED_WDBC) != 0) loaded = TRUE;
LOGIF(HEADINGS, "SIHD on $H: loaded %d: annotation %d: %W: %d\n", h, loaded,
ParseTree::int_annotation(h->sentence_declaring,
suppress_heading_dependencies_ANNOT),
@ -587,9 +588,9 @@ void Sentences::Headings::satisfy_individual_heading_dependency(heading *h) {
LOOP_OVER(h2, heading)
if ((Wordings::nonempty(h2->heading_text)) &&
(Wordings::match_perhaps_quoted(S, h2->heading_text)) &&
(Extensions::IDs::match(
Extensions::Files::get_eid(
Sentences::Headings::get_extension_containing(h2)), eid))) {
(Works::match(
Extensions::Files::get_work(
Sentences::Headings::get_extension_containing(h2)), work))) {
found = TRUE;
if (h->level != h2->level)
@<Can't replace heading unless level matches@>;
@ -607,7 +608,7 @@ void Sentences::Headings::satisfy_individual_heading_dependency(heading *h) {
@<Can't replace heading in an unincluded extension@> =
current_sentence = h->sentence_declaring;
Problems::quote_source(1, current_sentence);
Problems::quote_extension_id(2, &(h->for_use_with));
Problems::quote_extension_id(2, h->for_use_with);
Problems::Issue::handmade_problem(_p_(PM_HeadingInPlaceOfUnincluded));
Problems::issue_problem_segment(
"In the sentence %1, it looks as if you intend to replace a section "
@ -664,9 +665,9 @@ void Sentences::Headings::suppress_dependencies(parse_node *pn) {
@<Can't replace heading subordinate to another replaced heading@> =
current_sentence = h2->sentence_declaring;
Problems::quote_source(1, current_sentence);
Problems::quote_extension_id(2, &(h2->for_use_with));
Problems::quote_extension_id(2, h2->for_use_with);
Problems::quote_source(3, h->sentence_declaring);
Problems::quote_extension_id(4, &(h->for_use_with));
Problems::quote_extension_id(4, h->for_use_with);
Problems::Issue::handmade_problem(_p_(PM_HeadingInPlaceOfSubordinate));
Problems::issue_problem_segment(
"In the sentence %1, it looks as if you intend to replace a section "
@ -678,13 +679,13 @@ void Sentences::Headings::suppress_dependencies(parse_node *pn) {
@<Can't find heading in the given extension@> =
current_sentence = h->sentence_declaring;
Problems::quote_source(1, current_sentence);
Problems::quote_extension_id(2, &(h->for_use_with));
Problems::quote_extension_id(2, h->for_use_with);
Problems::quote_wording(3, h->in_place_of_text);
Problems::quote_text(4,
"unspecified, that is, the extension didn't have a version number");
extension_file *ef;
LOOP_OVER(ef, extension_file)
if (Extensions::IDs::match(&(h->for_use_with), Extensions::Files::get_eid(ef)))
if (Works::match(h->for_use_with, Extensions::Files::get_work(ef)))
Problems::quote_wording(4,
Wordings::one_word(Extensions::Files::get_version_wn(ef)));
Problems::Issue::handmade_problem(_p_(PM_HeadingInPlaceOfUnknown));

View file

@ -16,7 +16,7 @@ or ECD.
=
typedef struct extension_census_datum {
struct extension_identifier ecd_id; /* title, author, hash code */
struct inbuild_work *ecd_work; /* title, author, hash code */
struct text_stream *version_text; /* such as |23| or |14/060527| */
struct text_stream *VM_requirement; /* such as "(for Z-machine only)" */
int built_in; /* found in the Inform 7 application's private stock */
@ -233,8 +233,8 @@ stored have been waived.
@<Remove filename extension for extensions, if any@>;
Str::copy(raw_title, candidate_title);
Str::copy(raw_author_name, candidate_author_name);
Extensions::IDs::normalise_casing(candidate_author_name);
Extensions::IDs::normalise_casing(candidate_title);
Works::normalise_casing(candidate_author_name);
Works::normalise_casing(candidate_title);
if (Str::includes_character(candidate_title, '.')) {
LOG("Title is <%S>\n", candidate_title);
@ -286,7 +286,7 @@ line is terminated by any of |0A|, |0D|, |0A 0D| or |0D 0A|, or by the local
if ((c == '\x0a') || (c == '\x0d') || (c == '\n')) break;
if (titling_chars_read < MAX_TITLING_LINE_LENGTH - 1) PUT_TO(titling_line, c);
}
Extensions::IDs::normalise_casing(titling_line);
Works::normalise_casing(titling_line);
@ In the following, all possible newlines are converted to white space, and
all white space before a quoted rubric text is ignored. We need to do this
@ -421,8 +421,8 @@ which the user had installed to override this built-in extension.
@<See if we duplicate the title and author name of an extension already found in another domain@> =
extension_census_datum *other;
LOOP_OVER(other, extension_census_datum)
if ((Str::eq(candidate_author_name, other->ecd_id.author_name))
&& (Str::eq(candidate_title, other->ecd_id.title))
if ((Str::eq(candidate_author_name, other->ecd_work->author_name))
&& (Str::eq(candidate_title, other->ecd_work->title))
&& ((other->built_in) || (cs->origin == ORIGIN_WAS_BUILT_IN_EXTENSIONS_AREA))) {
other->overriding_a_built_in_extension = TRUE;
overridden_by_an_extension_already_found = TRUE;
@ -435,8 +435,9 @@ truncate it.
@<Create a new census datum for this extension, which has passed all tests@> =
ecd = CREATE(extension_census_datum);
Extensions::IDs::new(&(ecd->ecd_id), candidate_author_name, candidate_title, INSTALLED_EIDBC);
Extensions::IDs::set_raw(&(ecd->ecd_id), raw_author_name, raw_title);
ecd->ecd_work = Works::new(extension_genre, candidate_title, candidate_author_name);
Works::add_to_database(ecd->ecd_work, INSTALLED_WDBC);
Works::set_raw(ecd->ecd_work, raw_author_name, raw_title);
ecd->VM_requirement = Str::duplicate(requirement_text);
if (Str::len(version_text) > MAX_VERSION_NUMBER_LENGTH)
Str::truncate(version_text, MAX_VERSION_NUMBER_LENGTH); /* truncate to maximum legal length */
@ -791,8 +792,8 @@ the usual ones seen in Mac OS X applications such as iTunes.
@<Insert a subtitling row in the census sorting, if necessary@> =
if ((d == CE_BY_AUTHOR) &&
(Str::ne(current_author_name, ecd->ecd_id.author_name))) {
Str::copy(current_author_name, ecd->ecd_id.author_name);
(Str::ne(current_author_name, ecd->ecd_work->author_name))) {
Str::copy(current_author_name, ecd->ecd_work->author_name);
@<Begin a tinted census line@>;
@<Print the author's line in the extension census table@>;
@<End a tinted census line@>;
@ -831,13 +832,13 @@ the usual ones seen in Mac OS X applications such as iTunes.
@ Used only in "by author".
@<Print the author's line in the extension census table@> =
WRITE("%S", ecd->ecd_id.raw_author_name);
WRITE("%S", ecd->ecd_work->raw_author_name);
extension_census_datum *ecd2;
int cu = 0, cn = 0, j;
for (j = i; j < no_entries; j++) {
ecd2 = sorted_census_results[j];
if (Str::ne(current_author_name, ecd2->ecd_id.author_name)) break;
if (Str::ne(current_author_name, ecd2->ecd_work->author_name)) break;
if (Extensions::Census::ecd_used(ecd2)) cu++;
else cn++;
}
@ -900,23 +901,23 @@ where all is optional except the title part.
WRITE("&nbsp;");
HTML_TAG_WITH("img", "%s", bulletornot);
Extensions::IDs::begin_extension_link(OUT, &(ecd->ecd_id), ecd->rubric);
Works::begin_extension_link(OUT, ecd->ecd_work, ecd->rubric);
if (d != CE_BY_AUTHOR) {
HTML::begin_colour(OUT, I"404040");
WRITE("%S", ecd->ecd_id.raw_title);
if (Str::len(ecd->ecd_id.raw_title) + Str::len(ecd->ecd_id.raw_author_name) > 45) {
WRITE("%S", ecd->ecd_work->raw_title);
if (Str::len(ecd->ecd_work->raw_title) + Str::len(ecd->ecd_work->raw_author_name) > 45) {
HTML_TAG("br");
WRITE("&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;");
} else
WRITE(" ");
WRITE("by %S", ecd->ecd_id.raw_author_name);
WRITE("by %S", ecd->ecd_work->raw_author_name);
HTML::end_colour(OUT);
} else {
HTML::begin_colour(OUT, I"404040");
WRITE("%S", ecd->ecd_id.raw_title);
WRITE("%S", ecd->ecd_work->raw_title);
HTML::end_colour(OUT);
}
Extensions::IDs::end_extension_link(OUT, &(ecd->ecd_id));
Works::end_extension_link(OUT, ecd->ecd_work);
if (Str::len(ecd->VM_requirement)) {
@<Append icons which signify the VM requirements of the extension@>;
@ -960,19 +961,19 @@ the first and last word and just look at what is in between:
area = pathname_of_extensions[MATERIALS_FS_AREA];
}
if (ecd->built_in) HTML_TAG_WITH("img", "%s", opener)
else HTML::Javascript::open_file(OUT, area, ecd->ecd_id.raw_author_name, opener);
else HTML::Javascript::open_file(OUT, area, ecd->ecd_work->raw_author_name, opener);
@<Print column 4 of the census line@> =
HTML_OPEN_WITH("span", "class=\"smaller\"");
if ((d == CE_BY_DATE) || (d == CE_BY_INSTALL)) {
WRITE("%S", Extensions::IDs::get_usage_date(&(ecd->ecd_id)));
WRITE("%S", Works::get_usage_date(ecd->ecd_work));
} else if (d == CE_BY_LENGTH) {
if (Extensions::IDs::forgot(&(ecd->ecd_id)))
if (Works::forgot(ecd->ecd_work))
WRITE("I did read this, but forgot");
else if (Extensions::IDs::never(&(ecd->ecd_id)))
else if (Works::never(ecd->ecd_work))
WRITE("I've never read this");
else
WRITE("%d words", Extensions::IDs::get_word_count(&(ecd->ecd_id)));
WRITE("%d words", Works::get_word_count(ecd->ecd_work));
} else {
if (Str::len(ecd->rubric) > 0)
WRITE("%S", ecd->rubric);
@ -992,10 +993,8 @@ int Extensions::Census::installation_region(extension_census_datum *ecd) {
}
int Extensions::Census::ecd_used(extension_census_datum *ecd) {
if ((Extensions::IDs::no_times_used_in_context(
&(ecd->ecd_id), LOADED_EIDBC) > 0) ||
(Extensions::IDs::no_times_used_in_context(
&(ecd->ecd_id), DICTIONARY_REFERRED_EIDBC) > 0))
if ((Works::no_times_used_in_context(ecd->ecd_work, LOADED_WDBC) > 0) ||
(Works::no_times_used_in_context(ecd->ecd_work, DICTIONARY_REFERRED_WDBC) > 0))
return TRUE;
return FALSE;
}
@ -1006,13 +1005,13 @@ int Extensions::Census::ecd_used(extension_census_datum *ecd) {
int Extensions::Census::compare_ecd_by_title(const void *ecd1, const void *ecd2) {
extension_census_datum *e1 = *((extension_census_datum **) ecd1);
extension_census_datum *e2 = *((extension_census_datum **) ecd2);
return Extensions::IDs::compare_by_title(&(e1->ecd_id), &(e2->ecd_id));
return Works::compare_by_title(e1->ecd_work, e2->ecd_work);
}
int Extensions::Census::compare_ecd_by_author(const void *ecd1, const void *ecd2) {
extension_census_datum *e1 = *((extension_census_datum **) ecd1);
extension_census_datum *e2 = *((extension_census_datum **) ecd2);
return Extensions::IDs::compare(&(e1->ecd_id), &(e2->ecd_id));
return Works::compare(e1->ecd_work, e2->ecd_work);
}
int Extensions::Census::compare_ecd_by_installation(const void *ecd1, const void *ecd2) {
@ -1020,17 +1019,17 @@ int Extensions::Census::compare_ecd_by_installation(const void *ecd1, const void
extension_census_datum *e2 = *((extension_census_datum **) ecd2);
int d = Extensions::Census::installation_region(e1) - Extensions::Census::installation_region(e2);
if (d != 0) return d;
return Extensions::IDs::compare_by_title(&(e1->ecd_id), &(e2->ecd_id));
return Works::compare_by_title(e1->ecd_work, e2->ecd_work);
}
int Extensions::Census::compare_ecd_by_date(const void *ecd1, const void *ecd2) {
extension_census_datum *e1 = *((extension_census_datum **) ecd1);
extension_census_datum *e2 = *((extension_census_datum **) ecd2);
return Extensions::IDs::compare_by_date(&(e1->ecd_id), &(e2->ecd_id));
return Works::compare_by_date(e1->ecd_work, e2->ecd_work);
}
int Extensions::Census::compare_ecd_by_length(const void *ecd1, const void *ecd2) {
extension_census_datum *e1 = *((extension_census_datum **) ecd1);
extension_census_datum *e2 = *((extension_census_datum **) ecd2);
return Extensions::IDs::compare_by_length(&(e1->ecd_id), &(e2->ecd_id));
return Works::compare_by_length(e1->ecd_work, e2->ecd_work);
}

View file

@ -19,7 +19,7 @@ in the dictionary after each successful use of that extension.
=
typedef struct extension_dictionary_entry {
struct extension_identifier ede_id; /* author name and title, with hash code */
struct inbuild_work *ede_work; /* author name and title, with hash code */
struct text_stream *entry_text; /* text of the dictionary entry */
struct text_stream *sorting; /* text reprocessed for sorting purposes */
struct text_stream *type; /* grammatical category, such as "kind" */
@ -98,7 +98,7 @@ format but also recorded the erasure flag.
=
void Extensions::Dictionary::log_entry(extension_dictionary_entry *ede) {
LOG("ede: %4d %d |%S|%S|%S|%S|\n", ede->allocation_id,
ede->erased, ede->ede_id.author_name, ede->ede_id.title,
ede->erased, ede->ede_work->author_name, ede->ede_work->title,
ede->entry_text, ede->type);
}
@ -129,7 +129,7 @@ void Extensions::Dictionary::erase_entries_of_uninstalled_extensions(void) {
LOGIF(EXTENSIONS_CENSUS, "Erasure of dictionary entries for uninstalled extensions\n");
LOOP_OVER(ede, extension_dictionary_entry)
if ((ede->erased == FALSE) &&
(Extensions::IDs::no_times_used_in_context(&(ede->ede_id), INSTALLED_EIDBC) == 0)) {
(Works::no_times_used_in_context(ede->ede_work, INSTALLED_WDBC) == 0)) {
ede->erased = TRUE;
LOGIF(EXTENSIONS_CENSUS, "Erased $d", ede);
}
@ -148,7 +148,7 @@ void Extensions::Dictionary::erase_entries(extension_file *ef) {
LOGIF(EXTENSIONS_CENSUS, "Erasure of dictionary entries for $x\n", ef);
LOOP_OVER(ede, extension_dictionary_entry)
if ((ede->erased == FALSE) &&
(Extensions::IDs::match(&(ede->ede_id), &(ef->ef_id)))) {
(Works::match(ede->ede_work, ef->ef_work))) {
ede->erased = TRUE;
LOGIF(EXTENSIONS_CENSUS, "Erased $d", ede);
}
@ -162,19 +162,20 @@ void Extensions::Dictionary::new_entry(text_stream *category, extension_file *ef
if (Wordings::nonempty(W)) { /* a safety precaution: never index the empty text */
TEMPORARY_TEXT(headword);
WRITE_TO(headword, "%+W", W);
Extensions::Dictionary::new_dictionary_entry_raw(category, ef->ef_id.author_name, ef->ef_id.title, headword);
Extensions::Dictionary::new_dictionary_entry_raw(category, ef->ef_work->author_name, ef->ef_work->title, headword);
DISCARD_TEXT(headword);
}
}
void Extensions::Dictionary::new_entry_from_stream(text_stream *category, extension_file *ef, text_stream *headword) {
Extensions::Dictionary::new_dictionary_entry_raw(category, ef->ef_id.author_name, ef->ef_id.title, headword);
Extensions::Dictionary::new_dictionary_entry_raw(category, ef->ef_work->author_name, ef->ef_work->title, headword);
}
void Extensions::Dictionary::new_dictionary_entry_raw(text_stream *category,
text_stream *author, text_stream *title, text_stream *headword) {
extension_dictionary_entry *ede = CREATE(extension_dictionary_entry);
Extensions::IDs::new(&(ede->ede_id), author, title, DICTIONARY_REFERRED_EIDBC);
ede->ede_work = Works::new(extension_genre, title, author);
Works::add_to_database(ede->ede_work, DICTIONARY_REFERRED_WDBC);
ede->entry_text = Str::duplicate(headword);
ede->type = Str::duplicate(category);
ede->sorting = Str::new();
@ -198,9 +199,9 @@ void Extensions::Dictionary::new_dictionary_entry_raw(text_stream *category,
break;
}
}
if (Str::len(sdate) > 0) Extensions::IDs::set_sort_date(&(ede->ede_id), sdate);
if (wc > 0) Extensions::IDs::set_word_count(&(ede->ede_id), wc);
if (Str::len(udate) > 0) Extensions::IDs::set_usage_date(&(ede->ede_id), udate);
if (Str::len(sdate) > 0) Works::set_sort_date(ede->ede_work, sdate);
if (wc > 0) Works::set_word_count(ede->ede_work, wc);
if (Str::len(udate) > 0) Works::set_usage_date(ede->ede_work, udate);
DISCARD_TEXT(sdate);
DISCARD_TEXT(udate);
}
@ -332,7 +333,7 @@ for any future increase of the above maxima without fuss.)
@<Write line to the dictionary file from single entry@> =
WRITE_TO(DICTF, "|%S|%S|%S|%S|\n",
ede->ede_id.author_name, ede->ede_id.title,
ede->ede_work->author_name, ede->ede_work->title,
ede->entry_text, ede->type);
@h Sorting the extension dictionary.
@ -526,20 +527,20 @@ not seem to have arisen from homonyms like "lead" (the substance) versus
=
void Extensions::Dictionary::extension_clash(extension_dictionary_entry *ede1, extension_dictionary_entry *ede2) {
extension_dictionary_entry *left = NULL, *right = NULL;
extension_identifier *leftx, *rightx;
inbuild_work *leftx, *rightx;
known_extension_clash *kec;
if ((ede1 == NULL) || (ede2 == NULL)) internal_error("bad extension clash");
int d = Extensions::IDs::compare(&(ede1->ede_id), &(ede2->ede_id)); /* compare source extensions */
int d = Works::compare(ede1->ede_work, ede2->ede_work); /* compare source extensions */
@<Ignore apparent clashes which are in fact not troublesome@>;
if (d < 0) { left = ede1; right = ede2; }
if (d > 0) { left = ede2; right = ede1; }
leftx = &(left->ede_id); rightx = &(right->ede_id);
leftx = left->ede_work; rightx = right->ede_work;
LOOP_OVER(kec, known_extension_clash)
if ((kec->first_known) && (Extensions::IDs::match(leftx, &(kec->leftx->ede_id)))) {
if ((kec->first_known) && (Works::match(leftx, kec->leftx->ede_work))) {
@<Search list of KECs deriving from the same left extension as this clash@>;
return;
}
@ -568,7 +569,7 @@ extension anywhere in the list, we must add the new pair of definitions:
@<Search list of KECs deriving from the same left extension as this clash@> =
while (kec) {
if (Extensions::IDs::match(rightx, &(kec->rightx->ede_id))) {
if (Works::match(rightx, kec->rightx->ede_work)) {
kec->number_clashes++; return;
}
if (kec->next == NULL) {
@ -610,14 +611,14 @@ matter to the UTF-8 HTML file:
@<Write a paragraph about extensions clashing with the lefthand one here@> =
known_extension_clash *example;
HTML_OPEN("b");
Extensions::IDs::write_to_HTML_file(OUT, &(kec->leftx->ede_id), FALSE);
Works::write_to_HTML_file(OUT, kec->leftx->ede_work, FALSE);
HTML_CLOSE("b");
WRITE(": ");
for (example = kec; example; example = example->next) {
WRITE("clash with ");
HTML_OPEN("b");
Extensions::IDs::write_to_HTML_file(OUT, &(example->rightx->ede_id), FALSE);
Works::write_to_HTML_file(OUT, example->rightx->ede_work, FALSE);
HTML_CLOSE("b");
WRITE(" (on ");
if (example->number_clashes > 1)
@ -683,6 +684,6 @@ A vs B, A vs C, then B vs C. This has $O(N^2)$ running time, so if there are
if (tint) HTML::end_colour(OUT);
WRITE(" - <i>%S</i>&nbsp;&nbsp;&nbsp;", ede->type);
HTML_OPEN_WITH("span", "class=\"smaller\"");
Extensions::IDs::write_link_to_HTML_file(OUT, &(ede->ede_id));
Works::write_link_to_HTML_file(OUT, ede->ede_work);
HTML_CLOSE("span");
HTML_CLOSE("p");

View file

@ -58,20 +58,20 @@ is any, as well as the correct identifying headings and requirements.
=
int Extensions::Documentation::write_extension_documentation_page(extension_census_datum *ecd, extension_file *ef,
int eg_number) {
extension_identifier *eid = NULL;
inbuild_work *work = NULL;
text_stream DOCF_struct;
text_stream *DOCF = &DOCF_struct;
FILE *TEST_DOCF;
int page_exists_already, no_egs = 0;
if (ecd) eid = &(ecd->ecd_id); else if (ef) eid = &(ef->ef_id);
if (ecd) work = ecd->ecd_work; else if (ef) work = ef->ef_work;
else internal_error("WEDP incorrectly called");
LOGIF(EXTENSIONS_CENSUS, "WEDP %s (%X)/%d\n", (ecd)?"ecd":" ef", eid, eg_number);
LOGIF(EXTENSIONS_CENSUS, "WEDP %s (%X)/%d\n", (ecd)?"ecd":" ef", work, eg_number);
TEMPORARY_TEXT(leaf);
Str::copy(leaf, eid->title);
Str::copy(leaf, work->title);
if (eg_number > 0) WRITE_TO(leaf, "-eg%d", eg_number);
filename *name = Locations::of_extension_documentation(leaf, eid->author_name);
filename *name = Locations::of_extension_documentation(leaf, work->author_name);
page_exists_already = FALSE;
TEST_DOCF = Filenames::fopen(name, "r");
@ -87,7 +87,7 @@ int Extensions::Documentation::write_extension_documentation_page(extension_cens
if (ef == NULL) internal_error("null EF in extension documentation writer");
if (Pathnames::create_in_file_system(
Pathnames::subfolder(pathname_of_extension_docs_inner, eid->author_name)) == 0)
Pathnames::subfolder(pathname_of_extension_docs_inner, work->author_name)) == 0)
return 0;
if (STREAM_OPEN_TO_FILE(DOCF, name, UTF8_ENC) == FALSE)
@ -111,12 +111,12 @@ calls.
@<Convert ECD to a text-only EF@> =
feed_t id = Feeds::begin();
Feeds::feed_stream(eid->raw_author_name);
Feeds::feed_stream(work->raw_author_name);
Feeds::feed_text(L" ");
wording AW = Feeds::end(id);
id = Feeds::begin();
Feeds::feed_stream(eid->raw_title);
Feeds::feed_stream(work->raw_title);
Feeds::feed_text(L" ");
wording TW = Feeds::end(id);
@ -153,10 +153,10 @@ different template:
@<Write documentation for a specific extension into the page@> =
HTML_OPEN("p");
if (Extensions::IDs::is_standard_rules(eid) == FALSE)
if (Works::is_standard_rules(work) == FALSE)
@<Write Javascript paste icon for source text to include this extension@>;
WRITE("<b>");
Extensions::IDs::write_to_HTML_file(OUT, eid, TRUE);
Works::write_to_HTML_file(OUT, work, TRUE);
WRITE("</b>");
HTML_CLOSE("p");
HTML_OPEN("p");
@ -175,7 +175,7 @@ different template:
@<Write Javascript paste icon for source text to include this extension@> =
TEMPORARY_TEXT(inclusion_text);
WRITE_TO(inclusion_text, "Include %X.\n\n\n", eid);
WRITE_TO(inclusion_text, "Include %X.\n\n\n", work);
HTML::Javascript::paste_stream(OUT, inclusion_text);
DISCARD_TEXT(inclusion_text);
WRITE("&nbsp;");

View file

@ -142,7 +142,7 @@ its purpose.
=
typedef struct extension_file {
struct extension_identifier ef_id; /* Texts of title and author with hash code */
struct inbuild_work *ef_work; /* Texts of title and author with hash code */
struct wording author_text; /* Author's name */
struct wording title_text; /* Extension name */
struct wording body_text; /* Body of source text supplied in extension, if any */
@ -171,7 +171,7 @@ extension_file *Extensions::Files::new(wording AW, wording NW, wording VMW, int
extension_file *ef = CREATE(extension_file);
ef->author_text = AW;
ef->title_text = NW;
@<Create EID for new extension file@>;
@<Create work for new extension file@>;
ef->min_version_needed = version_word;
ef->inclusion_sentence = current_sentence;
ef->VM_restriction_text = VMW;
@ -196,7 +196,7 @@ title names, and then produce problem messages in the event of only longish
ones, unless the census is going on: in which case it's better to leave the
matter to the census errors system elsewhere.
@<Create EID for new extension file@> =
@<Create work for new extension file@> =
TEMPORARY_TEXT(exft);
TEMPORARY_TEXT(exfa);
WRITE_TO(exft, "%+W", ef->title_text);
@ -219,8 +219,9 @@ matter to the census errors system elsewhere.
Str::truncate(exft, MAX_EXTENSION_AUTHOR_LENGTH-1);
}
}
Extensions::IDs::new(&(ef->ef_id), exfa, exft, LOADED_EIDBC);
if (Extensions::IDs::is_standard_rules(&(ef->ef_id))) standard_rules_extension = ef;
ef->ef_work = Works::new(extension_genre, exft, exfa);
Works::add_to_database(ef->ef_work, LOADED_WDBC);
if (Works::is_standard_rules(ef->ef_work)) standard_rules_extension = ef;
DISCARD_TEXT(exft);
DISCARD_TEXT(exfa);
@ -262,8 +263,8 @@ source_file *Extensions::Files::get_corresponding_source_file(extension_file *ef
@ When headings cross-refer to extensions, they need to read extension IDs, so:
=
extension_identifier *Extensions::Files::get_eid(extension_file *ef) {
return &(ef->ef_id);
inbuild_work *Extensions::Files::get_work(extension_file *ef) {
return ef->ef_work;
}
@ A few problem messages need the version number loaded, so:
@ -459,10 +460,10 @@ and author. These are printed as I6 strings, hence the ISO encoding.
=
void Extensions::Files::credit_ef(OUTPUT_STREAM, extension_file *ef, int with_newline) {
WRITE("%S", ef->ef_id.raw_title);
WRITE("%S", ef->ef_work->raw_title);
if (ef->version_loaded >= 0)
WRITE(" version %+W", Wordings::one_word(ef->version_loaded));
WRITE(" by %S", ef->ef_id.raw_author_name);
WRITE(" by %S", ef->ef_work->raw_author_name);
if (Str::len(ef->extra_credit_as_lexed) > 0) WRITE(" (%S)", ef->extra_credit_as_lexed);
if (with_newline) WRITE("\n");
}
@ -513,9 +514,9 @@ void Extensions::Files::index_extensions_from(OUTPUT_STREAM, extension_file *fro
HTML_OPEN_WITH("li", "class=\"leaded indent2\"");
HTML_OPEN("span");
WRITE("%+W ", ef->title_text);
Extensions::IDs::begin_extension_link(OUT, &(ef->ef_id), NULL);
Works::begin_extension_link(OUT, ef->ef_work, NULL);
HTML_TAG_WITH("img", "border=0 src=inform:/doc_images/help.png");
Extensions::IDs::end_extension_link(OUT, &(ef->ef_id));
Works::end_extension_link(OUT, ef->ef_work);
if (ef != standard_rules_extension) { /* give author and inclusion links, but not for SR */
WRITE(" by %+W", ef->author_text);
}
@ -571,7 +572,7 @@ void Extensions::Files::update_census(void) {
LOOP_OVER(ef, extension_file) Extensions::Documentation::write_detailed(ef);
Extensions::Files::write_sketchy_documentation_for_extensions_found();
Extensions::Dictionary::write_back();
if (Log::aspect_switched_on(EXTENSIONS_CENSUS_DA)) Extensions::IDs::log_EID_hash_table();
if (Log::aspect_switched_on(EXTENSIONS_CENSUS_DA)) Works::log_work_hash_table();
}
@ Documenting extensions seen but not used: we run through the census

View file

@ -1,454 +0,0 @@
[Extensions::IDs::] Extension Identifiers.
To store, hash code and compare title/author pairs used to identify
extensions which, though installed, are not necessarily used in the present
source text.
@h Definitions.
@ Extensions are identified by the pair of title and author name, each of
which is an ISO Latin-1 string limited in length, with certain bad-news
characters excluded (such as |/| and |:|) so that they can be used
directly in filenames. However, we will not want to compare these by
string comparison: so we hash-code the combination for speed. The
following structure holds a combination of the textual names and the
hash code:
=
typedef struct extension_identifier {
struct text_stream *author_name;
struct text_stream *raw_author_name;
struct text_stream *title;
struct text_stream *raw_title;
int extension_id_hash_code; /* hash code derived from the above */
} extension_identifier;
@ Each EID is given a hash code -- an integer between 0 and the following
constant minus 1, derived from its title and author name.
@d EI_HASH_CODING_BASE 499
@ EIDs are created with one of the following contexts:
@d NO_EIDB_CONTEXTS 5
@d LOADED_EIDBC 0
@d INSTALLED_EIDBC 1
@d DICTIONARY_REFERRED_EIDBC 2
@d HYPOTHETICAL_EIDBC 3
@d USEWITH_EIDBC 4
=
typedef struct extension_identifier_database_entry {
struct extension_identifier *eide_id;
struct extension_identifier_database_entry *hash_next; /* next one in hashed EID database */
int incidence_count[NO_EIDB_CONTEXTS];
struct text_stream *last_usage_date;
struct text_stream *sort_usage_date;
struct text_stream *word_count_text;
int word_count_number;
} extension_identifier_database_entry;
@
@d EXTENSIONS_PRESENT
@ Each EID structure is written only once, and its title and author name are
not subsequently altered. We therefore hash-code on arrival. As when
hashing vocabulary, we apply the X 30011 algorithm, this time with 499
(coprime to 30011) as base, to the text of the Unix-style pathname
|Author/Title|.
It is important that no EID structure ever be modified or destroyed once
created, so it must not be stored inside a transient data structure like a
|specification|.
Though it is probably the case that the author name and title supplied are
already of normalised casing, we do not want to rely on that. EIDs of the
same extension but named with different casing conventions would fail to
match: and this could happen if a new build of Inform were published which
made a subtle change to the casing conventions, but which continued to use
an extension dictionary file first written by previous builds under the
previous conventions.
=
void Extensions::IDs::new(extension_identifier *eid, text_stream *an, text_stream *ti, int context) {
eid->raw_author_name = Str::duplicate(an);
eid->author_name = Str::duplicate(an);
eid->raw_title = Str::duplicate(ti);
eid->title = Str::duplicate(ti);
Extensions::IDs::normalise_casing(eid->author_name);
Extensions::IDs::normalise_casing(eid->title);
unsigned int hc = 0;
LOOP_THROUGH_TEXT(pos, eid->author_name)
hc = hc*30011 + (unsigned int) Str::get(pos);
hc = hc*30011 + (unsigned int) '/';
LOOP_THROUGH_TEXT(pos, eid->title)
hc = hc*30011 + (unsigned int) Str::get(pos);
hc = hc % EI_HASH_CODING_BASE;
eid->extension_id_hash_code = (int) hc;
Extensions::IDs::add_EID_to_database(eid, context);
}
void Extensions::IDs::set_raw(extension_identifier *eid, text_stream *raw_an, text_stream *raw_ti) {
eid->raw_author_name = Str::duplicate(raw_an);
eid->raw_title = Str::duplicate(raw_ti);
}
void Extensions::IDs::write_to_HTML_file(OUTPUT_STREAM, extension_identifier *eid, int fancy) {
WRITE("%S", eid->raw_title);
if (fancy) HTML::begin_colour(OUT, I"404040");
WRITE(" by ");
if (fancy) HTML::end_colour(OUT);
WRITE("%S", eid->raw_author_name);
}
void Extensions::IDs::write_link_to_HTML_file(OUTPUT_STREAM, extension_identifier *eid) {
HTML_OPEN_WITH("a", "href='Extensions/%S/%S.html' style=\"text-decoration: none\"",
eid->author_name, eid->title);
HTML::begin_colour(OUT, I"404040");
if (Extensions::IDs::is_standard_rules(eid)) WRITE("%S", eid->title);
else Extensions::IDs::write_to_HTML_file(OUT, eid, FALSE);
HTML::end_colour(OUT);
HTML_CLOSE("a");
}
void Extensions::IDs::writer(OUTPUT_STREAM, char *format_string, void *vE) {
extension_identifier *eid = (extension_identifier *) vE;
switch (format_string[0]) {
case '<':
if (eid == NULL) WRITE("source text");
else {
WRITE("%S", eid->raw_title);
if (Extensions::IDs::is_standard_rules(eid) == FALSE)
WRITE(" by %S", eid->raw_author_name);
}
break;
case 'X':
if (eid == NULL) WRITE("<no extension>");
else WRITE("%S by %S", eid->raw_title, eid->raw_author_name);
break;
default:
internal_error("bad %X extension");
}
}
@ Two EIDs with different hash codes definitely identify different extensions;
if the code is the same, we must use |strcmp| on the actual title and author
name. This is in effect case insensitive, since we normalised casing when
the EIDs were created.
(Note that this is not a lexicographic function suitable for sorting
EIDs into alphabetical order: it cannot be, since the hash code is not
order-preserving. To emphasise this we return true or false rather than a
|strcmp|-style delta value. For |Extensions::IDs::compare|, see below...)
=
int Extensions::IDs::match(extension_identifier *eid1, extension_identifier *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad eid match");
if (eid1->extension_id_hash_code != eid2->extension_id_hash_code) return FALSE;
if (Str::eq(eid1->author_name, eid2->author_name) == FALSE) return FALSE;
if (Str::eq(eid1->title, eid2->title) == FALSE) return FALSE;
return TRUE;
}
@ These are quite a deal slower, but trichotomous.
=
int Extensions::IDs::compare(extension_identifier *eid1, extension_identifier *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad eid match");
int d = Str::cmp(eid1->author_name, eid2->author_name);
if (d != 0) return d;
return Str::cmp(eid1->title, eid2->title);
}
int Extensions::IDs::compare_by_title(extension_identifier *eid1, extension_identifier *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad eid match");
int d = Str::cmp(eid1->title, eid2->title);
if (d != 0) return d;
return Str::cmp(eid1->author_name, eid2->author_name);
}
int Extensions::IDs::compare_by_date(extension_identifier *eid1, extension_identifier *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad eid match");
int d = Str::cmp(Extensions::IDs::get_sort_date(eid2), Extensions::IDs::get_sort_date(eid1));
if (d != 0) return d;
d = Str::cmp(eid1->title, eid2->title);
if (d != 0) return d;
return Str::cmp(eid1->author_name, eid2->author_name);
}
int Extensions::IDs::compare_by_length(extension_identifier *eid1, extension_identifier *eid2) {
if ((eid1 == NULL) || (eid2 == NULL)) internal_error("bad eid match");
int d = Str::cmp(Extensions::IDs::get_sort_word_count(eid2), Extensions::IDs::get_sort_word_count(eid1));
if (d != 0) return d;
d = Str::cmp(eid1->title, eid2->title);
if (d != 0) return d;
return Str::cmp(eid1->author_name, eid2->author_name);
}
@ Because the Standard Rules are treated slightly differently by the
documentation, and so forth, it's convenient to provide a single function
which asks if a given EID is talking about them.
=
int an_eid_for_standard_rules_created = FALSE;
extension_identifier an_eid_for_standard_rules;
int an_eid_for_basic_inform_created = FALSE;
extension_identifier an_eid_for_basic_inform;
int Extensions::IDs::is_standard_rules(extension_identifier *eid) {
if (an_eid_for_standard_rules_created == FALSE) {
an_eid_for_standard_rules_created = TRUE;
Extensions::IDs::new(&an_eid_for_standard_rules,
I"Graham Nelson", I"Standard Rules", HYPOTHETICAL_EIDBC);
}
return Extensions::IDs::match(eid, &an_eid_for_standard_rules);
}
int Extensions::IDs::is_basic_inform(extension_identifier *eid) {
if (an_eid_for_basic_inform_created == FALSE) {
an_eid_for_basic_inform_created = TRUE;
Extensions::IDs::new(&an_eid_for_basic_inform,
I"Graham Nelson", I"Basic Inform", HYPOTHETICAL_EIDBC);
}
return Extensions::IDs::match(eid, &an_eid_for_basic_inform);
}
@h The database of known EIDs.
We will need to be able to give rapid answers to questions like "is there
an installed extension with this EID?" and "does any entry in the dictionary
relate to this EID?": there may be many extensions and very many dictionary
entries, so we keep an incidence count of each EID and in what context it
has been used, and store that in a hash table. Note that each distinct EID
is recorded only once in the table: this is important, as although an
individual extension can only be loaded or installed once, it could be
referred to in the dictionary dozens or even hundreds of times.
The table is unsorted and is intended for rapid searching. Typically there
will be only a handful of EIDs in the list of those with a given hash code:
indeed, the typical number will be 0 or 1.
=
/* The EID database: a fixed-size hash table of singly-linked chains, built
   lazily on the first call to |add_EID_to_database| below. */
int EID_database_created = FALSE;
extension_identifier_database_entry *hash_of_EIDEs[EI_HASH_CODING_BASE];
/* Record one more usage of |eid| in the given context. If the EID is already
   in the table, only its incidence count is bumped; otherwise a fresh entry
   is created, prepended to its hash chain, with all counts zeroed except the
   one for |context|. */
void Extensions::IDs::add_EID_to_database(extension_identifier *eid, int context) {
	if (!(EID_database_created)) { /* lazily blank the table on first use */
		EID_database_created = TRUE;
		for (int i=0; i<EI_HASH_CODING_BASE; i++) hash_of_EIDEs[i] = NULL;
	}
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			entry->incidence_count[context]++;
			return;
		}
	/* not found: make a new entry at the front of the chain */
	extension_identifier_database_entry *entry =
		CREATE(extension_identifier_database_entry);
	entry->hash_next = hash_of_EIDEs[hc];
	hash_of_EIDEs[hc] = entry;
	entry->eide_id = eid;
	for (int i=0; i<NO_EIDB_CONTEXTS; i++) entry->incidence_count[i] = 0;
	entry->incidence_count[context] = 1;
	entry->last_usage_date = Str::new();
	entry->sort_usage_date = Str::new();
	entry->word_count_text = Str::new();
}
@ This gives us reasonably rapid access to a shared date:
=
/* Set the human-readable last-usage date for |eid|; a no-op if the EID has
   never been entered into the database. */
void Extensions::IDs::set_usage_date(extension_identifier *eid, text_stream *date) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			Str::copy(entry->last_usage_date, date);
			return;
		}
}
/* Set the sortable (zero-padded) usage date for |eid|; a no-op if the EID
   has never been entered into the database. */
void Extensions::IDs::set_sort_date(extension_identifier *eid, text_stream *date) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			Str::copy(entry->sort_usage_date, date);
			return;
		}
}
/* The last-usage date for |eid|, for display. Falls back to "Once upon a
   time" if the extension is known only from dictionary references, "Never"
   if known but unused, and "---" if absent from the database entirely. */
text_stream *Extensions::IDs::get_usage_date(extension_identifier *eid) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			if (Str::len(entry->last_usage_date) > 0)
				return entry->last_usage_date;
			if (entry->incidence_count[DICTIONARY_REFERRED_EIDBC] > 0)
				return I"Once upon a time";
			return I"Never";
		}
	return I"---";
}
/* The sortable usage date for |eid|. The fallback values are zero-padded so
   that they order sensibly under plain string comparison in the date-sorting
   comparator above. */
text_stream *Extensions::IDs::get_sort_date(extension_identifier *eid) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			if (Str::len(entry->sort_usage_date) > 0)
				return entry->sort_usage_date;
			if (entry->incidence_count[DICTIONARY_REFERRED_EIDBC] > 0)
				return I"00000000000000Once upon a time";
			return I"00000000000000Never";
		}
	return I"000000000000000";
}
/* Record the word count of |eid|, both as a number and as a zero-padded
   sortable text; a no-op if the EID is not in the database.
   NOTE(review): |WRITE_TO| appends, so calling this twice for the same EID
   would concatenate two count texts — presumably each EID's count is set at
   most once; confirm against callers. */
void Extensions::IDs::set_word_count(extension_identifier *eid, int wc) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			WRITE_TO(entry->word_count_text, "%08d words", wc);
			entry->word_count_number = wc;
			return;
		}
}
/* The sortable word-count text for |eid|, with zero-padded fallback phrases
   for extensions known only from the dictionary, or never read at all. */
text_stream *Extensions::IDs::get_sort_word_count(extension_identifier *eid) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			if (Str::len(entry->word_count_text) > 0)
				return entry->word_count_text;
			if (entry->incidence_count[DICTIONARY_REFERRED_EIDBC] > 0)
				return I"00000000I did read this, but forgot";
			return I"00000000I've never read this";
		}
	return I"---";
}
/* TRUE only when |eid| has dictionary references but no recorded word count:
   that is, the extension was read at some point but its details are gone. */
int Extensions::IDs::forgot(extension_identifier *eid) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			if (Str::len(entry->word_count_text) > 0) return FALSE;
			return (entry->incidence_count[DICTIONARY_REFERRED_EIDBC] > 0)?
				TRUE: FALSE;
		}
	return FALSE;
}
/* TRUE only when |eid| is in the database with neither a recorded word count
   nor any dictionary references: the extension has never been read. */
int Extensions::IDs::never(extension_identifier *eid) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id)) {
			if (Str::len(entry->word_count_text) > 0) return FALSE;
			return (entry->incidence_count[DICTIONARY_REFERRED_EIDBC] > 0)?
				FALSE: TRUE;
		}
	return FALSE;
}
/* The recorded word count for |eid|, or 0 if none is known. */
int Extensions::IDs::get_word_count(extension_identifier *eid) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id))
			return entry->word_count_number;
	return 0;
}
@ The purpose of the hash table is to enable us to reply quickly when asked
for one of the following usage counts:
=
/* How many times |eid| has been recorded in the given usage context; 0 if
   the EID has never been entered into the database. */
int Extensions::IDs::no_times_used_in_context(extension_identifier *eid, int context) {
	int hc = eid->extension_id_hash_code;
	for (extension_identifier_database_entry *entry = hash_of_EIDEs[hc];
		entry; entry = entry->hash_next)
		if (Extensions::IDs::match(eid, entry->eide_id))
			return entry->incidence_count[context];
	return 0;
}
@ The EID hash table makes quite interesting reading, so:
=
void Extensions::IDs::log_EID_hash_table(void) {
int hc, total = 0;
LOG("Extension identifier hash table:\n");
for (hc=0; hc<EI_HASH_CODING_BASE; hc++) {
extension_identifier_database_entry *eide;
for (eide = hash_of_EIDEs[hc]; eide; eide = eide->hash_next) {
total++;
LOG("%03d %3d %3d %3d %3d %X\n",
hc, eide->incidence_count[0], eide->incidence_count[1],
eide->incidence_count[2], eide->incidence_count[3],
eide->eide_id);
}
}
LOG("%d entries in all\n", total);
}
@h How casing is normalised.
Every word is capitalised, where a word begins at the start of the text,
after a hyphen, or after a bracket. Thus "Every Word Counts", "Even
Double-Barrelled Ones (And Even Parenthetically)".
=
/* Normalise the casing of |p| in place: upper-case the first letter of each
   word and lower-case the rest, where words begin at the start of the text,
   after a space, after a hyphen, or after an open bracket. */
void Extensions::IDs::normalise_casing(text_stream *p) {
	int at_word_start = TRUE;
	LOOP_THROUGH_TEXT(pos, p) {
		wchar_t c = Str::get(pos);
		Str::put(pos, (at_word_start)?
			(Characters::toupper(c)): (Characters::tolower(c)));
		/* the original (pre-case-change) character decides the next boundary */
		at_word_start = ((c == ' ') || (c == '-') || (c == '('))? TRUE: FALSE;
	}
}
@h Documentation links.
This is where HTML links to extension documentation are created; the URL for
each extension's page is generated from its ID.
=
/* Open an HTML anchor linking to the documentation page for |eid|, written
   to OUT. The URL is built from the (escaped) author name and title; the
   tooltip is |rubric| if non-empty, otherwise the EID itself. Must be paired
   with a call to |end_extension_link| below. */
void Extensions::IDs::begin_extension_link(OUTPUT_STREAM, extension_identifier *eid, text_stream *rubric) {
	TEMPORARY_TEXT(link);
	WRITE_TO(link, "href='inform://Extensions/Extensions/");
	Extensions::IDs::escape_apostrophes(link, eid->author_name);
	WRITE_TO(link, "/");
	Extensions::IDs::escape_apostrophes(link, eid->title);
	WRITE_TO(link, ".html' ");
	if (Str::len(rubric) > 0) WRITE_TO(link, "title=\"%S\" ", rubric);
	else WRITE_TO(link, "title=\"%X\" ", eid); /* %X prints the EID itself */
	WRITE_TO(link, "style=\"text-decoration: none\"");
	HTML_OPEN_WITH("a", "%S", link);
	DISCARD_TEXT(link);
}
/* Copy |S| to OUT, percent-encoding the characters which would break (or be
   unsafe in) the single-quoted href attribute: despite the name, rather more
   than apostrophes are escaped. Each such character is written as "%" plus
   its code value in hexadecimal. */
void Extensions::IDs::escape_apostrophes(OUTPUT_STREAM, text_stream *S) {
	LOOP_THROUGH_TEXT(pos, S) {
		wchar_t c = Str::get(pos);
		switch (c) {
			case '\'': case '\"': case ' ': case '&':
			case '<': case '>': case '%':
				WRITE("%%%x", (int) c);
				break;
			default:
				PUT(c);
				break;
		}
	}
}
/* Close the anchor opened by |begin_extension_link|. The |eid| parameter is
   unused, but kept so the call pairs symmetrically with the opener. */
void Extensions::IDs::end_extension_link(OUTPUT_STREAM, extension_identifier *eid) {
	HTML_CLOSE("a");
}

View file

@ -69,7 +69,6 @@ which, we also handle extension installation, uninstallation and
documentation here."
Extension Files
Including Extensions
Extension Identifiers
Extension Census
Extension Dictionary
Extension Documentation

View file

@ -780,9 +780,9 @@ void Index::link_to_location(OUTPUT_STREAM, source_location sl, int nonbreaking_
if (ef) {
if (ef != standard_rules_extension) {
if (nonbreaking_space) WRITE("&nbsp;"); else WRITE(" ");
Extensions::IDs::begin_extension_link(OUT, Extensions::Files::get_eid(ef), NULL);
Works::begin_extension_link(OUT, Extensions::Files::get_work(ef), NULL);
HTML_TAG_WITH("img", "border=0 src=inform:/doc_images/Revealext.png");
Extensions::IDs::end_extension_link(OUT, Extensions::Files::get_eid(ef));
Works::end_extension_link(OUT, Extensions::Files::get_work(ef));
}
return;
}

View file

@ -83,14 +83,14 @@ void Problems::Buffer::copy_source_reference_into_problem_buffer(wording W) {
WRITE_TO(PBUFF, "'");
Problems::Buffer::copy_text_into_problem_buffer(W);
text_stream *paraphrase = file;
#ifdef EXTENSIONS_PRESENT
#ifdef INBUILD_MODULE
paraphrase = I"source text";
extension_file *ef = SourceFiles::get_extension_corresponding(referred);
if (ef) {
extension_identifier *eid = Extensions::Files::get_eid(ef);
if ((eid) && (Extensions::IDs::is_standard_rules(eid)))
inbuild_work *work = Extensions::Files::get_work(ef);
if ((work) && (Works::is_standard_rules(work)))
paraphrase = I"the Standard Rules";
else if ((eid) && (Extensions::IDs::is_basic_inform(eid)))
else if ((work) && (Works::is_basic_inform(work)))
paraphrase = I"Basic Inform";
else
paraphrase = file;

View file

@ -118,7 +118,7 @@ which they differ.
if (pos) {
#ifdef CORE_MODULE
extension_file *ef = SourceFiles::get_extension_corresponding(pos);
if (ef) WRITE_TO(PBUFF, "</b> in the extension <b>%X", Extensions::Files::get_eid(ef));
if (ef) WRITE_TO(PBUFF, "</b> in the extension <b>%X", Extensions::Files::get_work(ef));
#endif
}
WRITE_TO(PBUFF, "</b>:");