From 0477a44d073b4790a6d86dbb6bee73c31f538b90 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:02:27 +0100 Subject: [PATCH 01/17] Updated lexer to use Options.Symbols instead of literals --- lexer.odin | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/lexer.odin b/lexer.odin index 7c044c1..c3a15b5 100644 --- a/lexer.odin +++ b/lexer.odin @@ -26,10 +26,16 @@ lex :: proc(l: ^Lexer) -> [dynamic]Token { for l.pos < len(l.input) { switch c = next(l); c { case '\n': append(&l.tokens, Token{.EOL, "\n", l.line, l.col}) - case ';', '#': append(&l.tokens, Token{.COMMENT, lexId(l).value, l.line, l.col}) - case ']': append(&l.tokens, Token{.RSB, "]", l.line, l.col}) - case '[': append(&l.tokens, Token{.LSB, "[", l.line, l.col}) - case '=', ':': append(&l.tokens, Token{.DELIMITER, "=", l.line, l.col}) + case Options.Symbols.Comment: append(&l.tokens, lexComment(l)) + case Options.Symbols.SectionRight: + s, i := utf8.encode_rune(Options.Symbols.SectionRight) + append(&l.tokens, Token{.SECTION_RIGHT, string(s[:i]), l.line, l.col}) + case Options.Symbols.SectionLeft: + s, i := utf8.encode_rune(Options.Symbols.SectionLeft) + append(&l.tokens, Token{.SECTION_LEFT, string(s[:i]), l.line, l.col}) + case Options.Symbols.Delimiter: + s, i := utf8.encode_rune(Options.Symbols.Delimiter) + append(&l.tokens, Token{.DELIMITER, string(s[:i]), l.line, l.col}) case ' ', '\t', '\r': break case: append(&l.tokens, lexId(l)) } @@ -45,13 +51,13 @@ lexId :: proc(l: ^Lexer) -> Token { for l.pos < len(l.input) { c := next(l) - if c == 0 || c == '\n' || c == '[' || c == ']' || c == '=' { + if c == 0 || c == '\n' || c == Options.Symbols.SectionRight || c == Options.Symbols.SectionLeft || c == Options.Symbols.SectionRight { back(l) break } } - return Token{.ID, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} + return Token{.IDENTIFIER, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} } 
lexComment :: proc(l: ^Lexer) -> Token { @@ -65,7 +71,7 @@ lexComment :: proc(l: ^Lexer) -> Token { } } - return Token{.ID, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} + return Token{.COMMENT, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} } back :: proc(l: ^Lexer) { From a3f26261fc4a6d98d63c3bc1ef69624e27a1b495 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:03:30 +0100 Subject: [PATCH 02/17] Updated parser to use Options.Symbols instead of literals --- parser.odin | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parser.odin b/parser.odin index 3889fd5..09da68d 100644 --- a/parser.odin +++ b/parser.odin @@ -25,8 +25,8 @@ parse :: proc(p: ^Parser) { for p.pos < len(p.tokens) { t = p.tokens[p.pos] #partial switch t.type { - case .LSB: parse_section(p) - case .ID: parse_key(p) + case .SECTION_LEFT: parse_section(p) + case .IDENTIFIER: parse_key(p) case .COMMENT: p.pos += 1 // TODO Add comments to config? case .EOL: p.pos += 1 case .EOF: return @@ -59,7 +59,7 @@ parse_section :: proc(p: ^Parser) { p.pos += 1 section_name := strings.clone(p.tokens[p.pos].value) p.pos += 1 - if p.tokens[p.pos].type != .RSB { + if p.tokens[p.pos].type != .SECTION_RIGHT { fmt.println("Expected closing bracket") return } From 0c4f5331bc44840a1fdd515813a2199fde3600af Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:04:26 +0100 Subject: [PATCH 03/17] All IgnoreDuplicate* is now AllowDuplicate* in Options.Rules --- options.odin | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/options.odin b/options.odin index a6778ab..f3508cf 100644 --- a/options.odin +++ b/options.odin @@ -2,7 +2,7 @@ package ini import "core:bufio" -// Ini is not standardised. Therefore I've given the programmer the ability to interchange symbols and rules. +// Ini is not standardised. 
Therefore I've let the user interchange symbols and rules that will apply during pasing/serialisation. IniOptions :: struct { // Symbols @@ -31,10 +31,10 @@ IniOptions :: struct { AllowNestedSections: bool, // Whether nested sections are allowed or not (default: true) AllowSpacesInKeys: bool, // Whether spaces are allowed to be a part of keys or not (default: false) AllowSpacesInValues: bool, // Whether spaces are allowed to be a part of values or not (default: true) + AllowDuplicateKeys: bool, // Whether duplicate keys are ignored or not (default: false) + AllowDuplicateSections: bool, // Whether duplicate sections are ignored or not (default: false) IgnoreCaseSensitivity: bool, // Whether case sensitivity is ignored or not (default: false) - IgnoreDuplicateKeys: bool, // Whether duplicate keys are ignored or not (default: false) - IgnoreDuplicateSections: bool, // Whether duplicate sections are ignored or not (default: false) IgnoreDelimiterPadding: bool, // Whether delimiter padding is ignored or not (default: true) IgnoreSectionNamePadding: bool, // Whether section name padding is ignored or not (default: true) IgnoreValueQuotes: bool, // Whether the quotes around a value are counted as part of the value or not (default: true) From 70063b3e78639c0570ff9ea67e12217516c7871f Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:04:38 +0100 Subject: [PATCH 04/17] Added serialisation-specific rules --- options.odin | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/options.odin b/options.odin index f3508cf..c6c3bc9 100644 --- a/options.odin +++ b/options.odin @@ -26,6 +26,7 @@ IniOptions :: struct { Rules: struct { // If you are reading these rules and think there are more that could be added open an issue (https://github.com/hrszpuk/odin-ini) + // Parsing AllowEmptyValues: bool, // Whether empty values are allowed or not (default: true) AllowEmptySections: bool, // Whether empty 
sections are allowed or not (default: true) AllowNestedSections: bool, // Whether nested sections are allowed or not (default: true) @@ -38,6 +39,20 @@ IniOptions :: struct { IgnoreDelimiterPadding: bool, // Whether delimiter padding is ignored or not (default: true) IgnoreSectionNamePadding: bool, // Whether section name padding is ignored or not (default: true) IgnoreValueQuotes: bool, // Whether the quotes around a value are counted as part of the value or not (default: true) + + IgnoreComments: bool, // Whether to ignore comments when parsing (default: false). + + // Generation + PaddingSymbol: rune, // The symbol used for padding (default: ' ') + DelimiterPaddingAmount: uint, // The amount of padding around the delimiter (default: 1) + SectionNamePaddingAmount: uint, // The amount of padding around the delimiter (default: 1) + BeforeSectionLinePaddingAmount: uint, // The amount of lines placed before a section header (default: 1) + StatementCommentPaddingAmount: uint, // The amount of padding before a comment starts after a statement (default: 1) + EmptyLineCommentPaddingAmount: uint, // The amount of padding before a comment starts on an empty line (default: 0) + + GenerateComments: bool, // Whether to generate comments or not (default: true) + GenerateCommentsJson: bool, // Whether to generate comments in json or not (default: false) + GenerateCommentsNameJson: string, // The name of the comment field if generating in json (default: "__comment__") }, Debug: bool, // Debugging mode will print debug information (default: false) @@ -59,6 +74,19 @@ Options : IniOptions = { false, true, true, + + false, + + ' ', + 1, + 0, + 1, + 1, + 0, + + true, + false, + "__comment__", }, false, // Debug } From 5396bfe5657b50ec1ada0a2a13ec22a0ad633e45 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:04:55 +0100 Subject: [PATCH 05/17] Read now uses new token types --- read.odin | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/read.odin b/read.odin index 2a30043..7781cf1 100644 --- a/read.odin +++ b/read.odin @@ -18,7 +18,7 @@ read_from_string :: proc(content: string, name := "config") -> ^Config { parse(p) for token in tokens { - if token.type == .ID { + if token.type == .IDENTIFIER { delete(token.value) } } From 6ba83f39e1736819ed1e6f7797a23ccaf8abf87a Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:05:03 +0100 Subject: [PATCH 06/17] Updated token types --- token.odin | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/token.odin b/token.odin index 927bac4..e5f735a 100644 --- a/token.odin +++ b/token.odin @@ -3,14 +3,13 @@ package ini import "core:strings" TokenType :: enum { - ID, // Literally anything - QM, // " - LSB, // [ - RSB, // ] - COMMENT, // # or ; - DELIMITER, // = or : - EOL, // \n - EOF // End of file + IDENTIFIER, + SECTION_LEFT, + SECTION_RIGHT, + COMMENT, + DELIMITER, + EOL, + EOF } Token :: struct { From 8e4fd6b6dff1aba9787d270bb8adcd0e020fe52a Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:05:24 +0100 Subject: [PATCH 07/17] write_to_string now uses Options.Symbols instead of literals --- write.odin | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/write.odin b/write.odin index 994cbed..3ae443a 100644 --- a/write.odin +++ b/write.odin @@ -16,24 +16,26 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { for key, value in c.keys { if value.keys != nil { - strings.write_string(§ions, "\n[") + strings.write_string(§ions, "\n") + strings.write_rune(§ions, Options.Symbols.SectionLeft) if prefix { - section_head := fmt.aprintf("%s.%s", c.value, value.value) + section_head := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) strings.write_string(§ions, section_head) - new_value := fmt.aprintf("%s.%s", c.value, value.value) + 
new_value := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) delete(value.value) value.value = new_value delete(section_head) } else { strings.write_string(§ions, value.value) } - strings.write_string(§ions, "]\n") + strings.write_rune(§ions, Options.Symbols.SectionRight) + strings.write_string(§ions, "\n") section := write_to_string(value, true) strings.write_string(§ions, section) delete(section) } else { strings.write_string(&keys, key) - strings.write_string(&keys, "=") + strings.write_rune(&keys, Options.Symbols.Delimiter) strings.write_string(&keys, value.value) strings.write_string(&keys, "\n") } From ccdb1a191075c965d9c2bbf6beaee01c36006bc4 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:05:37 +0100 Subject: [PATCH 08/17] Updated basic.odin example --- examples/basic/basic.odin | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/examples/basic/basic.odin b/examples/basic/basic.odin index fb13c6e..30d6013 100644 --- a/examples/basic/basic.odin +++ b/examples/basic/basic.odin @@ -4,17 +4,25 @@ import ini "../.." 
import "core:fmt" main :: proc() { - config := ini.read_from_file("../../tests/sections.ini") + + ini.Options.Symbols.NestedSection = '/' + config := ini.new_config("config") defer ini.destroy_config(config) ini.set(config, "wife", "Monica") - children := ini.add_section(config, "children") - ini.set(children, "daughter", "Jessica") - ini.set(children, "son", "Billy") + section := ini.add_section(config, "children") + ini.set(section, "kevin", "kevin") + + section_nested := ini.add_section(section, "grandchildren") + ini.set(section_nested, "Anna", "Anna") + + section_nested2 := ini.add_section(section, "grandchildren2") + ini.set(section_nested2, "Anna2", "Anna2") + + section_nested3 := ini.add_section(section_nested2, "grandchildren3") + ini.set(section_nested3, "Anna3", "Anna2") + ini.write_to_file(config, "test.ini") - s := ini.write_to_string(config) - defer delete(s) - fmt.println(s) } \ No newline at end of file From c811dbc46cdd7689f457164685b08acc2456dae7 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:39:40 +0100 Subject: [PATCH 09/17] Added comments to lexer + spacing --- lexer.odin | 52 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/lexer.odin b/lexer.odin index c3a15b5..57204f2 100644 --- a/lexer.odin +++ b/lexer.odin @@ -3,6 +3,8 @@ package ini import "core:fmt" import "core:unicode/utf8" +// Lexer will take the INI text and convert it to a list of tokens. +// "key = value" = [Token{"key", .IDENTIFIER, ...}, Token{"=", .DELIMITER, ...}, Token{"value", .IDENTIFIER, ...}] Lexer :: struct { tokens: [dynamic]Token, input: []rune, @@ -11,6 +13,7 @@ Lexer :: struct { col: int, } +// Creates a new lexer with the given input and returns a pointer to it. 
new_lexer :: proc(input: string) -> ^Lexer { l := new(Lexer) l.tokens = make([dynamic]Token, 0, len(input)/2) @@ -21,23 +24,39 @@ new_lexer :: proc(input: string) -> ^Lexer { return l } +// Produces the dynamic array of tokens from the input stored in the lexer struct lex :: proc(l: ^Lexer) -> [dynamic]Token { - c: rune + c: rune // Current character + for l.pos < len(l.input) { + + // Most symbols can be directly added, identifiers and comments are lexed in their own functions switch c = next(l); c { - case '\n': append(&l.tokens, Token{.EOL, "\n", l.line, l.col}) - case Options.Symbols.Comment: append(&l.tokens, lexComment(l)) - case Options.Symbols.SectionRight: + + case Options.Symbols.Comment: // Defaults to ; + append(&l.tokens, lexId(l)) + + case Options.Symbols.SectionRight: // Defaults to ] s, i := utf8.encode_rune(Options.Symbols.SectionRight) append(&l.tokens, Token{.SECTION_RIGHT, string(s[:i]), l.line, l.col}) - case Options.Symbols.SectionLeft: + + case Options.Symbols.SectionLeft: // Defaults to [ s, i := utf8.encode_rune(Options.Symbols.SectionLeft) append(&l.tokens, Token{.SECTION_LEFT, string(s[:i]), l.line, l.col}) - case Options.Symbols.Delimiter: + + case Options.Symbols.Delimiter: // Defaults to = s, i := utf8.encode_rune(Options.Symbols.Delimiter) append(&l.tokens, Token{.DELIMITER, string(s[:i]), l.line, l.col}) - case ' ', '\t', '\r': break - case: append(&l.tokens, lexId(l)) + + case '\n': + append(&l.tokens, Token{.EOL, "\n", l.line, l.col}) + + // Ignore whitespace + case ' ', '\t', '\r': + break + + case: + append(&l.tokens, lexId(l)) } } @@ -46,32 +65,21 @@ lex :: proc(l: ^Lexer) -> [dynamic]Token { return l.tokens } +// Tokenises identifiers (anything that is not whitespace or in Options.Symbols) +// This also lexs comments btw lexId :: proc(l: ^Lexer) -> Token { start := l.pos - 1 for l.pos < len(l.input) { c := next(l) - if c == 0 || c == '\n' || c == Options.Symbols.SectionRight || c == Options.Symbols.SectionLeft || c == 
Options.Symbols.SectionRight { - back(l) - break - } - } - - return Token{.IDENTIFIER, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} -} -lexComment :: proc(l: ^Lexer) -> Token { - start := l.pos - 1 - - for l.pos < len(l.input) { - c := next(l) if c == 0 || c == '\n' { back(l) break } } - return Token{.COMMENT, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} + return Token{.IDENTIFIER, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} } back :: proc(l: ^Lexer) { From c40df95817deca8191c4b72dca9198fce1d7343a Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:39:44 +0100 Subject: [PATCH 10/17] Added comments to parser + spacing --- parser.odin | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/parser.odin b/parser.odin index 09da68d..59e12c2 100644 --- a/parser.odin +++ b/parser.odin @@ -3,7 +3,7 @@ package ini import "core:strings" import "core:fmt" -// TODO update for new config struct (no more Section) +// The parser reads the list of tokens generated by the lexer and converts them into an ini.Config Parser :: struct { pos: int, tokens: [dynamic]Token, @@ -11,6 +11,7 @@ Parser :: struct { section: ^Config, } +// Creates a new parser with the given token list and config then returns a pointer to it. new_parser :: proc(tokens: [dynamic]Token, config: ^Config) -> ^Parser { p := new(Parser) p.pos = 0 @@ -20,10 +21,13 @@ new_parser :: proc(tokens: [dynamic]Token, config: ^Config) -> ^Parser { return p } +// Parses the tokens into a config struct parse :: proc(p: ^Parser) { t: Token + for p.pos < len(p.tokens) { t = p.tokens[p.pos] + #partial switch t.type { case .SECTION_LEFT: parse_section(p) case .IDENTIFIER: parse_key(p) @@ -37,13 +41,16 @@ parse :: proc(p: ^Parser) { } } +// Parses a "key = value" statement. 
parse_key :: proc(p: ^Parser) { key := strings.clone(p.tokens[p.pos].value) p.pos += 1 + if p.tokens[p.pos].type != .DELIMITER { fmt.println("Expected delimiter after key") return } + p.pos += 1 value := strings.clone(p.tokens[p.pos].value) p.pos += 1 @@ -55,19 +62,23 @@ parse_key :: proc(p: ^Parser) { } } +// Parses a [section] statement parse_section :: proc(p: ^Parser) { p.pos += 1 section_name := strings.clone(p.tokens[p.pos].value) p.pos += 1 + if p.tokens[p.pos].type != .SECTION_RIGHT { fmt.println("Expected closing bracket") return } + p.pos += 1 if p.tokens[p.pos].type != .EOL { fmt.println("Expected newline after section") return } + p.pos += 1 p.section = add_section(p.config, section_name) } \ No newline at end of file From d893ecf426f4ffde24770b30bab7bdd0cd391778 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 06:39:46 +0100 Subject: [PATCH 11/17] Added comments to parser + spacing --- write.odin | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/write.odin b/write.odin index 3ae443a..2c44d54 100644 --- a/write.odin +++ b/write.odin @@ -8,6 +8,7 @@ import "core:fmt" write_to_string :: proc(c: ^Config, prefix := false) -> string { if c == nil || c.keys == nil do return "" // I HOPE THIS WILL STOP THE VOICES FROM SCREAMING AT ME + // All sections come after the global keys (keys without a section, stored within the first ^Config) keys := strings.builder_make_none() defer strings.builder_destroy(&keys) @@ -18,11 +19,17 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { if value.keys != nil { strings.write_string(§ions, "\n") strings.write_rune(§ions, Options.Symbols.SectionLeft) + + // Section name appends to a "prefix" which is made up of the previous structs. 
if prefix { section_head := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) strings.write_string(§ions, section_head) new_value := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) delete(value.value) + + // TODO(hrs) directly modifying the values means post-serialisation values will be different... If you serialise twice you'll get double the nesting. + // Perhaps "prefix" could be a string of previous nesting? Override it for each new section/nested section. + value.value = new_value delete(section_head) } else { @@ -47,7 +54,7 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { } // Write the ini.Config to a file at the given path. -// If no file is found one will be created (hopefully). +// If no file is found one will be created. write_to_file :: proc(c: ^Config, filename: string) -> bool { out := write_to_string(c) defer delete(out) @@ -57,6 +64,7 @@ write_to_file :: proc(c: ^Config, filename: string) -> bool { // Converts an ini.Config into a valid Json string. // "key = value" becomes "key":"value", // "[section]" becomes "section":{ ... }, +// This will not "pretty print" the string. It will all be on a single line. 
write_to_json :: proc(c: ^Config) -> string { output := strings.builder_make_none() defer strings.builder_destroy(&output) @@ -65,6 +73,7 @@ write_to_json :: proc(c: ^Config) -> string { for key, value in c.keys { if value.keys == nil { + // Handles key = value strings.write_string(&output, "\"") strings.write_string(&output, key) strings.write_string(&output, "\"") @@ -74,6 +83,7 @@ write_to_json :: proc(c: ^Config) -> string { strings.write_string(&output, "\"") strings.write_string(&output, ",") } else { + // Handles [section] strings.write_string(&output, "\"") strings.write_string(&output, key) strings.write_string(&output, "\"") @@ -85,7 +95,7 @@ write_to_json :: proc(c: ^Config) -> string { } } - strings.pop_rune(&output) + strings.pop_rune(&output) // pop removes a redundent comma at the end of each section (kinda dumb) strings.write_string(&output, "}") return strings.clone(strings.to_string(output)) From 06bd88bebacae4e350ce7b2f55126bd8778ddb8e Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:27:01 +0100 Subject: [PATCH 12/17] Added log.debug messages --- read.odin | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/read.odin b/read.odin index 7781cf1..8f9e082 100644 --- a/read.odin +++ b/read.odin @@ -2,8 +2,10 @@ package ini import "core:fmt" import "core:os" +import "core:log" read_from_string :: proc(content: string, name := "config") -> ^Config { + log.debugf("Processing string of length %d", len(content)) config := new_config(name) l := new_lexer(content) @@ -17,6 +19,7 @@ read_from_string :: proc(content: string, name := "config") -> ^Config { parse(p) + log.debug("Deleting dynamic token array") for token in tokens { if token.type == .IDENTIFIER { delete(token.value) @@ -31,7 +34,9 @@ read_from_file :: proc(path: string) -> (config: ^Config = nil, ok := false) #op data, success := os.read_entire_file_from_filename(path) defer delete(data) if success { + log.debugf("Successfully read %d 
bytes from %s", len(data), path) return read_from_string(string(data), path), true } + log.errorf("Could not read from %s", path) return } From a033643338d7aac93400812b97fa9a1628871d5d Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:27:22 +0100 Subject: [PATCH 13/17] Added log.debug messages --- write.odin | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/write.odin b/write.odin index 2c44d54..cf096cc 100644 --- a/write.odin +++ b/write.odin @@ -3,10 +3,17 @@ package ini import "core:os" import "core:strings" import "core:fmt" +import "core:log" // Converts an ini config to a string (key=value, [section], etc) write_to_string :: proc(c: ^Config, prefix := false) -> string { - if c == nil || c.keys == nil do return "" // I HOPE THIS WILL STOP THE VOICES FROM SCREAMING AT ME + if c == nil { + log.error("Provided ^Config is nil. Returning an empty string.") + return "" + } else if c.keys == nil { + log.error("Provided ^Config.key is nil. Returning an empty string.") + return "" + } // All sections come after the global keys (keys without a section, stored within the first ^Config) keys := strings.builder_make_none() @@ -22,6 +29,7 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { // Section name appends to a "prefix" which is made up of the previous structs. if prefix { + // TODO(hrs) section_head and new_value are literally the exact same... WTF? section_head := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) strings.write_string(§ions, section_head) new_value := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) @@ -30,6 +38,7 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { // TODO(hrs) directly modifying the values means post-serialisation values will be different... If you serialise twice you'll get double the nesting. 
// Perhaps "prefix" could be a string of previous nesting? Override it for each new section/nested section. + log.debugf("Applied nesting prefix to %s (will generate as %s)", value.value, new_value) value.value = new_value delete(section_head) } else { @@ -39,12 +48,15 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { strings.write_string(§ions, "\n") section := write_to_string(value, true) strings.write_string(§ions, section) + log.debugf("Generated section from %v", value) + delete(section) } else { strings.write_string(&keys, key) strings.write_rune(&keys, Options.Symbols.Delimiter) strings.write_string(&keys, value.value) strings.write_string(&keys, "\n") + log.debugf("Generated key value pair from '%s' and '%s'", key, value.value) } } @@ -58,7 +70,14 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { write_to_file :: proc(c: ^Config, filename: string) -> bool { out := write_to_string(c) defer delete(out) - return os.write_entire_file(filename, transmute([]u8)out) + bytes := transmute([]u8)out + result := os.write_entire_file(filename, bytes) + if result { + log.debugf("Successfully wrote %d bytes to file %s", len(bytes), filename) + } else { + log.errorf("Failed to write %d bytes to file %s", len(bytes), filename) + } + return result } // Converts an ini.Config into a valid Json string. @@ -66,6 +85,14 @@ write_to_file :: proc(c: ^Config, filename: string) -> bool { // "[section]" becomes "section":{ ... }, // This will not "pretty print" the string. It will all be on a single line. write_to_json :: proc(c: ^Config) -> string { + if c == nil { + log.error("Provided ^Config is nil. Returning an empty string.") + return "" + } else if c.keys == nil { + log.error("Provided ^Config.key is nil. 
Returning an empty string.") + return "" + } + output := strings.builder_make_none() defer strings.builder_destroy(&output) @@ -82,6 +109,7 @@ write_to_json :: proc(c: ^Config) -> string { strings.write_string(&output, value.value) strings.write_string(&output, "\"") strings.write_string(&output, ",") + log.debugf("Generated key value pair from '%s' and '%s'", key, value.value) } else { // Handles [section] strings.write_string(&output, "\"") @@ -92,6 +120,7 @@ write_to_json :: proc(c: ^Config) -> string { strings.write_string(&output, section) delete(section) strings.write_string(&output, ",") + log.debugf("Generated section from %v", value) } } From 7da2839b2e5328c09380a2719a94f8154f958159 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:27:26 +0100 Subject: [PATCH 14/17] Added log.debug messages --- parser.odin | 71 +++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 63 insertions(+), 8 deletions(-) diff --git a/parser.odin b/parser.odin index 59e12c2..3f03acb 100644 --- a/parser.odin +++ b/parser.odin @@ -2,6 +2,7 @@ package ini import "core:strings" import "core:fmt" +import "core:log" // The parser reads the list of tokens generated by the lexer and converts them into an ini.Config Parser :: struct { @@ -13,6 +14,13 @@ Parser :: struct { // Creates a new parser with the given token list and config then returns a pointer to it. new_parser :: proc(tokens: [dynamic]Token, config: ^Config) -> ^Parser { + if config == nil { + log.errorf("Provided ^Config is nil. Returning nil.") + return nil + } + + log.debugf("Creating parser with given token array of %d tokens", len(tokens)) + p := new(Parser) p.pos = 0 p.tokens = tokens @@ -23,8 +31,12 @@ new_parser :: proc(tokens: [dynamic]Token, config: ^Config) -> ^Parser { // Parses the tokens into a config struct parse :: proc(p: ^Parser) { - t: Token + if p == nil { + log.errorf("Provided ^Parser is nil. Cannot parse. 
Returning.") + return + } + t: Token for p.pos < len(p.tokens) { t = p.tokens[p.pos] @@ -33,10 +45,13 @@ parse :: proc(p: ^Parser) { case .IDENTIFIER: parse_key(p) case .COMMENT: p.pos += 1 // TODO Add comments to config? case .EOL: p.pos += 1 - case .EOF: return + case .EOF: + log.debugf("EOF found at position %d. Finished parsing.", p.pos+1) + return case: - fmt.println("Unexpected token: ", t) - p.pos += 1 // TODO Errors + log.errorf("Unexpected token: '%s' (%v) @%d:%d. Ignoring.", t.value, t.type, t.line, t.col) + skip_line(p, p.tokens[p.pos].line) + p.pos += 1 } } } @@ -47,17 +62,31 @@ parse_key :: proc(p: ^Parser) { p.pos += 1 if p.tokens[p.pos].type != .DELIMITER { - fmt.println("Expected delimiter after key") + log.errorf( + "Expected %v ('%s') but found %v ('%s'). Cannot recover value of key '%s'. This value will be missing.", + TokenType.DELIMITER, + Options.Symbols.Delimiter, + p.tokens[p.pos].type, + p.tokens[p.pos].value, + key + ) + delete(key) + skip_line(p, p.tokens[p.pos].line) return } p.pos += 1 + // TODO(hrs) check for extra identifiers. + // because of whitespace separating multiple identifiers, and that values could have spaces in them, + // we should check if there any anymore identifiers until an EOL then stich the values of any we find together. 
value := strings.clone(p.tokens[p.pos].value) p.pos += 1 if p.section == nil { + log.debugf("Successfully parsed key '%s' with a value of '%s'", key, value) set(p.config, key, value) } else { + log.debugf("Successfully parsed key '%s' with a value of '%s' in section '%s'", key, value, p.section.value) set(p.section, key, value) } } @@ -69,16 +98,42 @@ parse_section :: proc(p: ^Parser) { p.pos += 1 if p.tokens[p.pos].type != .SECTION_RIGHT { - fmt.println("Expected closing bracket") - return + log.errorf( + "Expected %v ('%s') but found %v ('%s').", + TokenType.SECTION_RIGHT, + Options.Symbols.SectionRight, + p.tokens[p.pos].type, + p.tokens[p.pos].value + ) } p.pos += 1 if p.tokens[p.pos].type != .EOL { - fmt.println("Expected newline after section") + log.errorf( + "Expected %v ('\n') but found %v ('%s').", + TokenType.EOL, + p.tokens[p.pos].type, + p.tokens[p.pos].value + ) + skip_line(p, p.tokens[p.pos].line) + delete(section_name) return } p.pos += 1 p.section = add_section(p.config, section_name) +} + +// If an unrecoverable error occurs during parsing then the parser will opt to skip all the tokens on the current line. +// This prevents additional errors from spawning because of an error prior in the list of tokens. +skip_line :: proc(p: ^Parser, line: int) { + unskipped_pos := p.pos + for p.tokens[p.pos].type != .EOL && p.tokens[p.pos].type != .EOF { + p.pos += 1 + } + + // No point warning about skipping tokens if we didn't skip any + if unskipped_pos != p.pos { + log.warnf("Attempted to prevent further errors on line %d by skipping %d token(s).", line, p.pos-unskipped_pos) + } } \ No newline at end of file From ea700528a8d6eeea47c0157ed1fdd9dd2f176100 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:27:44 +0100 Subject: [PATCH 15/17] Removed debugging mode compile with -debug flag instead. 
--- options.odin | 3 --- 1 file changed, 3 deletions(-) diff --git a/options.odin b/options.odin index c6c3bc9..be6fa9d 100644 --- a/options.odin +++ b/options.odin @@ -54,8 +54,6 @@ IniOptions :: struct { GenerateCommentsJson: bool, // Whether to generate comments in json or not (default: false) GenerateCommentsNameJson: string, // The name of the comment field if generating in json (default: "__comment__") }, - - Debug: bool, // Debugging mode will print debug information (default: false) } // These options can be changed at runtime. @@ -88,5 +86,4 @@ Options : IniOptions = { false, "__comment__", }, - false, // Debug } From 47a8d8ff637af1a1aae2255c25791c19d272ff85 Mon Sep 17 00:00:00 2001 From: hrszpuk <107559570+hrszpuk@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:27:55 +0100 Subject: [PATCH 16/17] Added log.debug messages --- lexer.odin | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/lexer.odin b/lexer.odin index 57204f2..c5e3eef 100644 --- a/lexer.odin +++ b/lexer.odin @@ -2,6 +2,7 @@ package ini import "core:fmt" import "core:unicode/utf8" +import "core:log" // Lexer will take the INI text and convert it to a list of tokens. // "key = value" = [Token{"key", .IDENTIFIER, ...}, Token{"=", .DELIMITER, ...}, Token{"value", .IDENTIFIER, ...}] @@ -15,6 +16,7 @@ Lexer :: struct { // Creates a new lexer with the given input and returns a pointer to it. 
new_lexer :: proc(input: string) -> ^Lexer {
+    log.debugf("Created ^Lexer with given string of length %d", len(input))
     l := new(Lexer)
     l.tokens = make([dynamic]Token, 0, len(input)/2)
     l.input = utf8.string_to_runes(input)
@@ -34,17 +36,21 @@ lex :: proc(l: ^Lexer) -> [dynamic]Token {
 
         switch c = next(l); c {
         case Options.Symbols.Comment: // Defaults to ;
+            log.debug("Found comment")
            append(&l.tokens, lexId(l))
 
         case Options.Symbols.SectionRight: // Defaults to ]
+            log.debug("Found section right")
             s, i := utf8.encode_rune(Options.Symbols.SectionRight)
             append(&l.tokens, Token{.SECTION_RIGHT, string(s[:i]), l.line, l.col})
 
         case Options.Symbols.SectionLeft: // Defaults to [
+            log.debug("Found section left")
             s, i := utf8.encode_rune(Options.Symbols.SectionLeft)
             append(&l.tokens, Token{.SECTION_LEFT, string(s[:i]), l.line, l.col})
 
         case Options.Symbols.Delimiter: // Defaults to =
+            log.debug("Found delimiter")
             s, i := utf8.encode_rune(Options.Symbols.Delimiter)
             append(&l.tokens, Token{.DELIMITER, string(s[:i]), l.line, l.col})
 
@@ -73,13 +79,20 @@ lexId :: proc(l: ^Lexer) -> Token {
 
     for l.pos < len(l.input) {
         c := next(l)
-        if c == 0 || c == '\n' {
+        if c == 0 ||
+        c == '\n' ||
+        c == Options.Symbols.Delimiter ||
+        c == Options.Symbols.SectionRight ||
+        c == Options.Symbols.SectionLeft ||
+        c == Options.Symbols.Comment {
             back(l)
             break
         }
     }
 
-    return Token{.IDENTIFIER, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col}
+    str := utf8.runes_to_string(l.input[start:l.pos])
+    log.debugf("Found identifier '%s' @%d:%d", str, l.line, l.col)
+    return Token{.IDENTIFIER, str, l.line, l.col}
 }
 
 back :: proc(l: ^Lexer) {

From ff46faf5d3ffbfed5d9f4c3f28d17dba0a37552e Mon Sep 17 00:00:00 2001
From: hrszpuk <107559570+hrszpuk@users.noreply.github.com>
Date: Tue, 11 Jun 2024 14:27:57 +0100
Subject: [PATCH 17/17] Added log.debug messages

---
 ini.odin | 59 +++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 50 insertions(+), 9
deletions(-)

diff --git a/ini.odin b/ini.odin
index ddb00c0..5b4c844 100644
--- a/ini.odin
+++ b/ini.odin
@@ -2,6 +2,7 @@ package ini
 
 import "core:fmt"
 import "core:strings"
+import "core:log"
 
 // Ini Config Type
 // .value is the value of an entry stored in the config (or section of the config), or the name of the config.
@@ -22,7 +23,7 @@ new_config :: proc(name: string) -> ^Config {
     p := new(Config)
     p.value = strings.clone(name)
     p.keys = make(map[string]^Config)
-
+    log.debugf("Created a new config with the name '%s'.", name)
     return p
 }
 
@@ -44,17 +45,26 @@ destroy_config :: proc(c: ^Config) {
     }
+    log.debugf("Destroyed config with the name '%s'.", c.value)
     delete(c.value)
     free(c)
 }
 
 // Adds a new section to the given config and returns a pointer to it.
 add_section :: proc(c: ^Config, name: string) -> (^Config, bool) #optional_ok {
-    if c == nil || has_key(c, name) do return nil, false
+    if c == nil {
+        log.errorf("Provided ^Config is nil. Returning nil.")
+        return nil, false
+    } else if has_key(c, name) {
+        log.errorf("Provided ^Config already contains the key '%s'. Returning nil.", name)
+        return nil, false
+    }
+
     s := new(Config)
     s.value = strings.clone(name)
     s.keys = make(map[string]^Config)
     c.keys[name] = s
+    log.debugf("Added section '%s' to config '%s'.", name, c.value)
     return s, true
 }
 
@@ -63,7 +73,13 @@ set :: proc{set_key, set_section}
 
 // Sets the value of a given key.
 set_key :: proc(c: ^Config, key: string, value: string) -> bool {
-    if c == nil || has_key(c, key) do return false
+    if c == nil {
+        log.errorf("Provided ^Config is nil. Returning.")
+        return false
+    } else if has_key(c, key) {
+        log.errorf("Provided ^Config already contains the key '%s'. Returning.", key)
+        return false
+    }
 
     c.keys[key] = new(Config)
     c.keys[key].value = strings.clone(value)
@@ -72,7 +88,17 @@ set_key :: proc(c: ^Config, key: string, value: string) -> bool {
 
 // Sets the value of a given key (specifically for sections).
set_section :: proc(c: ^Config, key: string, value: ^Config) -> bool { - if c == nil || value == nil || has_key(c, key) do return false + if c == nil { + log.errorf("Provided ^Config is nil. Returning.") + return false + } else if has_key(c, key) { + log.errorf("Provided ^Config already contains the key '%s'. Returning.", key) + return false + } else if value == nil { + log.errorf("Provided section (^Config) is nil. Returning.") + return false + } + c.keys[key] = value return true } @@ -82,14 +108,20 @@ get :: proc{get_key} // Returns the value of a key in the config. Does not support returning sections. get_key :: proc(c: ^Config, key: string) -> (string, bool) #optional_ok { - if c == nil do return "", false + if c == nil { + log.errorf("Provided ^Config is nil. Cannot get key '%s'.", key) + return "", false + } return c.keys[key].value, true } // Finds a section by name and returns a pointer to it. // If no section matches the name, it returns nil. get_section :: proc(c: ^Config, name: string) -> (^Config, bool) #optional_ok { - if c == nil do return nil, false + if c == nil { + log.errorf("Provided ^Config is nil. Cannot get section '%s'. 
Returning nil.", name) + return nil, false + } return c.keys[name], true } @@ -106,7 +138,10 @@ is_section :: proc(c: ^Config, name: string) -> bool { // Removes a key from a config/section remove :: proc(c: ^Config, name: string) -> bool { - if !has_key(c, name) do return false + if !has_key(c, name) { + log.warnf("Cannot remove key '%s' from config because '%s' does not exist.", name, name) + return false + } value := c.keys[name] destroy_config(value) delete_key(&c.keys, name) @@ -115,7 +150,10 @@ remove :: proc(c: ^Config, name: string) -> bool { // Removes a key from a config/section and returns the value pop_key :: proc(c: ^Config, name: string) -> (string, bool) #optional_ok { - if !has_key(c, name) do return "", false + if !has_key(c, name) { + log.errorf("Cannot pop key '%s' from config because '%s' does not exist.", name, name) + return "", false + } value := c.keys[name] defer free(value) delete_key(&c.keys, name) @@ -124,7 +162,10 @@ pop_key :: proc(c: ^Config, name: string) -> (string, bool) #optional_ok { // Removes a key from a config/section pop_section :: proc(c: ^Config, name: string) -> (^Config, bool) #optional_ok { - if !has_key(c, name) do return nil, false + if !has_key(c, name) { + log.errorf("Cannot pop section '%s' from config because '%s' does not exist.", name, name) + return nil, false + } value := c.keys[name] delete_key(&c.keys, name) return value, true