diff --git a/examples/basic/basic.odin b/examples/basic/basic.odin index fb13c6e..30d6013 100644 --- a/examples/basic/basic.odin +++ b/examples/basic/basic.odin @@ -4,17 +4,25 @@ import ini "../.." import "core:fmt" main :: proc() { - config := ini.read_from_file("../../tests/sections.ini") + + ini.Options.Symbols.NestedSection = '/' + config := ini.new_config("config") defer ini.destroy_config(config) ini.set(config, "wife", "Monica") - children := ini.add_section(config, "children") - ini.set(children, "daughter", "Jessica") - ini.set(children, "son", "Billy") + section := ini.add_section(config, "children") + ini.set(section, "kevin", "kevin") + + section_nested := ini.add_section(section, "grandchildren") + ini.set(section_nested, "Anna", "Anna") + + section_nested2 := ini.add_section(section, "grandchildren2") + ini.set(section_nested2, "Anna2", "Anna2") + + section_nested3 := ini.add_section(section_nested2, "grandchildren3") + ini.set(section_nested3, "Anna3", "Anna2") + ini.write_to_file(config, "test.ini") - s := ini.write_to_string(config) - defer delete(s) - fmt.println(s) } \ No newline at end of file diff --git a/ini.odin b/ini.odin index ddb00c0..5b4c844 100644 --- a/ini.odin +++ b/ini.odin @@ -2,6 +2,7 @@ package ini import "core:fmt" import "core:strings" +import "core:log" // Ini Config Type // .value is the value of an entry stored in the config (or section of the config), or the name of the config. @@ -22,7 +23,7 @@ new_config :: proc(name: string) -> ^Config { p := new(Config) p.value = strings.clone(name) p.keys = make(map[string]^Config) - + log.debugf("Created a new config with the name '%s'.", name) return p } @@ -44,17 +45,26 @@ destroy_config :: proc(c: ^Config) { } delete(c.value) free(c) + log.debugf("Destroyed config with the name '%s'.", c.value) } // Adds a new section to the given config and returns a pointer to it. 
add_section :: proc(c: ^Config, name: string) -> (^Config, bool) #optional_ok { - if c == nil || has_key(c, name) do return nil, false + if c == nil { + log.errorf("Provided ^Config is nil. Returning nil.") + return nil, false + } else if has_key(c, name) { + log.errorf("Provided ^Config already contains the key '%s'. Returning nil.", name) + return nil, false + } + s := new(Config) s.value = strings.clone(name) s.keys = make(map[string]^Config) c.keys[name] = s + log.debugf("Added section '%s' to config '%s'.", name, c.value) return s, true } @@ -63,7 +73,13 @@ set :: proc{set_key, set_section} // Sets the value of a given key. set_key :: proc(c: ^Config, key: string, value: string) -> bool { - if c == nil || has_key(c, key) do return false + if c == nil { + log.errorf("Provided ^Config is nil. Returning.") + return false + } else if has_key(c, key) { + log.errorf("Provided ^Config already contains the key '%s'. Returning.", key) + return false + } c.keys[key] = new(Config) c.keys[key].value = strings.clone(value) @@ -72,7 +88,17 @@ set_key :: proc(c: ^Config, key: string, value: string) -> bool { // Sets the value of a given key (specifically for sections). set_section :: proc(c: ^Config, key: string, value: ^Config) -> bool { - if c == nil || value == nil || has_key(c, key) do return false + if c == nil { + log.errorf("Provided ^Config is nil. Returning.") + return false + } else if has_key(c, key) { + log.errorf("Provided ^Config already contains the key '%s'. Returning.", key) + return false + } else if value == nil { + log.errorf("Provided section (^Config) is nil. Returning.") + return false + } + c.keys[key] = value return true } @@ -82,14 +108,20 @@ get :: proc{get_key} // Returns the value of a key in the config. Does not support returning sections. get_key :: proc(c: ^Config, key: string) -> (string, bool) #optional_ok { - if c == nil do return "", false + if c == nil { + log.errorf("Provided ^Config is nil. 
Cannot get key '%s'.", key) + return "", false + } return c.keys[key].value, true } // Finds a section by name and returns a pointer to it. // If no section matches the name, it returns nil. get_section :: proc(c: ^Config, name: string) -> (^Config, bool) #optional_ok { - if c == nil do return nil, false + if c == nil { + log.errorf("Provided ^Config is nil. Cannot get section '%s'. Returning nil.", name) + return nil, false + } return c.keys[name], true } @@ -106,7 +138,10 @@ is_section :: proc(c: ^Config, name: string) -> bool { // Removes a key from a config/section remove :: proc(c: ^Config, name: string) -> bool { - if !has_key(c, name) do return false + if !has_key(c, name) { + log.warnf("Cannot remove key '%s' from config because '%s' does not exist.", name, name) + return false + } value := c.keys[name] destroy_config(value) delete_key(&c.keys, name) @@ -115,7 +150,10 @@ remove :: proc(c: ^Config, name: string) -> bool { // Removes a key from a config/section and returns the value pop_key :: proc(c: ^Config, name: string) -> (string, bool) #optional_ok { - if !has_key(c, name) do return "", false + if !has_key(c, name) { + log.errorf("Cannot pop key '%s' from config because '%s' does not exist.", name, name) + return "", false + } value := c.keys[name] defer free(value) delete_key(&c.keys, name) @@ -124,7 +162,10 @@ pop_key :: proc(c: ^Config, name: string) -> (string, bool) #optional_ok { // Removes a key from a config/section pop_section :: proc(c: ^Config, name: string) -> (^Config, bool) #optional_ok { - if !has_key(c, name) do return nil, false + if !has_key(c, name) { + log.errorf("Cannot pop section '%s' from config because '%s' does not exist.", name, name) + return nil, false + } value := c.keys[name] delete_key(&c.keys, name) return value, true diff --git a/lexer.odin b/lexer.odin index 7c044c1..c5e3eef 100644 --- a/lexer.odin +++ b/lexer.odin @@ -2,7 +2,10 @@ package ini import "core:fmt" import "core:unicode/utf8" +import "core:log" +// Lexer 
will take the INI text and convert it to a list of tokens. +// "key = value" = [Token{"key", .IDENTIFIER, ...}, Token{"=", .DELIMITER, ...}, Token{"value", .IDENTIFIER, ...}] Lexer :: struct { tokens: [dynamic]Token, input: []rune, @@ -11,7 +14,9 @@ Lexer :: struct { col: int, } +// Creates a new lexer with the given input and returns a pointer to it. new_lexer :: proc(input: string) -> ^Lexer { + log.debugf("Created ^Lexer with given string of length %d", len(input)) l := new(Lexer) l.tokens = make([dynamic]Token, 0, len(input)/2) l.input = utf8.string_to_runes(input) @@ -21,51 +26,74 @@ new_lexer :: proc(input: string) -> ^Lexer { return l } +// Produces the dynamic array of tokens from the input stored in the lexer struct lex :: proc(l: ^Lexer) -> [dynamic]Token { - c: rune + c: rune // Current character + for l.pos < len(l.input) { + + // Most symbols can be directly added, identifiers and comments are lexed in their own functions switch c = next(l); c { - case '\n': append(&l.tokens, Token{.EOL, "\n", l.line, l.col}) - case ';', '#': append(&l.tokens, Token{.COMMENT, lexId(l).value, l.line, l.col}) - case ']': append(&l.tokens, Token{.RSB, "]", l.line, l.col}) - case '[': append(&l.tokens, Token{.LSB, "[", l.line, l.col}) - case '=', ':': append(&l.tokens, Token{.DELIMITER, "=", l.line, l.col}) - case ' ', '\t', '\r': break - case: append(&l.tokens, lexId(l)) - } - } - append(&l.tokens, Token{.EOF, "", l.line, l.col}) + case Options.Symbols.Comment: // Defaults to ; + log.debug("Found comment") + append(&l.tokens, lexId(l)) - return l.tokens -} + case Options.Symbols.SectionRight: // Defaults to ] + log.debug("Found section right") + s, i := utf8.encode_rune(Options.Symbols.SectionRight) + append(&l.tokens, Token{.SECTION_RIGHT, string(s[:i]), l.line, l.col}) -lexId :: proc(l: ^Lexer) -> Token { - start := l.pos - 1 + case Options.Symbols.SectionLeft: // Defaults to [ + log.debug("Found section left") + s, i := utf8.encode_rune(Options.Symbols.SectionLeft) + 
append(&l.tokens, Token{.SECTION_LEFT, string(s[:i]), l.line, l.col}) - for l.pos < len(l.input) { - c := next(l) - if c == 0 || c == '\n' || c == '[' || c == ']' || c == '=' { - back(l) + case Options.Symbols.Delimiter: // Defaults to = + log.debug("Found delimiter") + s, i := utf8.encode_rune(Options.Symbols.Delimiter) + append(&l.tokens, Token{.DELIMITER, string(s[:i]), l.line, l.col}) + + case '\n': + append(&l.tokens, Token{.EOL, "\n", l.line, l.col}) + + // Ignore whitespace + case ' ', '\t', '\r': break + + case: + append(&l.tokens, lexId(l)) } } - return Token{.ID, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} + append(&l.tokens, Token{.EOF, "", l.line, l.col}) + + return l.tokens } -lexComment :: proc(l: ^Lexer) -> Token { +// Tokenises identifiers (anything that is not whitespace or in Options.Symbols) +// This also lexes comments btw +lexId :: proc(l: ^Lexer) -> Token { start := l.pos - 1 for l.pos < len(l.input) { c := next(l) - if c == 0 || c == '\n' { + + if c == 0 || + c == '\n' || + c == Options.Symbols.Delimiter || + c == Options.Symbols.SectionRight || + c == Options.Symbols.SectionLeft || + c == Options.Symbols.Delimiter || + c == Options.Symbols.Comment { back(l) break } } - return Token{.ID, utf8.runes_to_string(l.input[start:l.pos]), l.line, l.col} + str := utf8.runes_to_string(l.input[start:l.pos]) + log.debugf("Found identifier '%s' @%d:%d", str, l.line, l.col) + return Token{.IDENTIFIER, str, l.line, l.col} } back :: proc(l: ^Lexer) { diff --git a/options.odin b/options.odin index a6778ab..be6fa9d 100644 --- a/options.odin +++ b/options.odin @@ -2,7 +2,7 @@ package ini import "core:bufio" -// Ini is not standardised. Therefore I've given the programmer the ability to interchange symbols and rules. +// Ini is not standardised. Therefore I've let the user interchange symbols and rules that will apply during parsing/serialisation.
IniOptions :: struct { // Symbols @@ -26,21 +26,34 @@ IniOptions :: struct { Rules: struct { // If you are reading these rules and think there are more that could be added open an issue (https://github.com/hrszpuk/odin-ini) + // Parsing AllowEmptyValues: bool, // Whether empty values are allowed or not (default: true) AllowEmptySections: bool, // Whether empty sections are allowed or not (default: true) AllowNestedSections: bool, // Whether nested sections are allowed or not (default: true) AllowSpacesInKeys: bool, // Whether spaces are allowed to be a part of keys or not (default: false) AllowSpacesInValues: bool, // Whether spaces are allowed to be a part of values or not (default: true) + AllowDuplicateKeys: bool, // Whether duplicate keys are ignored or not (default: false) + AllowDuplicateSections: bool, // Whether duplicate sections are ignored or not (default: false) IgnoreCaseSensitivity: bool, // Whether case sensitivity is ignored or not (default: false) - IgnoreDuplicateKeys: bool, // Whether duplicate keys are ignored or not (default: false) - IgnoreDuplicateSections: bool, // Whether duplicate sections are ignored or not (default: false) IgnoreDelimiterPadding: bool, // Whether delimiter padding is ignored or not (default: true) IgnoreSectionNamePadding: bool, // Whether section name padding is ignored or not (default: true) IgnoreValueQuotes: bool, // Whether the quotes around a value are counted as part of the value or not (default: true) - }, - Debug: bool, // Debugging mode will print debug information (default: false) + IgnoreComments: bool, // Whether to ignore comments when parsing (default: false). 
+ + // Generation + PaddingSymbol: rune, // The symbol used for padding (default: ' ') + DelimiterPaddingAmount: uint, // The amount of padding around the delimiter (default: 1) + SectionNamePaddingAmount: uint, // The amount of padding around the delimiter (default: 1) + BeforeSectionLinePaddingAmount: uint, // The amount of lines placed before a section header (default: 1) + StatementCommentPaddingAmount: uint, // The amount of padding before a comment starts after a statement (default: 1) + EmptyLineCommentPaddingAmount: uint, // The amount of padding before a comment starts on an empty line (default: 0) + + GenerateComments: bool, // Whether to generate comments or not (default: true) + GenerateCommentsJson: bool, // Whether to generate comments in json or not (default: false) + GenerateCommentsNameJson: string, // The name of the comment field if generating in json (default: "__comment__") + }, } // These options can be changed at runtime. @@ -59,6 +72,18 @@ Options : IniOptions = { false, true, true, + + false, + + ' ', + 1, + 0, + 1, + 1, + 0, + + true, + false, + "__comment__", }, - false, // Debug } diff --git a/parser.odin b/parser.odin index 3889fd5..3f03acb 100644 --- a/parser.odin +++ b/parser.odin @@ -2,8 +2,9 @@ package ini import "core:strings" import "core:fmt" +import "core:log" -// TODO update for new config struct (no more Section) +// The parser reads the list of tokens generated by the lexer and converts them into an ini.Config Parser :: struct { pos: int, tokens: [dynamic]Token, @@ -11,7 +12,15 @@ Parser :: struct { section: ^Config, } +// Creates a new parser with the given token list and config then returns a pointer to it. new_parser :: proc(tokens: [dynamic]Token, config: ^Config) -> ^Parser { + if config == nil { + log.errorf("Provided ^Config is nil. 
Returning nil.") + return nil + } + + log.debugf("Creating parser with given token array of %d tokens", len(tokens)) + p := new(Parser) p.pos = 0 p.tokens = tokens @@ -20,54 +29,111 @@ return p } +// Parses the tokens into a config struct parse :: proc(p: ^Parser) { + if p == nil { + log.errorf("Provided ^Parser is nil. Cannot parse. Returning.") + return + } + t: Token for p.pos < len(p.tokens) { t = p.tokens[p.pos] + #partial switch t.type { - case .LSB: parse_section(p) - case .ID: parse_key(p) + case .SECTION_LEFT: parse_section(p) + case .IDENTIFIER: parse_key(p) case .COMMENT: p.pos += 1 // TODO Add comments to config? case .EOL: p.pos += 1 - case .EOF: return + case .EOF: + log.debugf("EOF found at position %d. Finished parsing.", p.pos+1) + return case: - fmt.println("Unexpected token: ", t) - p.pos += 1 // TODO Errors + log.errorf("Unexpected token: '%s' (%v) @%d:%d. Ignoring.", t.value, t.type, t.line, t.col) + skip_line(p, p.tokens[p.pos].line) + p.pos += 1 } } } +// Parses a "key = value" statement. parse_key :: proc(p: ^Parser) { key := strings.clone(p.tokens[p.pos].value) p.pos += 1 + if p.tokens[p.pos].type != .DELIMITER { - fmt.println("Expected delimiter after key") + log.errorf( + "Expected %v ('%s') but found %v ('%s'). Cannot recover value of key '%s'. This value will be missing.", + TokenType.DELIMITER, + Options.Symbols.Delimiter, + p.tokens[p.pos].type, + p.tokens[p.pos].value, + key + ) + delete(key) + skip_line(p, p.tokens[p.pos].line) return } + p.pos += 1 + // TODO(hrs) check for extra identifiers. + // because of whitespace separating multiple identifiers, and that values could have spaces in them, + // we should check if there are any more identifiers until an EOL and then stitch the values of any we find together.
value := strings.clone(p.tokens[p.pos].value) p.pos += 1 if p.section == nil { + log.debugf("Successfully parsed key '%s' with a value of '%s'", key, value) set(p.config, key, value) } else { + log.debugf("Successfully parsed key '%s' with a value of '%s' in section '%s'", key, value, p.section.value) set(p.section, key, value) } } +// Parses a [section] statement parse_section :: proc(p: ^Parser) { p.pos += 1 section_name := strings.clone(p.tokens[p.pos].value) p.pos += 1 - if p.tokens[p.pos].type != .RSB { - fmt.println("Expected closing bracket") - return + + if p.tokens[p.pos].type != .SECTION_RIGHT { + log.errorf( + "Expected %v ('%s') but found %v ('%s').", + TokenType.SECTION_RIGHT, + Options.Symbols.SectionRight, + p.tokens[p.pos].type, + p.tokens[p.pos].value + ) } + p.pos += 1 if p.tokens[p.pos].type != .EOL { - fmt.println("Expected newline after section") + log.errorf( + "Expected %v ('\n') but found %v ('%s').", + TokenType.EOL, + p.tokens[p.pos].type, + p.tokens[p.pos].value + ) + skip_line(p, p.tokens[p.pos].line) + delete(section_name) return } + p.pos += 1 p.section = add_section(p.config, section_name) +} + +// If an unrecoverable error occurs during parsing then the parser will opt to skip all the tokens on the current line. +// This prevents additional errors from spawning because of an error prior in the list of tokens. 
+skip_line :: proc(p: ^Parser, line: int) { + unskipped_pos := p.pos + for p.tokens[p.pos].type != .EOL && p.tokens[p.pos].type != .EOF { + p.pos += 1 + } + + // No point warning about skipping tokens if we didn't skip any + if unskipped_pos != p.pos { + log.warnf("Attempted to prevent further errors on line %d by skipping %d token(s).", line, p.pos-unskipped_pos) + } } \ No newline at end of file diff --git a/read.odin b/read.odin index 2a30043..8f9e082 100644 --- a/read.odin +++ b/read.odin @@ -2,8 +2,10 @@ package ini import "core:fmt" import "core:os" +import "core:log" read_from_string :: proc(content: string, name := "config") -> ^Config { + log.debugf("Processing string of length %d", len(content)) config := new_config(name) l := new_lexer(content) @@ -17,8 +19,9 @@ read_from_string :: proc(content: string, name := "config") -> ^Config { parse(p) + log.debug("Deleting dynamic token array") for token in tokens { - if token.type == .ID { + if token.type == .IDENTIFIER { delete(token.value) } } @@ -31,7 +34,9 @@ read_from_file :: proc(path: string) -> (config: ^Config = nil, ok := false) #op data, success := os.read_entire_file_from_filename(path) defer delete(data) if success { + log.debugf("Successfully read %d bytes from %s", len(data), path) return read_from_string(string(data), path), true } + log.errorf("Could not read from %s", path) return } diff --git a/token.odin b/token.odin index 927bac4..e5f735a 100644 --- a/token.odin +++ b/token.odin @@ -3,14 +3,13 @@ package ini import "core:strings" TokenType :: enum { - ID, // Literally anything - QM, // " - LSB, // [ - RSB, // ] - COMMENT, // # or ; - DELIMITER, // = or : - EOL, // \n - EOF // End of file + IDENTIFIER, + SECTION_LEFT, + SECTION_RIGHT, + COMMENT, + DELIMITER, + EOL, + EOF } Token :: struct { diff --git a/write.odin b/write.odin index 994cbed..cf096cc 100644 --- a/write.odin +++ b/write.odin @@ -3,11 +3,19 @@ package ini import "core:os" import "core:strings" import "core:fmt" +import 
"core:log" // Converts an ini config to a string (key=value, [section], etc) write_to_string :: proc(c: ^Config, prefix := false) -> string { - if c == nil || c.keys == nil do return "" // I HOPE THIS WILL STOP THE VOICES FROM SCREAMING AT ME + if c == nil { + log.error("Provided ^Config is nil. Returning an empty string.") + return "" + } else if c.keys == nil { + log.error("Provided ^Config.key is nil. Returning an empty string.") + return "" + } + // All sections come after the global keys (keys without a section, stored within the first ^Config) keys := strings.builder_make_none() defer strings.builder_destroy(&keys) @@ -16,26 +24,39 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { for key, value in c.keys { if value.keys != nil { - strings.write_string(§ions, "\n[") + strings.write_string(§ions, "\n") + strings.write_rune(§ions, Options.Symbols.SectionLeft) + + // Section name appends to a "prefix" which is made up of the previous structs. if prefix { - section_head := fmt.aprintf("%s.%s", c.value, value.value) + // TODO(hrs) section_head and new_value are literally the exact same... WTF? + section_head := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) strings.write_string(§ions, section_head) - new_value := fmt.aprintf("%s.%s", c.value, value.value) + new_value := fmt.aprintf("%s%v%s", c.value, Options.Symbols.NestedSection, value.value) delete(value.value) + + // TODO(hrs) directly modifying the values means post-serialisation values will be different... If you serialise twice you'll get double the nesting. + // Perhaps "prefix" could be a string of previous nesting? Override it for each new section/nested section. 
+ + log.debugf("Applied nesting prefix to %s (will generate as %s)", value.value, new_value) value.value = new_value delete(section_head) } else { strings.write_string(§ions, value.value) } - strings.write_string(§ions, "]\n") + strings.write_rune(§ions, Options.Symbols.SectionRight) + strings.write_string(§ions, "\n") section := write_to_string(value, true) strings.write_string(§ions, section) + log.debugf("Generated section from %v", value) + delete(section) } else { strings.write_string(&keys, key) - strings.write_string(&keys, "=") + strings.write_rune(&keys, Options.Symbols.Delimiter) strings.write_string(&keys, value.value) strings.write_string(&keys, "\n") + log.debugf("Generated key value pair from '%s' and '%s'", key, value.value) } } @@ -45,17 +66,33 @@ write_to_string :: proc(c: ^Config, prefix := false) -> string { } // Write the ini.Config to a file at the given path. -// If no file is found one will be created (hopefully). +// If no file is found one will be created. write_to_file :: proc(c: ^Config, filename: string) -> bool { out := write_to_string(c) defer delete(out) - return os.write_entire_file(filename, transmute([]u8)out) + bytes := transmute([]u8)out + result := os.write_entire_file(filename, bytes) + if result { + log.debugf("Successfully wrote %d bytes to file %s", len(bytes), filename) + } else { + log.errorf("Failed to write %d bytes to file %s", len(bytes), filename) + } + return result } // Converts an ini.Config into a valid Json string. // "key = value" becomes "key":"value", // "[section]" becomes "section":{ ... }, +// This will not "pretty print" the string. It will all be on a single line. write_to_json :: proc(c: ^Config) -> string { + if c == nil { + log.error("Provided ^Config is nil. Returning an empty string.") + return "" + } else if c.keys == nil { + log.error("Provided ^Config.key is nil. 
Returning an empty string.") + return "" + } + output := strings.builder_make_none() defer strings.builder_destroy(&output) @@ -63,6 +100,7 @@ for key, value in c.keys { if value.keys == nil { + // Handles key = value strings.write_string(&output, "\"") strings.write_string(&output, key) strings.write_string(&output, "\"") @@ -71,7 +109,9 @@ strings.write_string(&output, value.value) strings.write_string(&output, "\"") strings.write_string(&output, ",") + log.debugf("Generated key value pair from '%s' and '%s'", key, value.value) } else { + // Handles [section] strings.write_string(&output, "\"") strings.write_string(&output, key) strings.write_string(&output, "\"") @@ -80,10 +120,11 @@ strings.write_string(&output, section) delete(section) strings.write_string(&output, ",") + log.debugf("Generated section from %v", value) } } - strings.pop_rune(&output) + strings.pop_rune(&output) // pop removes a redundant comma at the end of each section (kinda dumb) strings.write_string(&output, "}") return strings.clone(strings.to_string(output))