diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml
index 6df5455..e1b8687 100644
--- a/.github/workflows/go-test.yml
+++ b/.github/workflows/go-test.yml
@@ -1,8 +1,6 @@
 name: Go
 
 on:
-  push:
-    branches: [ "main" ]
   pull_request:
     branches: [ "main" ]
 
diff --git a/README.md b/README.md
index eb47787..a9d8e4c 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ Oolong looks for a configuration file at `~/.oolong.json`
     3
   ],
   "noteDirectories": [
-    "~/notes",
+    "~/notes"
   ],
   "allowedExtensions": [
     ".md",
diff --git a/examples/oolong.json b/examples/oolong.json
new file mode 100644
index 0000000..00377ac
--- /dev/null
+++ b/examples/oolong.json
@@ -0,0 +1,17 @@
+{
+  "ngramRange": [
+    1,
+    2,
+    3
+  ],
+  "noteDirectories": [],
+  "allowedExtensions": [
+    ".md",
+    ".mdx",
+    ".tex",
+    ".typ"
+  ],
+  "pluginPaths": [
+    "./scripts/daily_note.lua"
+  ]
+}
diff --git a/go.mod b/go.mod
index 8b84f20..ed6ba7b 100644
--- a/go.mod
+++ b/go.mod
@@ -8,3 +8,8 @@ require (
 	github.com/aaaton/golem/v4 v4.0.0
 	github.com/aaaton/golem/v4/dicts/en v1.0.1
 )
+
+require (
+	github.com/fsnotify/fsnotify v1.8.0
+	golang.org/x/sys v0.13.0 // indirect
+)
diff --git a/go.sum b/go.sum
index 676eb3f..c5b8400 100644
--- a/go.sum
+++ b/go.sum
@@ -2,5 +2,9 @@ github.com/aaaton/golem/v4 v4.0.0 h1:YHieBS+5Fqir298nJ7fk3EvMcKM/+T5gpMRt4TIAiZ8
 github.com/aaaton/golem/v4 v4.0.0/go.mod h1:OfK/S5v9Exsx1yO21WorREuIVV+Y5K2hygP0A9oJCCI=
 github.com/aaaton/golem/v4/dicts/en v1.0.1 h1:/BsOsh8JTgTkuevwM9axPnAi9CD4rK7TWHNdW/6V3Uo=
 github.com/aaaton/golem/v4/dicts/en v1.0.1/go.mod h1:1YKRrQNng+KbS+peA7sj3TIa8eqR6T2UqdJ+Tc9xeoA=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
 github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/internal/config/config.go b/internal/config/config.go
index 2517ec1..880d3c2 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -14,14 +14,16 @@ type OolongConfig struct {
 	NGramRange        []int    `json:"ngramRange"`
 	AllowedExtensions []string `json:"allowedExtensions"`
 	PluginPaths       []string `json:"pluginPaths"`
+	IgnoreDirectories []string `json:"ignoredDirectories"`
 }
 
 func Config() OolongConfig { return config }
 
-func NotesDirPaths() []string { return config.NotesDirPaths }
-func NGramRange() []int { return config.NGramRange }
-func AllowedExtensions() []string { return config.AllowedExtensions }
-func PluginPaths() []string { return config.PluginPaths }
+func NotesDirPaths() []string      { return config.NotesDirPaths }
+func NGramRange() []int            { return config.NGramRange }
+func AllowedExtensions() []string  { return config.AllowedExtensions }
+func PluginPaths() []string        { return config.PluginPaths }
+func IgnoredDirectories() []string { return config.IgnoreDirectories }
 
 // TODO: file watcher for config file, reload on change
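Note: the new `ignoredDirectories` key is read from the same `~/.oolong.json` file as the existing options, but `examples/oolong.json` above does not set it. A sketch of a config that uses it (the ".git" and "node_modules" entries are illustrative values, not shipped defaults):

{
  "ngramRange": [1, 2, 3],
  "noteDirectories": ["~/notes"],
  "allowedExtensions": [".md", ".mdx", ".tex", ".typ"],
  "pluginPaths": ["./scripts/daily_note.lua"],
  "ignoredDirectories": [".git", "node_modules"]
}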
diff --git a/internal/daemon/daemon.go b/internal/daemon/daemon.go
new file mode 100644
index 0000000..4b92e9a
--- /dev/null
+++ b/internal/daemon/daemon.go
@@ -0,0 +1,19 @@
+package daemon
+
+import (
+	"log"
+
+	"github.com/oolong-sh/oolong/internal/config"
+)
+
+// Run launches perpetually running watchers and keeps the application running in the background as a daemon
+func Run() {
+	go func() {
+		if err := runNotesDirsWatcher(config.NotesDirPaths()...); err != nil {
+			log.Println("Notes directory watcher exited:", err)
+		}
+	}()
+
+	// run forever
+	<-make(chan struct{})
+}
diff --git a/internal/daemon/watcher.go b/internal/daemon/watcher.go
new file mode 100644
index 0000000..d274415
--- /dev/null
+++ b/internal/daemon/watcher.go
@@ -0,0 +1,82 @@
+package daemon
+
+import (
+	"errors"
+	"io/fs"
+	"log"
+	"path/filepath"
+	"slices"
+	"time"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/oolong-sh/oolong/internal/config"
+	"github.com/oolong-sh/oolong/internal/documents"
+)
+
+// Initialize and run file update watcher for notes directories
+func runNotesDirsWatcher(dirs ...string) error {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return err
+	}
+	defer watcher.Close()
+
+	dirIgnores := config.IgnoredDirectories()
+
+	for _, dir := range dirs {
+		// TODO: add oolong ignore system to blacklist certain subdirs/files
+		if err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+			if !d.IsDir() {
+				return nil
+			}
+
+			// NOTE: this may not be the exact desired behavior for ignores
+			// - this logic also needs to be replicated in the document reader
+			if slices.Contains(dirIgnores, filepath.Base(path)) {
+				return filepath.SkipDir
+			}
+
+			// TEST: this may need to add path as absolute to get correct results
+			err = watcher.Add(path)
+			if err != nil {
+				return err
+			}
+			log.Println("Added watcher on", path)
+
+			return nil
+		}); err != nil {
+			return err
+		}
+	}
+
+	// watcher event handler loop
+	// NOTE: runs in the caller's goroutine; Run() launches this whole function
+	// with `go`, so the handler does not need its own goroutine
+	for {
+		select {
+		case event, ok := <-watcher.Events:
+			if !ok {
+				log.Println("Watcher event channel returned bad result.")
+				return errors.New("watcher events channel closed unexpectedly")
+			}
+			// log.Println("Event:", event)
+
+			if event.Has(fsnotify.Write) {
+				log.Println("Modified file:", event.Name)
+
+				// write event is sent on write start, wait 500ms for write to finish
+				time.Sleep(500 * time.Millisecond)
+
+				// re-read document
+				if err := documents.ReadDocuments(event.Name); err != nil {
+					log.Println("Failed to re-read document:", err)
+				}
+				// TODO: add dedup timer to prevent multi-write calls
+			}
+		case err, ok := <-watcher.Errors:
+			if !ok {
+				return errors.New("watcher errors channel closed unexpectedly")
+			}
+			log.Println("error:", err)
+		}
+	}
+}
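The watcher above sleeps a fixed 500ms per write event and carries a TODO about deduplicating multi-write bursts. One way to address both is a per-path reset timer that re-reads only after events stop arriving. A minimal sketch of that idea (hypothetical helper, not part of this change; the names and window size are illustrative):

package daemon

import (
	"sync"
	"time"
)

// debouncer coalesces bursts of events per key, running fn once
// after no new event has arrived within the configured window.
type debouncer struct {
	mu     sync.Mutex
	window time.Duration
	timers map[string]*time.Timer
}

func newDebouncer(window time.Duration) *debouncer {
	return &debouncer{window: window, timers: make(map[string]*time.Timer)}
}

// trigger restarts the timer for key; fn fires once the burst settles
func (d *debouncer) trigger(key string, fn func()) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if t, ok := d.timers[key]; ok {
		t.Stop()
	}
	d.timers[key] = time.AfterFunc(d.window, fn)
}

In the handler loop, deb.trigger(event.Name, func() { ... documents.ReadDocuments(event.Name) ... }) would then replace the sleep-and-read sequence, keeping the same error logging inside the closure.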
diff --git a/internal/documents/corpus.go b/internal/documents/corpus.go
index 77446a0..f2acc0e 100644
--- a/internal/documents/corpus.go
+++ b/internal/documents/corpus.go
@@ -1,122 +1,88 @@
 package documents
 
 import (
-	"fmt"
 	"io/fs"
-	"os"
+	"log"
 	"path/filepath"
 	"slices"
 	"sync"
 
 	"github.com/oolong-sh/oolong/internal/config"
-	"github.com/oolong-sh/oolong/internal/linking/lexer"
-	"github.com/oolong-sh/oolong/internal/linking/ngrams"
 )
 
-// Read, lex, and extract NGrams for all documents in notes directories specified in config file
-func ReadNotesDirs() ([]*Document, error) {
-	documents := []*Document{}
+// ReadDocuments reads the given note files, merges the results into the corpus
+// state, and recalculates weights. Meant to be called by the file watcher;
+// assumes paths should not be ignored (safe assumption, as the watcher applies the same ignore list)
+func ReadDocuments(paths ...string) error {
+	// read all input files, update state with documents
+	docs := readHandler(paths...)
+
+	// merge ngram maps and calculate weights
+	err := updateState(docs)
+	if err != nil {
+		return err
+	}
 
-	for _, notesDirPath := range config.NotesDirPaths() {
+	// TODO: all weights change, but may not need to be recalculated every time
+
+	return nil
+}
+
+// Read, lex, and extract NGrams for all documents in notes directories specified in config file
+func ReadNotesDirs() error {
+	docs := []*Document{}
+	for _, dir := range config.NotesDirPaths() {
 		// extract all note file paths from notes directory
-		notePaths := []string{}
-		if err := filepath.WalkDir(notesDirPath, func(path string, d fs.DirEntry, err error) error {
+		paths := []string{}
+		// TODO: add oolong ignore system to blacklist certain subdirs/files
+		if err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
 			if d.IsDir() {
+				if slices.Contains(config.IgnoredDirectories(), filepath.Base(path)) {
+					return filepath.SkipDir
+				}
 				return nil
 			}
 			if slices.Contains(config.AllowedExtensions(), filepath.Ext(path)) {
-				notePaths = append(notePaths, path)
+				paths = append(paths, path)
 			}
 			return nil
 		}); err != nil {
-			return nil, err
+			return err
 		}
 
-		// perform a parallel read of found notes files
-		var wg sync.WaitGroup
-		wg.Add(len(notePaths))
-		docs := make([]*Document, len(notePaths))
-
-		for i, notePath := range notePaths {
-			go func(i int, notePath string) {
-				doc, err := ReadDocument(notePath)
-				if err != nil {
-					fmt.Printf("Failed to read file: '%s' %v", notePath, err)
-					return
-				}
-				docs[i] = doc
-				wg.Done()
-			}(i, notePath)
-		}
-
-		wg.Wait()
-
-		// append results to output array
-		documents = append(documents, docs...)
+		// read all documents and append results
+		docs = append(docs, readHandler(paths...)...)
 	}
 
-	//
-	// TEST: for debugging, remove later
-	//
-	// write out tokens
-	b := []byte{}
-	for _, d := range documents {
-		for _, t := range d.tokens {
-			if t.Value == lexer.BreakToken {
-				continue
-			}
-			b = append(b, []byte(fmt.Sprintf("%s, %s, %d\n", t.Lemma, t.Value, t.Zone))...)
-		}
-	}
-	err := os.WriteFile("./tokens.txt", b, 0666)
+	// merge maps and calculate weights
+	err := updateState(docs)
 	if err != nil {
-		panic(err)
+		return err
 	}
-	b = []byte{}
-	b = append(b, []byte("ngram,weight,count\n")...)
-	ngmap := make(map[string]*ngrams.NGram)
-	for _, d := range documents {
-		ngrams.Merge(ngmap, d.ngrams)
-	}
-	ngrams.CalcWeights(ngmap, len(documents))
-	for _, d := range documents {
-		for _, ng := range d.ngrams {
-			b = append(b, []byte(fmt.Sprintf("%s, %f, %d\n", ng.Keyword(), ng.Weight(), ng.Count()))...)
-		}
-	}
-	err = os.WriteFile("./ngrams.txt", b, 0666)
-	if err != nil {
-		panic(err)
-	}
-	b = []byte{}
-	b = append(b, []byte("ngram,weight,count,ndocs\n")...)
-	mng := ngrams.FilterMeaningfulNGrams(ngmap, 2, int(float64(len(documents))/1.5), 4.0)
-	for _, s := range mng {
-		b = append(b, []byte(fmt.Sprintf("%s,%f,%d,%d\n", s, ngmap[s].Weight(), ngmap[s].Count(), len(ngmap[s].Documents())))...)
-	}
-	err = os.WriteFile("./meaningful-ngrams.csv", b, 0666)
-	if err != nil {
-		panic(err)
-	}
-	// ngrams.CosineSimilarity(ngmap)
+	return nil
+}
 
-	// ngcounts := ngrams.Count(ngmap)
-	// freq := ngrams.OrderByFrequency(ngcounts, 10)
-	freq := ngrams.OrderByFrequency(ngmap)
-	b = []byte{}
-	for _, v := range freq {
-		b = append(b, []byte(fmt.Sprintf("%s %f\n", v.Key, v.Value))...)
-	}
-	err = os.WriteFile("./ngram-counts.txt", b, 0666)
-	if err != nil {
-		panic(err)
-	}
-
-	//
-	// TEST: for debugging, remove later
-	//
-
-	return documents, nil
+// readHandler reads, lexes, and generates NGrams for the given note files in parallel
+func readHandler(paths ...string) []*Document {
+	docs := make([]*Document, len(paths))
+	var wg sync.WaitGroup
+
+	// perform a parallel read of found notes files
+	wg.Add(len(paths))
+	for i, p := range paths {
+		go func(i int, notePath string) {
+			// decrement the wait counter even on failed reads, otherwise wg.Wait() blocks forever
+			defer wg.Done()
+			doc, err := readDocumentByFile(notePath)
+			if err != nil {
+				log.Printf("Failed to read file: '%s' %v", notePath, err)
+				return
+			}
+			// TODO: this could be changed to use channels
+			docs[i] = doc
+		}(i, p)
+	}
+	wg.Wait()
+
+	// NOTE: failed reads leave nil entries in the slice; updateState skips them
+	return docs
 }
diff --git a/internal/documents/document.go b/internal/documents/document.go
index df3729e..0a45379 100644
--- a/internal/documents/document.go
+++ b/internal/documents/document.go
@@ -1,8 +1,8 @@
 package documents
 
 import (
-	"fmt"
 	"io"
+	"log"
 	"os"
 
 	"github.com/oolong-sh/oolong/internal/config"
@@ -25,7 +25,7 @@ func (d *Document) KeywordWeights() map[string]float64 { return d.ngwgts }
 
 // Read in a single document file, lex, and generate NGrams
 // Wraps readDocument for explicit use with files
-func ReadDocument(documentPath string) (*Document, error) {
+func readDocumentByFile(documentPath string) (*Document, error) {
 	f, err := os.Open(documentPath)
 	if err != nil {
 		return nil, err
@@ -44,7 +44,7 @@ func readDocumentByFile(documentPath string) (*Document, error) {
 // internal reader function that allows usage of io readers for generalized use
 func readDocument(r io.Reader, documentPath string) (*Document, error) {
 	l := lexer.New()
-	fmt.Printf("Running lexer on %s...\n", documentPath)
+	log.Printf("Running lexer on %s...\n", documentPath)
 	l.Lex(r)
 
 	doc := &Document{
@@ -52,7 +52,7 @@ func readDocument(r io.Reader, documentPath string) (*Document, error) {
 		tokens: l.Output,
 	}
 
-	fmt.Printf("Generating NGrams for %s...\n", documentPath)
+	log.Printf("Generating NGrams for %s...\n", documentPath)
 	doc.ngrams = ngrams.Generate(doc.tokens, config.NGramRange(), doc.path)
 
 	// FIX: weight setting must occur after document NGram maps are merged
diff --git a/internal/documents/state.go b/internal/documents/state.go
new file mode 100644
index 0000000..85a241e
--- /dev/null
+++ b/internal/documents/state.go
@@ -0,0 +1,49 @@
+package documents
+
+import (
+	"log"
+
+	"github.com/oolong-sh/oolong/internal/linking/ngrams"
+)
+
+// application-wide corpus state, updated as documents are read in
+var state = Corpus{
+	Documents: map[string]*Document{},
+	NGrams:    map[string]*ngrams.NGram{},
+}
+
+// Corpus holds all known documents and the merged NGram map across them
+type Corpus struct {
+	Documents map[string]*Document
+	NGrams    map[string]*ngrams.NGram
+}
+
+// State returns the current corpus state
+func State() Corpus { return state }
+
+// updateState upserts documents into the corpus, re-merges NGram maps, and recalculates weights
+func updateState(docs []*Document) error {
+	log.Println("Updating state and recalculating weights...")
+
+	// update state documents
+	for _, doc := range docs {
+		// skip entries for files that failed to read
+		if doc == nil {
+			continue
+		}
+		state.Documents[doc.path] = doc
+	}
+
+	// merge resulting ngram maps
+	for _, d := range state.Documents {
+		ngrams.Merge(state.NGrams, d.ngrams)
+	}
+
+	// calculate weights
+	ngrams.CalcWeights(state.NGrams, len(state.Documents))
+	log.Println("Done calculating weights.")
+
+	// TODO: other things? (file writes?)
+
+	return nil
+}
diff --git a/internal/linking/lexer/lexeme.go b/internal/linking/lexer/lexeme.go
index 879d712..674a11c 100644
--- a/internal/linking/lexer/lexeme.go
+++ b/internal/linking/lexer/lexeme.go
@@ -36,10 +36,10 @@ const (
 )
 
 type Lexeme struct {
-	Lemma   string  // lexical root of unit (i.e. continues -> continue)
-	Value   string  // lexical unit
-	Row     int     // row location in file
-	Col     int     // column location of first character in file
+	Lemma string // lexical root of unit (i.e. continues -> continue)
+	Value string // lexical unit
+	// Row int // row location in file
+	// Col int // column location of first character in file
 	LexType LexType // type of lexical unit
 	Zone    Zone    // document zone
 }
diff --git a/internal/linking/lexer/lexer_test.go b/internal/linking/lexer/lexer_test.go
index b50e89a..dddeace 100644
--- a/internal/linking/lexer/lexer_test.go
+++ b/internal/linking/lexer/lexer_test.go
@@ -15,7 +15,7 @@ var cfg config.OolongConfig
 
 func init() {
 	var err error
-	cfg, err = config.Setup("")
+	cfg, err = config.Setup("../../../examples/oolong.json")
 	if err != nil {
 		panic(err)
 	}
@@ -34,17 +34,19 @@ func TestReadDocumentSimple(t *testing.T) {
 	}
 	expectedTokens := []lexer.Lexeme{
 		{
-			Lemma:   "hello",
-			Value:   "Hello",
-			Row:     1,
-			Col:     1,
+			Lemma: "hello",
+			Value: "Hello",
+			// Row: 1,
+			// Col: 1,
 			LexType: lexer.Word,
+			Zone:    lexer.Default,
 		},
 		{
-			Lemma:   "world",
-			Value:   "world",
-			Row:     1,
-			Col:     7,
+			Lemma: "world",
+			Value: "world",
+			// Row: 1,
+			// Col: 7,
 			LexType: lexer.Word,
+			Zone:    lexer.Default,
 		},
 	}
 	if !slices.Equal(l.Output, expectedTokens) {
@@ -62,24 +64,27 @@ func TestReadDocumentSimple(t *testing.T) {
 	}
 	expectedTokens = []lexer.Lexeme{
 		{
-			Lemma:   "hello",
-			Value:   "Hello",
-			Row:     1,
-			Col:     1,
+			Lemma: "hello",
+			Value: "Hello",
+			// Row: 1,
+			// Col: 1,
 			LexType: lexer.Word,
+			Zone:    lexer.Default,
 		},
 		{
-			Value:   lexer.BreakToken,
-			Row:     1,
-			Col:     8,
+			Value: lexer.BreakToken,
+			// Row: 1,
+			// Col: 8,
 			LexType: lexer.Break,
+			Zone:    lexer.Default,
 		},
 		{
-			Lemma:   "world",
-			Value:   "World",
-			Row:     2,
-			Col:     1,
+			Lemma: "world",
+			Value: "world",
+			// Row: 2,
+			// Col: 1,
 			LexType: lexer.Word,
+			Zone:    lexer.Default,
 		},
 	}
 	if !slices.Equal(l.Output, expectedTokens) {
@@ -89,8 +94,8 @@ func TestReadDocumentSimple(t *testing.T) {
 	// test with many newlines and multiple single-line lexemes
 	s = "\nHello, \nworld! Foo-bar baz \n\n foo"
 	rd = strings.NewReader(s)
-	l.Lex(rd)
 	l = lexer.New()
+	l.Lex(rd)
 	fmt.Println("Input:", s, " Output:", l.Output)
 	if len(l.Output) != 9 {
 		t.Fatalf("Incorrect Document.Content length. Expected %d, got %d", 9, len(l.Output))
diff --git a/internal/linking/lexer/lexer_utils.go b/internal/linking/lexer/lexer_utils.go
index b56e03e..2f0063e 100644
--- a/internal/linking/lexer/lexer_utils.go
+++ b/internal/linking/lexer/lexer_utils.go
@@ -11,13 +11,17 @@ var eof rune = -1
 
 var (
 	// Heading (e.g., # Heading, ## Heading) - only matches standalone heading lines
-	headingPattern = regexp.MustCompile(`^(#{1,6})\s+(.+?)\s*$`)
+	// (?m) is required to allow matching from start/end of line rather than start/end of string
+	// FIX: these capture groups are sometimes wrapping around lines (probably abandon regex and use more advanced lexer logic)
+	h1Pattern = regexp.MustCompile(`(?m)^(#)\s+(.+?)\s*$`)
+	h2Pattern = regexp.MustCompile(`(?m)^(#{2})\s+(.+?)\s*$`)
+	h3Pattern = regexp.MustCompile(`(?m)^(#{3})\s+(.+?)\s*$`)
+	h4Pattern = regexp.MustCompile(`(?m)^(#{4})\s+(.+?)\s*$`)
+	h5Pattern = regexp.MustCompile(`(?m)^(#{5})\s+(.+?)\s*$`)
 	// Bold text (e.g., **bold** or __bold__) - matches inline without lookaheads/behinds
 	boldPattern = regexp.MustCompile(`\*\*(.+?)\*\*|__(.+?)__`)
 	// Italic text (e.g., *italic* or _italic_) - matches inline without lookaheads/behinds
 	italicPattern = regexp.MustCompile(`(?:^|[^\w])(\*(\w+?)\*|_(\w+?)_)(?:[^\w]|$)`)
-	// Lists (e.g., - item or * item) - matches only at the beginning of a line
-	listPattern = regexp.MustCompile(`(?m)^\s*([*+-])\s+(.+)$`)
 	// Link (e.g., [text](url))
 	linkPattern = regexp.MustCompile(`\[(.*?)\]\((.*?)\)`)
 	// Image (e.g., ![alt text](url))
@@ -28,9 +32,9 @@ func (l *Lexer) push(v LexType) {
 	switch v {
 	case Break:
 		l.Output = append(l.Output, Lexeme{
-			Value:   BreakToken,
-			Row:     l.row,
-			Col:     l.col,
+			Value: BreakToken,
+			// Row: l.row,
+			// Col: l.col,
 			LexType: Break,
 			Zone:    l.zone,
 		})
@@ -44,9 +48,9 @@ func (l *Lexer) push(v LexType) {
 		l.Output = append(l.Output, Lexeme{
 			Lemma: lemma,
 			Value: word,
-			Row:   l.row,
 			// FIX: handles removed characters incorrectly in calculation (what start is probably supposed to be used for)
-			Col: l.col - 1 - len(word),
+			// Row: l.row,
+			// Col: l.col - 1 - len(word),
 			// Col: l.col - l.start,
 			LexType: Word,
 			Zone:    l.zone,
@@ -102,13 +106,21 @@ func (l *Lexer) ignore() {
 }
 
 func (l *Lexer) detectZone() {
-	peekBuffer, _ := l.br.Peek(128) // Adjust size as needed
+	peekBuffer, _ := l.br.Peek(32)
 	switch {
 	// FIX: handle remaining cases
 	// TODO: add capture group for code blocks (might just need a boolean flag for them)
-	case headingPattern.Match(peekBuffer):
+	case h1Pattern.Match(peekBuffer):
 		l.zone = H1
+	case h2Pattern.Match(peekBuffer):
+		l.zone = H2
+	case h3Pattern.Match(peekBuffer):
+		l.zone = H3
+	case h4Pattern.Match(peekBuffer):
+		l.zone = H4
+	case h5Pattern.Match(peekBuffer):
+		l.zone = H5
 	// case sectionPattern.Match(peekBuffer):
 	// 	l.zone = Default
 	// case boldPattern.Match(peekBuffer):
diff --git a/internal/linking/ngrams/frequency.go b/internal/linking/ngrams/frequency.go
index 876ac65..31fb499 100644
--- a/internal/linking/ngrams/frequency.go
+++ b/internal/linking/ngrams/frequency.go
@@ -5,19 +5,19 @@ import (
 )
 
 // Calculate term frequency
-func tf(ngmap map[string]*NGram, path string) {
-	// totalCount := 0
-	// for _, ng := range ngmap {
-	// 	totalCount += ng.documents[path].DocumentCount
-	// }
-
-	for _, ng := range ngmap {
-		nginfo := ng.documents[path]
-		// normalize by document token count
-		// nginfo.DocumentTF = float64(nginfo.DocumentCount) / float64(totalCount)
-		nginfo.DocumentTF = float64(nginfo.DocumentCount)
-	}
-}
+// func tf(ngmap map[string]*NGram, path string) {
+// 	// totalCount := 0
+// 	// for _, ng := range ngmap {
+// 	// 	totalCount += ng.documents[path].DocumentCount
+// 	// }
+//
+// 	for _, ng := range ngmap {
+// 		nginfo := ng.documents[path]
+// 		// normalize by document token count
+// 		// nginfo.DocumentTF = float64(nginfo.DocumentCount) / float64(totalCount)
+// 		nginfo.DocumentTF = float64(nginfo.DocumentCount)
+// 	}
+// }
 
 // Calculate inverse document frequency of NGrams
 // N is the total number of documents in the text corpus
@@ -42,7 +42,8 @@ func idf(ngmap map[string]*NGram, N int) {
 func tfidf(ngmap map[string]*NGram) {
 	for _, ng := range ngmap {
 		for _, nginfo := range ng.documents {
-			nginfo.DocumentTfIdf = nginfo.DocumentTF * ng.idf
+			// nginfo.DocumentTfIdf = nginfo.DocumentTF * ng.idf
+			nginfo.DocumentWeight = float64(nginfo.DocumentCount) * ng.idf
 		}
 	}
 }
@@ -73,7 +74,8 @@ func bm25(ngmap map[string]*NGram) {
 		b = zoneB[ng.zone]
 		k1 = zoneK1[ng.zone]
 		for path, nginfo := range ng.documents {
-			nginfo.DocumentBM25 = ng.idf * ((nginfo.DocumentTF * (k1 + 1)) / (nginfo.DocumentTF + k1*(1-b+b*(d[path]/davg))))
+			tf := float64(nginfo.DocumentCount)
+			nginfo.DocumentWeight = ng.idf * ((tf * (k1 + 1)) / (tf + k1*(1-b+b*(d[path]/davg))))
 		}
 	}
 }
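For intuition about the bm25 change above: with the DocumentTF field gone, term frequency is now taken directly from DocumentCount, and each per-document score is idf * (tf*(k1+1)) / (tf + k1*(1 - b + b*(len/avg))). A standalone toy calculation (the constants here are made up for illustration, not taken from the zone tables):

package main

import "fmt"

// bm25Term mirrors the scoring expression used in bm25 above
func bm25Term(idf, tf, k1, b, docLen, avgLen float64) float64 {
	return idf * ((tf * (k1 + 1)) / (tf + k1*(1-b+b*(docLen/avgLen))))
}

func main() {
	// the same raw count scores lower in a longer-than-average document
	fmt.Println(bm25Term(2.0, 3, 1.2, 0.75, 100, 150)) // ~3.38
	fmt.Println(bm25Term(2.0, 3, 1.2, 0.75, 300, 150)) // ~2.59
}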
diff --git a/internal/linking/ngrams/ngram.go b/internal/linking/ngrams/ngram.go
index e1343fc..1955c6a 100644
--- a/internal/linking/ngrams/ngram.go
+++ b/internal/linking/ngrams/ngram.go
@@ -25,12 +25,12 @@ type NGram struct {
 
 // Information about NGram occurrences in a single document
 type NGramInfo struct {
-	DocumentCount     int
-	DocumentWeight    float64
-	DocumentLocations []location
-	DocumentTF        float64
-	DocumentTfIdf     float64
-	DocumentBM25      float64
+	DocumentCount  int
+	DocumentWeight float64
+	// DocumentLocations []location
+	// DocumentTF    float64
+	// DocumentTfIdf float64
+	// DocumentBM25  float64
 }
 
 // location type for occurrence of an NGram within a document
@@ -94,7 +94,7 @@ func Generate(tokens []lexer.Lexeme, nrange []int, path string) map[string]*NGra
 	}
 
 	// calculate term frequencies
-	tf(ngrams, path)
+	// tf(ngrams, path) // using count instead
 
 	return ngrams
 }
@@ -104,13 +104,15 @@ func Merge(maps ...map[string]*NGram) {
 	for i := 1; i < len(maps); i++ {
 		for k, vi := range maps[i] {
 			if v0, ok := maps[0][k]; !ok {
+				// ngram key not found in main map, add it
 				maps[0][k] = vi
 			} else {
+				// ngram key found in map, merge counts and document info
+				// weights should be calculated elsewhere after all merges are completed
 				v0.globalCount += vi.globalCount
 				for dk, dv := range vi.documents {
 					v0.documents[dk] = dv
 				}
-				// weights should be calculated elsewhere after all merges are completed
 			}
 		}
 	}
diff --git a/internal/linking/ngrams/ngram_utils.go b/internal/linking/ngrams/ngram_utils.go
index ae31d53..838e259 100644
--- a/internal/linking/ngrams/ngram_utils.go
+++ b/internal/linking/ngrams/ngram_utils.go
@@ -14,7 +14,7 @@ func addNGram(k string, n int, ngmap map[string]*NGram, i int, tokens []lexer.Le
 		doc := ngram.documents[path]
 		doc.DocumentCount++
-		doc.DocumentLocations = append(doc.DocumentLocations, location{row: tokens[i].Row, col: tokens[i].Col})
+		// doc.DocumentLocations = append(doc.DocumentLocations, location{row: tokens[i].Row, col: tokens[i].Col})
 
 		// update ngram zone if current is considered more valuable
 		if tokens[i].Zone < ngram.zone {
@@ -25,9 +25,9 @@ func addNGram(k string, n int, ngmap map[string]*NGram, i int, tokens []lexer.Le
 
 		// create document info struct for ngram
 		documents[path] = &NGramInfo{
-			DocumentCount:     1,
-			DocumentWeight:    0,
-			DocumentLocations: []location{{row: tokens[i].Row, col: tokens[i].Col}},
+			DocumentCount:  1,
+			DocumentWeight: 0,
+			// DocumentLocations: []location{{row: tokens[i].Row, col: tokens[i].Col}},
 		}
 
 		// create ngram
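A note on the Merge semantics above, since updateState re-merges per-document maps whenever files change: global counts are summed, while per-document info entries are overwritten by later maps, so a re-merged document replaces its own per-document stats rather than doubling them (globalCount, however, does accumulate across merges). An in-package illustration (struct literals simplified; unexported fields like keyword and zone are omitted):

a := map[string]*NGram{"note": {globalCount: 2, documents: map[string]*NGramInfo{"a.md": {DocumentCount: 2}}}}
b := map[string]*NGram{"note": {globalCount: 1, documents: map[string]*NGramInfo{"b.md": {DocumentCount: 1}}}}
Merge(a, b)
// a["note"].globalCount == 3; a["note"].documents now has entries for both a.md and b.md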
diff --git a/internal/linking/ngrams/similarity.go b/internal/linking/ngrams/similarity.go
index 83dfc7b..15167a3 100644
--- a/internal/linking/ngrams/similarity.go
+++ b/internal/linking/ngrams/similarity.go
@@ -1,7 +1,7 @@
 package ngrams
 
 import (
-	"fmt"
+	"log"
 	"math"
 )
 
@@ -38,12 +38,12 @@ func CosineSimilarity(ngmap map[string]*NGram) {
 			}
 			similarity := calculateCosineSimilarity(vec1, vec2)
 			// TODO: do something other than print here? -- (if this actually ends up being used)
-			fmt.Printf("%s, %s, %.4f\n", doc1, doc2, similarity)
+			log.Printf("%s, %s, %.4f\n", doc1, doc2, similarity)
 		}
 	}
 }
 
-// Construct tf-idf score vectors
+// Construct weighting score vectors
 func constructDocumentVectors(ngmap map[string]*NGram) map[string]map[string]float64 {
 	documentVectors := make(map[string]map[string]float64)
 
@@ -52,7 +52,7 @@ func constructDocumentVectors(ngmap map[string]*NGram) map[string]map[string]flo
 			if _, exists := documentVectors[doc]; !exists {
 				documentVectors[doc] = make(map[string]float64)
 			}
-			documentVectors[doc][ngram.keyword] = nginfo.DocumentTfIdf
+			documentVectors[doc][ngram.keyword] = nginfo.DocumentWeight
 		}
 	}
diff --git a/internal/linking/ngrams/weighting.go b/internal/linking/ngrams/weighting.go
index fa55fa2..d2b66c2 100644
--- a/internal/linking/ngrams/weighting.go
+++ b/internal/linking/ngrams/weighting.go
@@ -65,18 +65,19 @@ func (ng *NGram) updateWeight() {
 
 	// TODO: these numbers are subject to change
 	// - document and count adjustments are too high for n=1
-	ladj := math.Min(0.1*float64(len(ng.keyword)), 1.1)              // length adjustment
-	cadj := math.Min(0.1*float64(ng.n)*float64(ng.globalCount), 1.5) // count adjustment
-	dadj := math.Min(0.11*float64(ng.n)*float64(len(ng.documents)), 2)
+	ladj := math.Min(0.12*float64(len(ng.keyword)), 1.2)                 // length adjustment
+	cadj := math.Min(0.08*float64(ng.n)*float64(ng.globalCount), 1.5)    // count adjustment
+	dadj := math.Min(0.08*float64(ng.n)*float64(len(ng.documents)), 1.8) // document occurrence adjustment
 
 	// TODO: heavily prefer count / len(dg.documents) > 1
 	// cdadj := math.Min(0.5*float64(ng.globalCount)/float64(len(ng.documents)), 2)
 
 	adjustment := ladj * cadj * nadj[ng.n] * dadj // * cdadj
 
 	for _, nginfo := range ng.documents {
-		// TODO: set document weight here
-		nginfo.DocumentWeight = nginfo.DocumentBM25 * adjustment
-		w += nginfo.DocumentBM25
+		// DocumentWeight holds the bm25 or tf-idf score before this point;
+		// apply the adjustment to the document weight
+		nginfo.DocumentWeight *= adjustment
+		w += nginfo.DocumentWeight
 		df++
 	}
diff --git a/main.go b/main.go
index 5f302fa..04e865a 100644
--- a/main.go
+++ b/main.go
@@ -1,12 +1,19 @@
 package main
 
 import (
+	"flag"
 	"fmt"
 
 	"github.com/oolong-sh/oolong/internal/config"
+	"github.com/oolong-sh/oolong/internal/daemon"
 	"github.com/oolong-sh/oolong/internal/documents"
 )
 
+var noDaemonFlag = flag.Bool("no-daemon", false, "Run Oolong in no-daemon mode (not recommended)")
+
 func main() {
+	// parse flags before any other work so they are usable below
+	flag.Parse()
+
 	cfg, err := config.Setup("~/.oolong.json")
 	if err != nil {
@@ -15,11 +22,14 @@ func main() {
 	}
 	fmt.Println(cfg.PluginPaths)
 
-	d, err := documents.ReadNotesDirs()
+	err = documents.ReadNotesDirs()
 	if err != nil {
+		fmt.Println("Failed to read notes directories:", err)
 		return
 	}
-	_ = d
 
-	// plugins.InitPlugins(&cfg)
+	// go plugins.InitPlugins(&cfg)
+	if !*noDaemonFlag {
+		daemon.Run()
+	}
 }
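With these changes, running the binary with no flags performs the initial read of the notes directories and then starts the daemon (the watcher goroutine plus the blocked main goroutine), while `oolong -no-daemon` performs a one-shot read and exits. The flag name comes from the flag.Bool declaration in main.go; the plugin init call remains commented out in both modes.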